gcc/tree-ssa-loop-niter.c
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "tree.h"
25 #include "tm_p.h"
26 #include "basic-block.h"
27 #include "gimple-pretty-print.h"
28 #include "intl.h"
29 #include "tree-flow.h"
30 #include "dumpfile.h"
31 #include "cfgloop.h"
32 #include "ggc.h"
33 #include "tree-chrec.h"
34 #include "tree-scalar-evolution.h"
35 #include "tree-data-ref.h"
36 #include "params.h"
37 #include "flags.h"
38 #include "diagnostic-core.h"
39 #include "tree-inline.h"
40 #include "tree-pass.h"
41
42 #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
43
44 /* The maximum number of dominator BBs we search for conditions
45 of loop header copies that we use for simplifying a conditional
46 expression. */
47 #define MAX_DOMINATORS_TO_WALK 8
48
49 /*
50
51 Analysis of number of iterations of an affine exit test.
52
53 */
54
55 /* Bounds on some value, BELOW <= X <= UP. */
56
57 typedef struct
58 {
59 mpz_t below, up;
60 } bounds;
61
62
63 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
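/* For instance, "a_5 + 7" is split into VAR = a_5 and OFFSET = 7, while
   "a_5 - 7" yields VAR = a_5 and OFFSET = -7; a plain INTEGER_CST becomes
   VAR = 0 with the constant as OFFSET.  */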
64
65 static void
66 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
67 {
68 tree type = TREE_TYPE (expr);
69 tree op0, op1;
70 double_int off;
71 bool negate = false;
72
73 *var = expr;
74 mpz_set_ui (offset, 0);
75
76 switch (TREE_CODE (expr))
77 {
78 case MINUS_EXPR:
79 negate = true;
80 /* Fallthru. */
81
82 case PLUS_EXPR:
83 case POINTER_PLUS_EXPR:
84 op0 = TREE_OPERAND (expr, 0);
85 op1 = TREE_OPERAND (expr, 1);
86
87 if (TREE_CODE (op1) != INTEGER_CST)
88 break;
89
90 *var = op0;
91 /* Always sign extend the offset. */
92 off = tree_to_double_int (op1);
93 off = off.sext (TYPE_PRECISION (type));
94 mpz_set_double_int (offset, off, false);
95 if (negate)
96 mpz_neg (offset, offset);
97 break;
98
99 case INTEGER_CST:
100 *var = build_int_cst_type (type, 0);
101 off = tree_to_double_int (expr);
102 mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
103 break;
104
105 default:
106 break;
107 }
108 }
109
110 /* Stores an estimate of the minimum/maximum value of the expression VAR + OFF
111 in TYPE to MIN and MAX. */
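/* For instance, for VAR of a signed 8-bit type (whose overflow is undefined)
   and OFF = 5, VAR + OFF lies in [-123, 127]; if the type may wrap, only the
   static type range is known.  */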
112
113 static void
114 determine_value_range (tree type, tree var, mpz_t off,
115 mpz_t min, mpz_t max)
116 {
117 /* If the expression is a constant, we know its value exactly. */
118 if (integer_zerop (var))
119 {
120 mpz_set (min, off);
121 mpz_set (max, off);
122 return;
123 }
124
125 /* If the computation may wrap, we know nothing about the value, except for
126 the range of the type. */
127 get_type_static_bounds (type, min, max);
128 if (!nowrap_type_p (type))
129 return;
130
131 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
132 add it to MIN, otherwise to MAX. */
133 if (mpz_sgn (off) < 0)
134 mpz_add (max, max, off);
135 else
136 mpz_add (min, min, off);
137 }
138
139 /* Stores the bounds on the difference of the values of the expressions
140 (var + X) and (var + Y), computed in TYPE, to BNDS. */
141
142 static void
143 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
144 bounds *bnds)
145 {
146 int rel = mpz_cmp (x, y);
147 bool may_wrap = !nowrap_type_p (type);
148 mpz_t m;
149
150 /* If X == Y, then the expressions are always equal.
151 If X > Y, there are the following possibilities:
152 a) neither of var + X and var + Y overflow or underflow, or both of
153 them do. Then their difference is X - Y.
154 b) var + X overflows, and var + Y does not. Then the values of the
155 expressions are var + X - M and var + Y, where M is the range of
156 the type, and their difference is X - Y - M.
157 c) var + Y underflows and var + X does not. Their difference again
158 is X - Y - M.
159 Therefore, if the arithmetic in TYPE cannot overflow, then the
160 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
161 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
162 (X - Y, X - Y + M). */
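  /* For instance, in an 8-bit unsigned type (M = 256) with X = 200 and
     Y = 10, the difference is 190 if the arithmetic cannot wrap; if it may
     wrap, var + X may overflow while var + Y does not, and the bounds
     become (190 - 256, 190) = (-66, 190).  */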
163
164 if (rel == 0)
165 {
166 mpz_set_ui (bnds->below, 0);
167 mpz_set_ui (bnds->up, 0);
168 return;
169 }
170
171 mpz_init (m);
172 mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
173 mpz_add_ui (m, m, 1);
174 mpz_sub (bnds->up, x, y);
175 mpz_set (bnds->below, bnds->up);
176
177 if (may_wrap)
178 {
179 if (rel > 0)
180 mpz_sub (bnds->below, bnds->below, m);
181 else
182 mpz_add (bnds->up, bnds->up, m);
183 }
184
185 mpz_clear (m);
186 }
187
188 /* From condition C0 CMP C1 derives information regarding the
189 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
190 and stores it to BNDS. */
191
192 static void
193 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
194 tree vary, mpz_t offy,
195 tree c0, enum tree_code cmp, tree c1,
196 bounds *bnds)
197 {
198 tree varc0, varc1, tmp, ctype;
199 mpz_t offc0, offc1, loffx, loffy, bnd;
200 bool lbound = false;
201 bool no_wrap = nowrap_type_p (type);
202 bool x_ok, y_ok;
203
204 switch (cmp)
205 {
206 case LT_EXPR:
207 case LE_EXPR:
208 case GT_EXPR:
209 case GE_EXPR:
210 STRIP_SIGN_NOPS (c0);
211 STRIP_SIGN_NOPS (c1);
212 ctype = TREE_TYPE (c0);
213 if (!useless_type_conversion_p (ctype, type))
214 return;
215
216 break;
217
218 case EQ_EXPR:
219 /* We could derive quite precise information from EQ_EXPR; however, such
220 a guard is unlikely to appear, so we do not bother with handling
221 it. */
222 return;
223
224 case NE_EXPR:
225 /* NE_EXPR comparisons do not contain much useful information, except for
226 the special case of comparing with the bounds of the type. */
227 if (TREE_CODE (c1) != INTEGER_CST
228 || !INTEGRAL_TYPE_P (type))
229 return;
230
231 /* Ensure that the condition speaks about an expression in the same type
232 as X and Y. */
233 ctype = TREE_TYPE (c0);
234 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
235 return;
236 c0 = fold_convert (type, c0);
237 c1 = fold_convert (type, c1);
238
239 if (TYPE_MIN_VALUE (type)
240 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
241 {
242 cmp = GT_EXPR;
243 break;
244 }
245 if (TYPE_MAX_VALUE (type)
246 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
247 {
248 cmp = LT_EXPR;
249 break;
250 }
251
252 return;
253 default:
254 return;
255 }
256
257 mpz_init (offc0);
258 mpz_init (offc1);
259 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
260 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
261
262 /* We are only interested in comparisons of expressions based on VARX and
263 VARY. TODO -- we might also be able to derive some bounds from
264 expressions containing just one of the variables. */
265
266 if (operand_equal_p (varx, varc1, 0))
267 {
268 tmp = varc0; varc0 = varc1; varc1 = tmp;
269 mpz_swap (offc0, offc1);
270 cmp = swap_tree_comparison (cmp);
271 }
272
273 if (!operand_equal_p (varx, varc0, 0)
274 || !operand_equal_p (vary, varc1, 0))
275 goto end;
276
277 mpz_init_set (loffx, offx);
278 mpz_init_set (loffy, offy);
279
280 if (cmp == GT_EXPR || cmp == GE_EXPR)
281 {
282 tmp = varx; varx = vary; vary = tmp;
283 mpz_swap (offc0, offc1);
284 mpz_swap (loffx, loffy);
285 cmp = swap_tree_comparison (cmp);
286 lbound = true;
287 }
288
289 /* If there is no overflow, the condition implies that
290
291 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
292
293 The overflows and underflows may complicate things a bit; each
294 overflow decreases the appropriate offset by M, and underflow
295 increases it by M. The above inequality would not necessarily be
296 true if
297
298 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
299 VARX + OFFC0 overflows, but VARX + OFFX does not.
300 This may only happen if OFFX < OFFC0.
301 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
302 VARY + OFFC1 underflows and VARY + OFFY does not.
303 This may only happen if OFFY > OFFC1. */
304
305 if (no_wrap)
306 {
307 x_ok = true;
308 y_ok = true;
309 }
310 else
311 {
312 x_ok = (integer_zerop (varx)
313 || mpz_cmp (loffx, offc0) >= 0);
314 y_ok = (integer_zerop (vary)
315 || mpz_cmp (loffy, offc1) <= 0);
316 }
317
318 if (x_ok && y_ok)
319 {
320 mpz_init (bnd);
321 mpz_sub (bnd, loffx, loffy);
322 mpz_add (bnd, bnd, offc1);
323 mpz_sub (bnd, bnd, offc0);
324
325 if (cmp == LT_EXPR)
326 mpz_sub_ui (bnd, bnd, 1);
327
328 if (lbound)
329 {
330 mpz_neg (bnd, bnd);
331 if (mpz_cmp (bnds->below, bnd) < 0)
332 mpz_set (bnds->below, bnd);
333 }
334 else
335 {
336 if (mpz_cmp (bnd, bnds->up) < 0)
337 mpz_set (bnds->up, bnd);
338 }
339 mpz_clear (bnd);
340 }
341
342 mpz_clear (loffx);
343 mpz_clear (loffy);
344 end:
345 mpz_clear (offc0);
346 mpz_clear (offc1);
347 }
348
349 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
350 The subtraction is considered to be performed in arbitrary precision,
351 without overflows.
352
353 We do not attempt to be too clever regarding the value ranges of X and
354 Y; most of the time, they are just integers or SSA names offset by an
355 integer constant. However, we try to use the information contained in the
356 comparisons before the loop (usually created by loop header copying). */
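/* For instance, if the loop entry is dominated by a guard "if (a_1 < b_2)"
   (typically created by loop header copying), then for X = b_2 and Y = a_1
   the lower bound on X - Y can be refined to 1.  */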
357
358 static void
359 bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
360 {
361 tree type = TREE_TYPE (x);
362 tree varx, vary;
363 mpz_t offx, offy;
364 mpz_t minx, maxx, miny, maxy;
365 int cnt = 0;
366 edge e;
367 basic_block bb;
368 tree c0, c1;
369 gimple cond;
370 enum tree_code cmp;
371
372 /* Get rid of unnecessary casts, but preserve the value of
373 the expressions. */
374 STRIP_SIGN_NOPS (x);
375 STRIP_SIGN_NOPS (y);
376
377 mpz_init (bnds->below);
378 mpz_init (bnds->up);
379 mpz_init (offx);
380 mpz_init (offy);
381 split_to_var_and_offset (x, &varx, offx);
382 split_to_var_and_offset (y, &vary, offy);
383
384 if (!integer_zerop (varx)
385 && operand_equal_p (varx, vary, 0))
386 {
387 /* Special case VARX == VARY -- we just need to compare the
388 offsets. Matters are a bit more complicated when the
389 addition of the offsets may wrap. */
390 bound_difference_of_offsetted_base (type, offx, offy, bnds);
391 }
392 else
393 {
394 /* Otherwise, use the value ranges to determine the initial
395 estimates on below and up. */
396 mpz_init (minx);
397 mpz_init (maxx);
398 mpz_init (miny);
399 mpz_init (maxy);
400 determine_value_range (type, varx, offx, minx, maxx);
401 determine_value_range (type, vary, offy, miny, maxy);
402
403 mpz_sub (bnds->below, minx, maxy);
404 mpz_sub (bnds->up, maxx, miny);
405 mpz_clear (minx);
406 mpz_clear (maxx);
407 mpz_clear (miny);
408 mpz_clear (maxy);
409 }
410
411 /* If both X and Y are constants, we cannot get any more precise. */
412 if (integer_zerop (varx) && integer_zerop (vary))
413 goto end;
414
415 /* Now walk the dominators of the loop header and use the entry
416 guards to refine the estimates. */
417 for (bb = loop->header;
418 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
419 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
420 {
421 if (!single_pred_p (bb))
422 continue;
423 e = single_pred_edge (bb);
424
425 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
426 continue;
427
428 cond = last_stmt (e->src);
429 c0 = gimple_cond_lhs (cond);
430 cmp = gimple_cond_code (cond);
431 c1 = gimple_cond_rhs (cond);
432
433 if (e->flags & EDGE_FALSE_VALUE)
434 cmp = invert_tree_comparison (cmp, false);
435
436 refine_bounds_using_guard (type, varx, offx, vary, offy,
437 c0, cmp, c1, bnds);
438 ++cnt;
439 }
440
441 end:
442 mpz_clear (offx);
443 mpz_clear (offy);
444 }
445
446 /* Update the bounds in BNDS, which restrict the value of X, so that they
447 restrict the value of X + DELTA instead. X can be obtained as a
448 difference of two values in TYPE. */
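/* For instance, in an 8-bit type, if X is known to lie in [-3, 250] and
   DELTA is 10, then X + DELTA lies in [7, 255]; the bounds are clamped to
   [-255, 255], the range of a difference of two 8-bit values.  */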
449
450 static void
451 bounds_add (bounds *bnds, double_int delta, tree type)
452 {
453 mpz_t mdelta, max;
454
455 mpz_init (mdelta);
456 mpz_set_double_int (mdelta, delta, false);
457
458 mpz_init (max);
459 mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
460
461 mpz_add (bnds->up, bnds->up, mdelta);
462 mpz_add (bnds->below, bnds->below, mdelta);
463
464 if (mpz_cmp (bnds->up, max) > 0)
465 mpz_set (bnds->up, max);
466
467 mpz_neg (max, max);
468 if (mpz_cmp (bnds->below, max) < 0)
469 mpz_set (bnds->below, max);
470
471 mpz_clear (mdelta);
472 mpz_clear (max);
473 }
474
475 /* Update the bounds in BNDS, which restrict the value of X, so that they
476 restrict the value of -X instead. */
477
478 static void
479 bounds_negate (bounds *bnds)
480 {
481 mpz_t tmp;
482
483 mpz_init_set (tmp, bnds->up);
484 mpz_neg (bnds->up, bnds->below);
485 mpz_neg (bnds->below, tmp);
486 mpz_clear (tmp);
487 }
488
489 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
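/* For instance, with MASK = 15 (so s = 4) and X = 3, the repeated squaring
   below computes 3^(2^3 - 1) = 2187, and 2187 & 15 = 11; indeed
   3 * 11 = 33 == 1 (mod 16).  */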
490
491 static tree
492 inverse (tree x, tree mask)
493 {
494 tree type = TREE_TYPE (x);
495 tree rslt;
496 unsigned ctr = tree_floor_log2 (mask);
497
498 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
499 {
500 unsigned HOST_WIDE_INT ix;
501 unsigned HOST_WIDE_INT imask;
502 unsigned HOST_WIDE_INT irslt = 1;
503
504 gcc_assert (cst_and_fits_in_hwi (x));
505 gcc_assert (cst_and_fits_in_hwi (mask));
506
507 ix = int_cst_value (x);
508 imask = int_cst_value (mask);
509
510 for (; ctr; ctr--)
511 {
512 irslt *= ix;
513 ix *= ix;
514 }
515 irslt &= imask;
516
517 rslt = build_int_cst_type (type, irslt);
518 }
519 else
520 {
521 rslt = build_int_cst (type, 1);
522 for (; ctr; ctr--)
523 {
524 rslt = int_const_binop (MULT_EXPR, rslt, x);
525 x = int_const_binop (MULT_EXPR, x, x);
526 }
527 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
528 }
529
530 return rslt;
531 }
532
533 /* Derives the upper bound BND on the number of executions of loop with exit
534 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
535 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
536 that the loop ends through this exit, i.e., the induction variable
537 eventually reaches the value of C.
538
539 The value C is equal to final - base, where final and base are the final and
540 initial value of the actual induction variable in the analysed loop. BNDS
541 bounds the value of this difference when computed in signed type with
542 unbounded range, while the computation of C is performed in an unsigned
543 type with the range matching the range of the type of the induction variable.
544 In particular, BNDS.up contains an upper bound on C in the following cases:
545 -- if the iv must reach its final value without overflow, i.e., if
546 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
547 -- if final >= base, which we know to hold when BNDS.below >= 0. */
548
549 static void
550 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
551 bounds *bnds, bool exit_must_be_taken)
552 {
553 double_int max;
554 mpz_t d;
555 tree type = TREE_TYPE (c);
556 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
557 || mpz_sgn (bnds->below) >= 0);
558
559 if (integer_onep (s)
560 || (TREE_CODE (c) == INTEGER_CST
561 && TREE_CODE (s) == INTEGER_CST
562 && tree_to_double_int (c).mod (tree_to_double_int (s),
563 TYPE_UNSIGNED (type),
564 EXACT_DIV_EXPR).is_zero ())
565 || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c))
566 && multiple_of_p (type, c, s)))
567 {
568 /* If C is an exact multiple of S, then its value will be reached before
569 the induction variable overflows (unless the loop is exited in some
570 other way before). Note that the actual induction variable in the
571 loop (which ranges from base to final instead of from 0 to C) may
572 overflow, in which case BNDS.up will not be giving a correct upper
573 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
574 no_overflow = true;
575 exit_must_be_taken = true;
576 }
577
578 /* If the induction variable can overflow, the number of iterations is at
579 most the period of the control variable (or infinite, but in that case
580 the whole # of iterations analysis will fail). */
581 if (!no_overflow)
582 {
583 max = double_int::mask (TYPE_PRECISION (type)
584 - tree_low_cst (num_ending_zeros (s), 1));
585 mpz_set_double_int (bnd, max, true);
586 return;
587 }
588
589 /* Now we know that the induction variable does not overflow, so the loop
590 iterates at most (range of type / S) times. */
591 mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true);
592
593 /* If the induction variable is guaranteed to reach the value of C before
594 overflow, ... */
595 if (exit_must_be_taken)
596 {
597 /* ... then we can strengthen this to C / S, and possibly we can use
598 the upper bound on C given by BNDS. */
599 if (TREE_CODE (c) == INTEGER_CST)
600 mpz_set_double_int (bnd, tree_to_double_int (c), true);
601 else if (bnds_u_valid)
602 mpz_set (bnd, bnds->up);
603 }
604
605 mpz_init (d);
606 mpz_set_double_int (d, tree_to_double_int (s), true);
607 mpz_fdiv_q (bnd, bnd, d);
608 mpz_clear (d);
609 }
610
611 /* Determines number of iterations of loop whose ending condition
612 is IV <> FINAL. TYPE is the type of the iv. The number of
613 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
614 we know that the exit must be taken eventually, i.e., that the IV
615 eventually reaches the value FINAL (we derived this earlier, and possibly set
616 NITER->assumptions to make sure this is the case). BNDS contains the
617 bounds on the difference FINAL - IV->base. */
618
619 static bool
620 number_of_iterations_ne (tree type, affine_iv *iv, tree final,
621 struct tree_niter_desc *niter, bool exit_must_be_taken,
622 bounds *bnds)
623 {
624 tree niter_type = unsigned_type_for (type);
625 tree s, c, d, bits, assumption, tmp, bound;
626 mpz_t max;
627
628 niter->control = *iv;
629 niter->bound = final;
630 niter->cmp = NE_EXPR;
631
632 /* Rearrange the terms so that we get inequality S * i <> C, with S
633 positive. Also cast everything to the unsigned type. If IV does
634 not overflow, BNDS bounds the value of C. Also, this is the
635 case if the computation |FINAL - IV->base| does not overflow, i.e.,
636 if BNDS->below in the result is nonnegative. */
637 if (tree_int_cst_sign_bit (iv->step))
638 {
639 s = fold_convert (niter_type,
640 fold_build1 (NEGATE_EXPR, type, iv->step));
641 c = fold_build2 (MINUS_EXPR, niter_type,
642 fold_convert (niter_type, iv->base),
643 fold_convert (niter_type, final));
644 bounds_negate (bnds);
645 }
646 else
647 {
648 s = fold_convert (niter_type, iv->step);
649 c = fold_build2 (MINUS_EXPR, niter_type,
650 fold_convert (niter_type, final),
651 fold_convert (niter_type, iv->base));
652 }
653
654 mpz_init (max);
655 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
656 exit_must_be_taken);
657 niter->max = mpz_get_double_int (niter_type, max, false);
658 mpz_clear (max);
659
660 /* First the trivial cases -- when the step is 1. */
661 if (integer_onep (s))
662 {
663 niter->niter = c;
664 return true;
665 }
666
667 /* Let d = gcd (step, size of mode). If d does not divide c, the loop
668 is infinite. Otherwise, the number of iterations is
669 (inverse(s/d) * (c/d)) mod (size of mode/d). */
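  /* For instance, in an 8-bit type with S = 2 and C = 6 (a loop of the form
     "for (i = 0; i != 6; i += 2)"), d = 2, s/d = 1 and c/d = 3, so the
     number of iterations is (inverse (1) * 3) mod 128 = 3.  */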
670 bits = num_ending_zeros (s);
671 bound = build_low_bits_mask (niter_type,
672 (TYPE_PRECISION (niter_type)
673 - tree_low_cst (bits, 1)));
674
675 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
676 build_int_cst (niter_type, 1), bits);
677 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
678
679 if (!exit_must_be_taken)
680 {
681 /* If we cannot assume that the exit is taken eventually, record the
682 assumptions for divisibility of c. */
683 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
684 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
685 assumption, build_int_cst (niter_type, 0));
686 if (!integer_nonzerop (assumption))
687 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
688 niter->assumptions, assumption);
689 }
690
691 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
692 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
693 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
694 return true;
695 }
696
697 /* Checks whether we can determine the final value of the control variable
698 of the loop with ending condition IV0 < IV1 (computed in TYPE).
699 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
700 of the step. The assumptions necessary to ensure that the computation
701 of the final value does not overflow are recorded in NITER. If we
702 find the final value, we adjust DELTA and return TRUE. Otherwise
703 we return false. BNDS bounds the value of IV1->base - IV0->base,
704 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
705 true if we know that the exit must be taken eventually. */
706
707 static bool
708 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
709 struct tree_niter_desc *niter,
710 tree *delta, tree step,
711 bool exit_must_be_taken, bounds *bnds)
712 {
713 tree niter_type = TREE_TYPE (step);
714 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
715 tree tmod;
716 mpz_t mmod;
717 tree assumption = boolean_true_node, bound, noloop;
718 bool ret = false, fv_comp_no_overflow;
719 tree type1 = type;
720 if (POINTER_TYPE_P (type))
721 type1 = sizetype;
722
723 if (TREE_CODE (mod) != INTEGER_CST)
724 return false;
725 if (integer_nonzerop (mod))
726 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
727 tmod = fold_convert (type1, mod);
728
729 mpz_init (mmod);
730 mpz_set_double_int (mmod, tree_to_double_int (mod), true);
731 mpz_neg (mmod, mmod);
732
733 /* If the induction variable does not overflow and the exit is taken,
734 then the computation of the final value does not overflow. This is
735 also obviously the case if the new final value is equal to the
736 current one. Finally, we postulate this for pointer type variables,
737 as the code cannot rely on the object to which the pointer points being
738 placed at the end of the address space (and more pragmatically,
739 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
740 if (integer_zerop (mod) || POINTER_TYPE_P (type))
741 fv_comp_no_overflow = true;
742 else if (!exit_must_be_taken)
743 fv_comp_no_overflow = false;
744 else
745 fv_comp_no_overflow =
746 (iv0->no_overflow && integer_nonzerop (iv0->step))
747 || (iv1->no_overflow && integer_nonzerop (iv1->step));
748
749 if (integer_nonzerop (iv0->step))
750 {
751 /* The final value of the iv is iv1->base + MOD, assuming that this
752 computation does not overflow, and that
753 iv0->base <= iv1->base + MOD. */
754 if (!fv_comp_no_overflow)
755 {
756 bound = fold_build2 (MINUS_EXPR, type1,
757 TYPE_MAX_VALUE (type1), tmod);
758 assumption = fold_build2 (LE_EXPR, boolean_type_node,
759 iv1->base, bound);
760 if (integer_zerop (assumption))
761 goto end;
762 }
763 if (mpz_cmp (mmod, bnds->below) < 0)
764 noloop = boolean_false_node;
765 else if (POINTER_TYPE_P (type))
766 noloop = fold_build2 (GT_EXPR, boolean_type_node,
767 iv0->base,
768 fold_build_pointer_plus (iv1->base, tmod));
769 else
770 noloop = fold_build2 (GT_EXPR, boolean_type_node,
771 iv0->base,
772 fold_build2 (PLUS_EXPR, type1,
773 iv1->base, tmod));
774 }
775 else
776 {
777 /* The final value of the iv is iv0->base - MOD, assuming that this
778 computation does not overflow, and that
779 iv0->base - MOD <= iv1->base. */
780 if (!fv_comp_no_overflow)
781 {
782 bound = fold_build2 (PLUS_EXPR, type1,
783 TYPE_MIN_VALUE (type1), tmod);
784 assumption = fold_build2 (GE_EXPR, boolean_type_node,
785 iv0->base, bound);
786 if (integer_zerop (assumption))
787 goto end;
788 }
789 if (mpz_cmp (mmod, bnds->below) < 0)
790 noloop = boolean_false_node;
791 else if (POINTER_TYPE_P (type))
792 noloop = fold_build2 (GT_EXPR, boolean_type_node,
793 fold_build_pointer_plus (iv0->base,
794 fold_build1 (NEGATE_EXPR,
795 type1, tmod)),
796 iv1->base);
797 else
798 noloop = fold_build2 (GT_EXPR, boolean_type_node,
799 fold_build2 (MINUS_EXPR, type1,
800 iv0->base, tmod),
801 iv1->base);
802 }
803
804 if (!integer_nonzerop (assumption))
805 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
806 niter->assumptions,
807 assumption);
808 if (!integer_zerop (noloop))
809 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
810 niter->may_be_zero,
811 noloop);
812 bounds_add (bnds, tree_to_double_int (mod), type);
813 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
814
815 ret = true;
816 end:
817 mpz_clear (mmod);
818 return ret;
819 }
820
821 /* Add assertions to NITER that ensure that the control variable of the loop
822 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
823 are TYPE. Returns false if we can prove that there is an overflow, true
824 otherwise. STEP is the absolute value of the step. */
825
826 static bool
827 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
828 struct tree_niter_desc *niter, tree step)
829 {
830 tree bound, d, assumption, diff;
831 tree niter_type = TREE_TYPE (step);
832
833 if (integer_nonzerop (iv0->step))
834 {
835 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
836 if (iv0->no_overflow)
837 return true;
838
839 /* If iv0->base is a constant, we can determine the last value before
840 overflow precisely; otherwise we conservatively assume
841 MAX - STEP + 1. */
842
843 if (TREE_CODE (iv0->base) == INTEGER_CST)
844 {
845 d = fold_build2 (MINUS_EXPR, niter_type,
846 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
847 fold_convert (niter_type, iv0->base));
848 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
849 }
850 else
851 diff = fold_build2 (MINUS_EXPR, niter_type, step,
852 build_int_cst (niter_type, 1));
853 bound = fold_build2 (MINUS_EXPR, type,
854 TYPE_MAX_VALUE (type), fold_convert (type, diff));
855 assumption = fold_build2 (LE_EXPR, boolean_type_node,
856 iv1->base, bound);
857 }
858 else
859 {
860 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
861 if (iv1->no_overflow)
862 return true;
863
864 if (TREE_CODE (iv1->base) == INTEGER_CST)
865 {
866 d = fold_build2 (MINUS_EXPR, niter_type,
867 fold_convert (niter_type, iv1->base),
868 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
869 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
870 }
871 else
872 diff = fold_build2 (MINUS_EXPR, niter_type, step,
873 build_int_cst (niter_type, 1));
874 bound = fold_build2 (PLUS_EXPR, type,
875 TYPE_MIN_VALUE (type), fold_convert (type, diff));
876 assumption = fold_build2 (GE_EXPR, boolean_type_node,
877 iv0->base, bound);
878 }
879
880 if (integer_zerop (assumption))
881 return false;
882 if (!integer_nonzerop (assumption))
883 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
884 niter->assumptions, assumption);
885
886 iv0->no_overflow = true;
887 iv1->no_overflow = true;
888 return true;
889 }
890
891 /* Add an assumption to NITER that a loop whose ending condition
892 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
893 bounds the value of IV1->base - IV0->base. */
894
895 static void
896 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
897 struct tree_niter_desc *niter, bounds *bnds)
898 {
899 tree assumption = boolean_true_node, bound, diff;
900 tree mbz, mbzl, mbzr, type1;
901 bool rolls_p, no_overflow_p;
902 double_int dstep;
903 mpz_t mstep, max;
904
905 /* We are going to compute the number of iterations as
906 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
907 variant of TYPE. This formula only works if
908
909 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
910
911 (where MAX is the maximum value of the unsigned variant of TYPE, and
912 the computations in this formula are performed in full precision,
913 i.e., without overflows).
914
915 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
916 we have a condition of the form iv0->base - step < iv1->base before the loop,
917 and for loops iv0->base < iv1->base - step * i the condition
918 iv0->base < iv1->base + step, due to loop header copying, which enables us
919 to prove the lower bound.
920
921 The upper bound is more complicated. Unless the expressions for initial
922 and final value themselves contain enough information, we usually cannot
923 derive it from the context. */
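  /* For instance, in an 8-bit unsigned variant of TYPE (MAX = 255) with
     step 4, the formula above is valid as long as
     -3 <= iv1->base - iv0->base <= 252.  */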
924
925 /* First check whether the answer does not follow from the bounds we gathered
926 before. */
927 if (integer_nonzerop (iv0->step))
928 dstep = tree_to_double_int (iv0->step);
929 else
930 {
931 dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
932 dstep = -dstep;
933 }
934
935 mpz_init (mstep);
936 mpz_set_double_int (mstep, dstep, true);
937 mpz_neg (mstep, mstep);
938 mpz_add_ui (mstep, mstep, 1);
939
940 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
941
942 mpz_init (max);
943 mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
944 mpz_add (max, max, mstep);
945 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
946 /* For pointers, only values lying inside a single object
947 can be compared or manipulated by pointer arithmetic.
948 GCC in general does not allow or handle objects larger
949 than half of the address space, hence the upper bound
950 is satisfied for pointers. */
951 || POINTER_TYPE_P (type));
952 mpz_clear (mstep);
953 mpz_clear (max);
954
955 if (rolls_p && no_overflow_p)
956 return;
957
958 type1 = type;
959 if (POINTER_TYPE_P (type))
960 type1 = sizetype;
961
962 /* Now the hard part; we must formulate the assumption(s) as expressions, and
963 we must be careful not to introduce overflow. */
964
965 if (integer_nonzerop (iv0->step))
966 {
967 diff = fold_build2 (MINUS_EXPR, type1,
968 iv0->step, build_int_cst (type1, 1));
969
970 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
971 the address 0 never belongs to any object, we can assume this for
972 pointers. */
973 if (!POINTER_TYPE_P (type))
974 {
975 bound = fold_build2 (PLUS_EXPR, type1,
976 TYPE_MIN_VALUE (type), diff);
977 assumption = fold_build2 (GE_EXPR, boolean_type_node,
978 iv0->base, bound);
979 }
980
981 /* And then we can compute iv0->base - diff, and compare it with
982 iv1->base. */
983 mbzl = fold_build2 (MINUS_EXPR, type1,
984 fold_convert (type1, iv0->base), diff);
985 mbzr = fold_convert (type1, iv1->base);
986 }
987 else
988 {
989 diff = fold_build2 (PLUS_EXPR, type1,
990 iv1->step, build_int_cst (type1, 1));
991
992 if (!POINTER_TYPE_P (type))
993 {
994 bound = fold_build2 (PLUS_EXPR, type1,
995 TYPE_MAX_VALUE (type), diff);
996 assumption = fold_build2 (LE_EXPR, boolean_type_node,
997 iv1->base, bound);
998 }
999
1000 mbzl = fold_convert (type1, iv0->base);
1001 mbzr = fold_build2 (MINUS_EXPR, type1,
1002 fold_convert (type1, iv1->base), diff);
1003 }
1004
1005 if (!integer_nonzerop (assumption))
1006 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1007 niter->assumptions, assumption);
1008 if (!rolls_p)
1009 {
1010 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1011 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1012 niter->may_be_zero, mbz);
1013 }
1014 }
1015
1016 /* Determines number of iterations of loop whose ending condition
1017 is IV0 < IV1. TYPE is the type of the iv. The number of
1018 iterations is stored to NITER. BNDS bounds the difference
1019 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1020 that the exit must be taken eventually. */
1021
1022 static bool
1023 number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1024 struct tree_niter_desc *niter,
1025 bool exit_must_be_taken, bounds *bnds)
1026 {
1027 tree niter_type = unsigned_type_for (type);
1028 tree delta, step, s;
1029 mpz_t mstep, tmp;
1030
1031 if (integer_nonzerop (iv0->step))
1032 {
1033 niter->control = *iv0;
1034 niter->cmp = LT_EXPR;
1035 niter->bound = iv1->base;
1036 }
1037 else
1038 {
1039 niter->control = *iv1;
1040 niter->cmp = GT_EXPR;
1041 niter->bound = iv0->base;
1042 }
1043
1044 delta = fold_build2 (MINUS_EXPR, niter_type,
1045 fold_convert (niter_type, iv1->base),
1046 fold_convert (niter_type, iv0->base));
1047
1048 /* First handle the special case that the step is +-1. */
1049 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1050 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1051 {
1052 /* for (i = iv0->base; i < iv1->base; i++)
1053
1054 or
1055
1056 for (i = iv1->base; i > iv0->base; i--).
1057
1058 In both cases # of iterations is iv1->base - iv0->base, assuming that
1059 iv1->base >= iv0->base.
1060
1061 First try to derive a lower bound on the value of
1062 iv1->base - iv0->base, computed in full precision. If the difference
1063 is nonnegative, we are done, otherwise we must record the
1064 condition. */
1065
1066 if (mpz_sgn (bnds->below) < 0)
1067 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1068 iv1->base, iv0->base);
1069 niter->niter = delta;
1070 niter->max = mpz_get_double_int (niter_type, bnds->up, false);
1071 return true;
1072 }
1073
1074 if (integer_nonzerop (iv0->step))
1075 step = fold_convert (niter_type, iv0->step);
1076 else
1077 step = fold_convert (niter_type,
1078 fold_build1 (NEGATE_EXPR, type, iv1->step));
1079
1080 /* If we can determine the final value of the control iv exactly, we can
1081 transform the condition to != comparison. In particular, this will be
1082 the case if DELTA is constant. */
1083 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1084 exit_must_be_taken, bnds))
1085 {
1086 affine_iv zps;
1087
1088 zps.base = build_int_cst (niter_type, 0);
1089 zps.step = step;
1090 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1091 zps does not overflow. */
1092 zps.no_overflow = true;
1093
1094 return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
1095 }
1096
1097 /* Make sure that the control iv does not overflow. */
1098 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1099 return false;
1100
1101 /* We determine the number of iterations as (delta + step - 1) / step. For
1102 this to work, we must know that iv1->base >= iv0->base - step + 1,
1103 otherwise the loop does not roll. */
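  /* For instance, for a loop "for (i = 0; i < 10; i += 3)" we have
     delta = 10 and step = 3, giving (10 + 3 - 1) / 3 = 4 iterations
     (i = 0, 3, 6, 9).  */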
1104 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1105
1106 s = fold_build2 (MINUS_EXPR, niter_type,
1107 step, build_int_cst (niter_type, 1));
1108 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1109 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1110
1111 mpz_init (mstep);
1112 mpz_init (tmp);
1113 mpz_set_double_int (mstep, tree_to_double_int (step), true);
1114 mpz_add (tmp, bnds->up, mstep);
1115 mpz_sub_ui (tmp, tmp, 1);
1116 mpz_fdiv_q (tmp, tmp, mstep);
1117 niter->max = mpz_get_double_int (niter_type, tmp, false);
1118 mpz_clear (mstep);
1119 mpz_clear (tmp);
1120
1121 return true;
1122 }
1123
1124 /* Determines number of iterations of loop whose ending condition
1125 is IV0 <= IV1. TYPE is the type of the iv. The number of
1126 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1127 we know that this condition must eventually become false (we derived this
1128 earlier, and possibly set NITER->assumptions to make sure this
1129 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1130
1131 static bool
1132 number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
1133 struct tree_niter_desc *niter, bool exit_must_be_taken,
1134 bounds *bnds)
1135 {
1136 tree assumption;
1137 tree type1 = type;
1138 if (POINTER_TYPE_P (type))
1139 type1 = sizetype;
1140
1141 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1142 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1143 value of the type. This we must know anyway, since if it is
1144 equal to this value, the loop rolls forever. We do not check
1145 this condition for pointer type ivs, as the code cannot rely on
1146 the object to which the pointer points being placed at the end of
1147 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1148 not defined for pointers). */
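  /* For instance, "for (i = 0; i <= n; i++)" is handled as
     "for (i = 0; i < n + 1; i++)", which is only safe when n is not the
     maximum value of its type.  */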
1149
1150 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1151 {
1152 if (integer_nonzerop (iv0->step))
1153 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1154 iv1->base, TYPE_MAX_VALUE (type));
1155 else
1156 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1157 iv0->base, TYPE_MIN_VALUE (type));
1158
1159 if (integer_zerop (assumption))
1160 return false;
1161 if (!integer_nonzerop (assumption))
1162 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1163 niter->assumptions, assumption);
1164 }
1165
1166 if (integer_nonzerop (iv0->step))
1167 {
1168 if (POINTER_TYPE_P (type))
1169 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1170 else
1171 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1172 build_int_cst (type1, 1));
1173 }
1174 else if (POINTER_TYPE_P (type))
1175 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1176 else
1177 iv0->base = fold_build2 (MINUS_EXPR, type1,
1178 iv0->base, build_int_cst (type1, 1));
1179
1180 bounds_add (bnds, double_int_one, type1);
1181
1182 return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
1183 bnds);
1184 }
1185
1186 /* Dumps description of affine induction variable IV to FILE. */
1187
1188 static void
1189 dump_affine_iv (FILE *file, affine_iv *iv)
1190 {
1191 if (!integer_zerop (iv->step))
1192 fprintf (file, "[");
1193
1194 print_generic_expr (file, iv->base, TDF_SLIM);
1195
1196 if (!integer_zerop (iv->step))
1197 {
1198 fprintf (file, ", + , ");
1199 print_generic_expr (file, iv->step, TDF_SLIM);
1200 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1201 }
1202 }
1203
1204 /* Determine the number of iterations according to condition (for staying
1205 inside loop) which compares two induction variables using comparison
1206 operator CODE. The induction variable on left side of the comparison
1207 is IV0, the right-hand side is IV1. Both induction variables must have
1208 type TYPE, which must be an integer or pointer type. The steps of the
1209 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1210
1211 LOOP is the loop whose number of iterations we are determining.
1212
1213 ONLY_EXIT is true if we are sure this is the only way the loop could be
1214 exited (including possibly non-returning function calls, exceptions, etc.)
1215 -- in this case we can use the information whether the control induction
1216 variables can overflow or not in a more efficient way.
1217
1218 If EVERY_ITERATION is true, we know the test is executed on every iteration.
1219
1220 The results (number of iterations and assumptions as described in
1221 comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
1222 Returns false if it fails to determine number of iterations, true if it
1223 was determined (possibly with some assumptions). */
1224
1225 static bool
1226 number_of_iterations_cond (struct loop *loop,
1227 tree type, affine_iv *iv0, enum tree_code code,
1228 affine_iv *iv1, struct tree_niter_desc *niter,
1229 bool only_exit, bool every_iteration)
1230 {
1231 bool exit_must_be_taken = false, ret;
1232 bounds bnds;
1233
1234 /* If the test is not executed every iteration, wrapping may make the test
1235 pass again.
1236 TODO: the overflow case can still be used as an unreliable estimate of the
1237 upper bound. But we have no API to pass it down to the number of iterations
1238 code and, at present, it would not use it anyway. */
1239 if (!every_iteration
1240 && (!iv0->no_overflow || !iv1->no_overflow
1241 || code == NE_EXPR || code == EQ_EXPR))
1242 return false;
1243
1244 /* The meaning of these assumptions is this:
1245 if !assumptions
1246 then the rest of the information need not be valid
1247 if may_be_zero then the loop does not roll, even if
1248 niter != 0. */
1249 niter->assumptions = boolean_true_node;
1250 niter->may_be_zero = boolean_false_node;
1251 niter->niter = NULL_TREE;
1252 niter->max = double_int_zero;
1253
1254 niter->bound = NULL_TREE;
1255 niter->cmp = ERROR_MARK;
1256
1257 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1258 the control variable is on lhs. */
1259 if (code == GE_EXPR || code == GT_EXPR
1260 || (code == NE_EXPR && integer_zerop (iv0->step)))
1261 {
1262 SWAP (iv0, iv1);
1263 code = swap_tree_comparison (code);
1264 }
1265
1266 if (POINTER_TYPE_P (type))
1267 {
1268 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1269 to the same object. If they do, the control variable cannot wrap
1270 (as wrapping around the bounds of memory will never return a pointer
1271 that would be guaranteed to point to the same object, even if we
1272 avoid undefined behavior by casting to size_t and back). */
1273 iv0->no_overflow = true;
1274 iv1->no_overflow = true;
1275 }
1276
1277 /* If the control induction variable does not overflow and the only exit
1278 from the loop is the one that we analyze, we know it must be taken
1279 eventually. */
1280 if (only_exit)
1281 {
1282 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1283 exit_must_be_taken = true;
1284 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1285 exit_must_be_taken = true;
1286 }
1287
1288 /* We can handle the case when neither of the sides of the comparison is
1289 invariant, provided that the test is NE_EXPR. This rarely occurs in
1290 practice, but it is simple enough to manage. */
1291 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1292 {
1293 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1294 if (code != NE_EXPR)
1295 return false;
1296
1297 iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
1298 iv0->step, iv1->step);
1299 iv0->no_overflow = false;
1300 iv1->step = build_int_cst (step_type, 0);
1301 iv1->no_overflow = true;
1302 }
1303
1304 /* If the result of the comparison is a constant, the loop is weird. More
1305 precise handling would be possible, but the situation is not common enough
1306 to waste time on it. */
1307 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1308 return false;
1309
1310 /* Ignore loops of while (i-- < 10) type. */
1311 if (code != NE_EXPR)
1312 {
1313 if (iv0->step && tree_int_cst_sign_bit (iv0->step))
1314 return false;
1315
1316 if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
1317 return false;
1318 }
1319
1320 /* If the loop exits immediately, there is nothing to do. */
1321 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1322 if (tem && integer_zerop (tem))
1323 {
1324 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1325 niter->max = double_int_zero;
1326 return true;
1327 }
1328
1329 /* OK, now we know we have a sensible loop. Handle several cases, depending
1330 on what comparison operator is used. */
1331 bound_difference (loop, iv1->base, iv0->base, &bnds);
1332
1333 if (dump_file && (dump_flags & TDF_DETAILS))
1334 {
1335 fprintf (dump_file,
1336 "Analyzing # of iterations of loop %d\n", loop->num);
1337
1338 fprintf (dump_file, " exit condition ");
1339 dump_affine_iv (dump_file, iv0);
1340 fprintf (dump_file, " %s ",
1341 code == NE_EXPR ? "!="
1342 : code == LT_EXPR ? "<"
1343 : "<=");
1344 dump_affine_iv (dump_file, iv1);
1345 fprintf (dump_file, "\n");
1346
1347 fprintf (dump_file, " bounds on difference of bases: ");
1348 mpz_out_str (dump_file, 10, bnds.below);
1349 fprintf (dump_file, " ... ");
1350 mpz_out_str (dump_file, 10, bnds.up);
1351 fprintf (dump_file, "\n");
1352 }
1353
1354 switch (code)
1355 {
1356 case NE_EXPR:
1357 gcc_assert (integer_zerop (iv1->step));
1358 ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
1359 exit_must_be_taken, &bnds);
1360 break;
1361
1362 case LT_EXPR:
1363 ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
1364 &bnds);
1365 break;
1366
1367 case LE_EXPR:
1368 ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
1369 &bnds);
1370 break;
1371
1372 default:
1373 gcc_unreachable ();
1374 }
1375
1376 mpz_clear (bnds.up);
1377 mpz_clear (bnds.below);
1378
1379 if (dump_file && (dump_flags & TDF_DETAILS))
1380 {
1381 if (ret)
1382 {
1383 fprintf (dump_file, " result:\n");
1384 if (!integer_nonzerop (niter->assumptions))
1385 {
1386 fprintf (dump_file, " under assumptions ");
1387 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1388 fprintf (dump_file, "\n");
1389 }
1390
1391 if (!integer_zerop (niter->may_be_zero))
1392 {
1393 fprintf (dump_file, " zero if ");
1394 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1395 fprintf (dump_file, "\n");
1396 }
1397
1398 fprintf (dump_file, " # of iterations ");
1399 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1400 fprintf (dump_file, ", bounded by ");
1401 dump_double_int (dump_file, niter->max, true);
1402 fprintf (dump_file, "\n");
1403 }
1404 else
1405 fprintf (dump_file, " failed\n\n");
1406 }
1407 return ret;
1408 }
1409
1410 /* Substitute NEW_TREE for OLD in EXPR and fold the result. */
1411
1412 static tree
1413 simplify_replace_tree (tree expr, tree old, tree new_tree)
1414 {
1415 unsigned i, n;
1416 tree ret = NULL_TREE, e, se;
1417
1418 if (!expr)
1419 return NULL_TREE;
1420
1421 /* Do not bother to replace constants. */
1422 if (CONSTANT_CLASS_P (old))
1423 return expr;
1424
1425 if (expr == old
1426 || operand_equal_p (expr, old, 0))
1427 return unshare_expr (new_tree);
1428
1429 if (!EXPR_P (expr))
1430 return expr;
1431
1432 n = TREE_OPERAND_LENGTH (expr);
1433 for (i = 0; i < n; i++)
1434 {
1435 e = TREE_OPERAND (expr, i);
1436 se = simplify_replace_tree (e, old, new_tree);
1437 if (e == se)
1438 continue;
1439
1440 if (!ret)
1441 ret = copy_node (expr);
1442
1443 TREE_OPERAND (ret, i) = se;
1444 }
1445
1446 return (ret ? fold (ret) : expr);
1447 }
1448
1449 /* Expand definitions of ssa names in EXPR as long as they are simple
1450 enough, and return the new expression. */
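/* For instance, if EXPR is i_3 and its definition is "i_3 = i_1 + 4" where
   "i_1 = (int) j_2", the returned expression is (int) j_2 + 4.  */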
1451
1452 tree
1453 expand_simple_operations (tree expr)
1454 {
1455 unsigned i, n;
1456 tree ret = NULL_TREE, e, ee, e1;
1457 enum tree_code code;
1458 gimple stmt;
1459
1460 if (expr == NULL_TREE)
1461 return expr;
1462
1463 if (is_gimple_min_invariant (expr))
1464 return expr;
1465
1466 code = TREE_CODE (expr);
1467 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
1468 {
1469 n = TREE_OPERAND_LENGTH (expr);
1470 for (i = 0; i < n; i++)
1471 {
1472 e = TREE_OPERAND (expr, i);
1473 ee = expand_simple_operations (e);
1474 if (e == ee)
1475 continue;
1476
1477 if (!ret)
1478 ret = copy_node (expr);
1479
1480 TREE_OPERAND (ret, i) = ee;
1481 }
1482
1483 if (!ret)
1484 return expr;
1485
1486 fold_defer_overflow_warnings ();
1487 ret = fold (ret);
1488 fold_undefer_and_ignore_overflow_warnings ();
1489 return ret;
1490 }
1491
1492 if (TREE_CODE (expr) != SSA_NAME)
1493 return expr;
1494
1495 stmt = SSA_NAME_DEF_STMT (expr);
1496 if (gimple_code (stmt) == GIMPLE_PHI)
1497 {
1498 basic_block src, dest;
1499
1500 if (gimple_phi_num_args (stmt) != 1)
1501 return expr;
1502 e = PHI_ARG_DEF (stmt, 0);
1503
1504 /* Avoid propagating through loop exit phi nodes, which
1505 could break loop-closed SSA form restrictions. */
1506 dest = gimple_bb (stmt);
1507 src = single_pred (dest);
1508 if (TREE_CODE (e) == SSA_NAME
1509 && src->loop_father != dest->loop_father)
1510 return expr;
1511
1512 return expand_simple_operations (e);
1513 }
1514 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1515 return expr;
1516
1517 e = gimple_assign_rhs1 (stmt);
1518 code = gimple_assign_rhs_code (stmt);
1519 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
1520 {
1521 if (is_gimple_min_invariant (e))
1522 return e;
1523
1524 if (code == SSA_NAME)
1525 return expand_simple_operations (e);
1526
1527 return expr;
1528 }
1529
1530 switch (code)
1531 {
1532 CASE_CONVERT:
1533 /* Casts are simple. */
1534 ee = expand_simple_operations (e);
1535 return fold_build1 (code, TREE_TYPE (expr), ee);
1536
1537 case PLUS_EXPR:
1538 case MINUS_EXPR:
1539 case POINTER_PLUS_EXPR:
1540 /* And increments and decrements by a constant are simple. */
1541 e1 = gimple_assign_rhs2 (stmt);
1542 if (!is_gimple_min_invariant (e1))
1543 return expr;
1544
1545 ee = expand_simple_operations (e);
1546 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
1547
1548 default:
1549 return expr;
1550 }
1551 }
1552
1553 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1554 expression (or EXPR unchanged, if no simplification was possible). */
1555
1556 static tree
1557 tree_simplify_using_condition_1 (tree cond, tree expr)
1558 {
1559 bool changed;
1560 tree e, te, e0, e1, e2, notcond;
1561 enum tree_code code = TREE_CODE (expr);
1562
1563 if (code == INTEGER_CST)
1564 return expr;
1565
1566 if (code == TRUTH_OR_EXPR
1567 || code == TRUTH_AND_EXPR
1568 || code == COND_EXPR)
1569 {
1570 changed = false;
1571
1572 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
1573 if (TREE_OPERAND (expr, 0) != e0)
1574 changed = true;
1575
1576 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
1577 if (TREE_OPERAND (expr, 1) != e1)
1578 changed = true;
1579
1580 if (code == COND_EXPR)
1581 {
1582 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
1583 if (TREE_OPERAND (expr, 2) != e2)
1584 changed = true;
1585 }
1586 else
1587 e2 = NULL_TREE;
1588
1589 if (changed)
1590 {
1591 if (code == COND_EXPR)
1592 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1593 else
1594 expr = fold_build2 (code, boolean_type_node, e0, e1);
1595 }
1596
1597 return expr;
1598 }
1599
1600 /* If COND is an equality, we may be able to simplify EXPR by copy/constant
1601 propagation, and vice versa. Fold does not handle this, since it is
1602 considered too expensive. */
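  /* For instance, if COND is "n_1 == 100" and EXPR is "n_1 > 0", replacing
     n_1 by 100 in EXPR folds it to true.  */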
1603 if (TREE_CODE (cond) == EQ_EXPR)
1604 {
1605 e0 = TREE_OPERAND (cond, 0);
1606 e1 = TREE_OPERAND (cond, 1);
1607
1608 /* We know that e0 == e1. Check whether we cannot simplify expr
1609 using this fact. */
1610 e = simplify_replace_tree (expr, e0, e1);
1611 if (integer_zerop (e) || integer_nonzerop (e))
1612 return e;
1613
1614 e = simplify_replace_tree (expr, e1, e0);
1615 if (integer_zerop (e) || integer_nonzerop (e))
1616 return e;
1617 }
1618 if (TREE_CODE (expr) == EQ_EXPR)
1619 {
1620 e0 = TREE_OPERAND (expr, 0);
1621 e1 = TREE_OPERAND (expr, 1);
1622
1623 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
1624 e = simplify_replace_tree (cond, e0, e1);
1625 if (integer_zerop (e))
1626 return e;
1627 e = simplify_replace_tree (cond, e1, e0);
1628 if (integer_zerop (e))
1629 return e;
1630 }
1631 if (TREE_CODE (expr) == NE_EXPR)
1632 {
1633 e0 = TREE_OPERAND (expr, 0);
1634 e1 = TREE_OPERAND (expr, 1);
1635
1636 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
1637 e = simplify_replace_tree (cond, e0, e1);
1638 if (integer_zerop (e))
1639 return boolean_true_node;
1640 e = simplify_replace_tree (cond, e1, e0);
1641 if (integer_zerop (e))
1642 return boolean_true_node;
1643 }
1644
1645 te = expand_simple_operations (expr);
1646
1647 /* Check whether COND ==> EXPR. */
1648 notcond = invert_truthvalue (cond);
1649 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
1650 if (e && integer_nonzerop (e))
1651 return e;
1652
1653 /* Check whether COND ==> not EXPR. */
1654 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
1655 if (e && integer_zerop (e))
1656 return e;
1657
1658 return expr;
1659 }
1660
1661 /* Tries to simplify EXPR using the condition COND. Returns the simplified
1662 expression (or EXPR unchanged, if no simplification was possible).
1663 Wrapper around tree_simplify_using_condition_1 that ensures that chains
1664 of simple operations in definitions of ssa names in COND are expanded,
1665 so that things like casts or incrementing the value of the bound before
1666 the loop do not cause us to fail. */
1667
1668 static tree
1669 tree_simplify_using_condition (tree cond, tree expr)
1670 {
1671 cond = expand_simple_operations (cond);
1672
1673 return tree_simplify_using_condition_1 (cond, expr);
1674 }
1675
1676 /* Tries to simplify EXPR using the conditions on entry to LOOP.
1677 Returns the simplified expression (or EXPR unchanged, if no
1678 simplification was possible). */
1679
1680 static tree
1681 simplify_using_initial_conditions (struct loop *loop, tree expr)
1682 {
1683 edge e;
1684 basic_block bb;
1685 gimple stmt;
1686 tree cond;
1687 int cnt = 0;
1688
1689 if (TREE_CODE (expr) == INTEGER_CST)
1690 return expr;
1691
1692 /* Limit walking the dominators to avoid quadratic behavior in
1693 the number of BBs times the number of loops in degenerate
1694 cases. */
1695 for (bb = loop->header;
1696 bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
1697 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
1698 {
1699 if (!single_pred_p (bb))
1700 continue;
1701 e = single_pred_edge (bb);
1702
1703 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
1704 continue;
1705
1706 stmt = last_stmt (e->src);
1707 cond = fold_build2 (gimple_cond_code (stmt),
1708 boolean_type_node,
1709 gimple_cond_lhs (stmt),
1710 gimple_cond_rhs (stmt));
1711 if (e->flags & EDGE_FALSE_VALUE)
1712 cond = invert_truthvalue (cond);
1713 expr = tree_simplify_using_condition (cond, expr);
1714 ++cnt;
1715 }
1716
1717 return expr;
1718 }
1719
1720 /* Tries to simplify EXPR using the evolutions of the loop invariants
1721 in the superloops of LOOP. Returns the simplified expression
1722 (or EXPR unchanged, if no simplification was possible). */
1723
1724 static tree
1725 simplify_using_outer_evolutions (struct loop *loop, tree expr)
1726 {
1727 enum tree_code code = TREE_CODE (expr);
1728 bool changed;
1729 tree e, e0, e1, e2;
1730
1731 if (is_gimple_min_invariant (expr))
1732 return expr;
1733
1734 if (code == TRUTH_OR_EXPR
1735 || code == TRUTH_AND_EXPR
1736 || code == COND_EXPR)
1737 {
1738 changed = false;
1739
1740 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
1741 if (TREE_OPERAND (expr, 0) != e0)
1742 changed = true;
1743
1744 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
1745 if (TREE_OPERAND (expr, 1) != e1)
1746 changed = true;
1747
1748 if (code == COND_EXPR)
1749 {
1750 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
1751 if (TREE_OPERAND (expr, 2) != e2)
1752 changed = true;
1753 }
1754 else
1755 e2 = NULL_TREE;
1756
1757 if (changed)
1758 {
1759 if (code == COND_EXPR)
1760 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
1761 else
1762 expr = fold_build2 (code, boolean_type_node, e0, e1);
1763 }
1764
1765 return expr;
1766 }
1767
1768 e = instantiate_parameters (loop, expr);
1769 if (is_gimple_min_invariant (e))
1770 return e;
1771
1772 return expr;
1773 }
1774
1775 /* Returns true if EXIT is the only possible exit from LOOP. */
1776
1777 bool
1778 loop_only_exit_p (const struct loop *loop, const_edge exit)
1779 {
1780 basic_block *body;
1781 gimple_stmt_iterator bsi;
1782 unsigned i;
1783 gimple call;
1784
1785 if (exit != single_exit (loop))
1786 return false;
1787
1788 body = get_loop_body (loop);
1789 for (i = 0; i < loop->num_nodes; i++)
1790 {
1791 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
1792 {
1793 call = gsi_stmt (bsi);
1794 if (gimple_code (call) != GIMPLE_CALL)
1795 continue;
1796
1797 if (gimple_has_side_effects (call))
1798 {
1799 free (body);
1800 return false;
1801 }
1802 }
1803 }
1804
1805 free (body);
1806 return true;
1807 }
1808
1809 /* Stores description of number of iterations of LOOP derived from
1810 EXIT (an exit edge of the LOOP) in NITER. Returns true if some
1811 useful information could be derived (and the fields of NITER have the
1812 meaning described in the comments at the struct tree_niter_desc
1813 declaration), false otherwise. If WARN is true and
1814 -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
1815 potentially unsafe assumptions.
1816 When EVERY_ITERATION is true, only tests that are known to be executed
1817 every iteration are considered (i.e., only a test that alone bounds the loop).
1818 */
1819
1820 bool
1821 number_of_iterations_exit (struct loop *loop, edge exit,
1822 struct tree_niter_desc *niter,
1823 bool warn, bool every_iteration)
1824 {
1825 gimple stmt;
1826 tree type;
1827 tree op0, op1;
1828 enum tree_code code;
1829 affine_iv iv0, iv1;
1830 bool safe;
1831
1832 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
1833
1834 if (every_iteration && !safe)
1835 return false;
1836
1837 niter->assumptions = boolean_false_node;
1838 stmt = last_stmt (exit->src);
1839 if (!stmt || gimple_code (stmt) != GIMPLE_COND)
1840 return false;
1841
1842 /* We want the condition for staying inside the loop. */
1843 code = gimple_cond_code (stmt);
1844 if (exit->flags & EDGE_TRUE_VALUE)
1845 code = invert_tree_comparison (code, false);
1846
1847 switch (code)
1848 {
1849 case GT_EXPR:
1850 case GE_EXPR:
1851 case LT_EXPR:
1852 case LE_EXPR:
1853 case NE_EXPR:
1854 break;
1855
1856 default:
1857 return false;
1858 }
1859
1860 op0 = gimple_cond_lhs (stmt);
1861 op1 = gimple_cond_rhs (stmt);
1862 type = TREE_TYPE (op0);
1863
1864 if (TREE_CODE (type) != INTEGER_TYPE
1865 && !POINTER_TYPE_P (type))
1866 return false;
1867
1868 if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
1869 return false;
1870 if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
1871 return false;
1872
1873 /* We don't want to see undefined signed overflow warnings while
1874 computing the number of iterations. */
1875 fold_defer_overflow_warnings ();
1876
1877 iv0.base = expand_simple_operations (iv0.base);
1878 iv1.base = expand_simple_operations (iv1.base);
1879 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
1880 loop_only_exit_p (loop, exit), safe))
1881 {
1882 fold_undefer_and_ignore_overflow_warnings ();
1883 return false;
1884 }
1885
1886 if (optimize >= 3)
1887 {
1888 niter->assumptions = simplify_using_outer_evolutions (loop,
1889 niter->assumptions);
1890 niter->may_be_zero = simplify_using_outer_evolutions (loop,
1891 niter->may_be_zero);
1892 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
1893 }
1894
1895 niter->assumptions
1896 = simplify_using_initial_conditions (loop,
1897 niter->assumptions);
1898 niter->may_be_zero
1899 = simplify_using_initial_conditions (loop,
1900 niter->may_be_zero);
1901
1902 fold_undefer_and_ignore_overflow_warnings ();
1903
1904 /* If NITER has simplified into a constant, update MAX. */
1905 if (TREE_CODE (niter->niter) == INTEGER_CST)
1906 niter->max = tree_to_double_int (niter->niter);
1907
1908 if (integer_onep (niter->assumptions))
1909 return true;
1910
1911 /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
1912 But if we can prove that there is overflow or some other source of weird
1913 behavior, ignore the loop even with -funsafe-loop-optimizations. */
1914 if (integer_zerop (niter->assumptions) || !single_exit (loop))
1915 return false;
1916
1917 if (flag_unsafe_loop_optimizations)
1918 niter->assumptions = boolean_true_node;
1919
1920 if (warn)
1921 {
1922 const char *wording;
1923 location_t loc = gimple_location (stmt);
1924
1925 /* We can provide a more specific warning if one of the operands is
1926 constant and the other advances by +1 or -1. */
1927 if (!integer_zerop (iv1.step)
1928 ? (integer_zerop (iv0.step)
1929 && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
1930 : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
1931 wording =
1932 flag_unsafe_loop_optimizations
1933 ? N_("assuming that the loop is not infinite")
1934 : N_("cannot optimize possibly infinite loops");
1935 else
1936 wording =
1937 flag_unsafe_loop_optimizations
1938 ? N_("assuming that the loop counter does not overflow")
1939 : N_("cannot optimize loop, the loop counter may overflow");
1940
1941 warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
1942 OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
1943 }
1944
1945 return flag_unsafe_loop_optimizations;
1946 }
1947
1948 /* Try to determine the number of iterations of LOOP. If we succeed,
1949 an expression giving the number of iterations is returned and *EXIT is
1950 set to the edge from which the information is obtained. Otherwise
1951 chrec_dont_know is returned. */
1952
1953 tree
1954 find_loop_niter (struct loop *loop, edge *exit)
1955 {
1956 unsigned i;
1957 vec<edge> exits = get_loop_exit_edges (loop);
1958 edge ex;
1959 tree niter = NULL_TREE, aniter;
1960 struct tree_niter_desc desc;
1961
1962 *exit = NULL;
1963 FOR_EACH_VEC_ELT (exits, i, ex)
1964 {
1965 if (!number_of_iterations_exit (loop, ex, &desc, false))
1966 continue;
1967
1968 if (integer_nonzerop (desc.may_be_zero))
1969 {
1970 /* We exit in the first iteration through this exit.
1971 We won't find anything better. */
1972 niter = build_int_cst (unsigned_type_node, 0);
1973 *exit = ex;
1974 break;
1975 }
1976
1977 if (!integer_zerop (desc.may_be_zero))
1978 continue;
1979
1980 aniter = desc.niter;
1981
1982 if (!niter)
1983 {
1984 /* Nothing recorded yet. */
1985 niter = aniter;
1986 *exit = ex;
1987 continue;
1988 }
1989
1990 /* Prefer constants, the lower the better. */
1991 if (TREE_CODE (aniter) != INTEGER_CST)
1992 continue;
1993
1994 if (TREE_CODE (niter) != INTEGER_CST)
1995 {
1996 niter = aniter;
1997 *exit = ex;
1998 continue;
1999 }
2000
2001 if (tree_int_cst_lt (aniter, niter))
2002 {
2003 niter = aniter;
2004 *exit = ex;
2005 continue;
2006 }
2007 }
2008 exits.release ();
2009
2010 return niter ? niter : chrec_dont_know;
2011 }
2012
2013 /* Return true if the loop is known to have a bounded number of iterations. */
2014
2015 bool
2016 finite_loop_p (struct loop *loop)
2017 {
2018 double_int nit;
2019 int flags;
2020
2021 if (flag_unsafe_loop_optimizations)
2022 return true;
2023 flags = flags_from_decl_or_type (current_function_decl);
2024 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2025 {
2026 if (dump_file && (dump_flags & TDF_DETAILS))
2027 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2028 loop->num);
2029 return true;
2030 }
2031
2032 if (loop->any_upper_bound
2033 || max_loop_iterations (loop, &nit))
2034 {
2035 if (dump_file && (dump_flags & TDF_DETAILS))
2036 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2037 loop->num);
2038 return true;
2039 }
2040 return false;
2041 }
2042
2043 /*
2044
2045 Analysis of a number of iterations of a loop by a brute-force evaluation.
2046
2047 */
2048
2049 /* Bound on the number of iterations we try to evaluate. */
2050
2051 #define MAX_ITERATIONS_TO_TRACK \
2052 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2053
2054 /* Returns the loop phi node of LOOP such that the SSA name X is derived from its
2055 result by a chain of operations such that all but exactly one of their
2056 operands are constants. */
2057
2058 static gimple
2059 chain_of_csts_start (struct loop *loop, tree x)
2060 {
2061 gimple stmt = SSA_NAME_DEF_STMT (x);
2062 tree use;
2063 basic_block bb = gimple_bb (stmt);
2064 enum tree_code code;
2065
2066 if (!bb
2067 || !flow_bb_inside_loop_p (loop, bb))
2068 return NULL;
2069
2070 if (gimple_code (stmt) == GIMPLE_PHI)
2071 {
2072 if (bb == loop->header)
2073 return stmt;
2074
2075 return NULL;
2076 }
2077
2078 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2079 return NULL;
2080
2081 code = gimple_assign_rhs_code (stmt);
2082 if (gimple_references_memory_p (stmt)
2083 || TREE_CODE_CLASS (code) == tcc_reference
2084 || (code == ADDR_EXPR
2085 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2086 return NULL;
2087
2088 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2089 if (use == NULL_TREE)
2090 return NULL;
2091
2092 return chain_of_csts_start (loop, use);
2093 }
2094
2095 /* Determines whether the expression X is derived from a result of a phi node
2096 in header of LOOP such that
2097
2098 * the derivation of X consists only from operations with constants
2099 * the initial value of the phi node is constant
2100 * the value of the phi node in the next iteration can be derived from the
2101 value in the current iteration by a chain of operations with constants.
2102
2103 If such phi node exists, it is returned, otherwise NULL is returned. */
2104
2105 static gimple
2106 get_base_for (struct loop *loop, tree x)
2107 {
2108 gimple phi;
2109 tree init, next;
2110
2111 if (is_gimple_min_invariant (x))
2112 return NULL;
2113
2114 phi = chain_of_csts_start (loop, x);
2115 if (!phi)
2116 return NULL;
2117
2118 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2119 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2120
2121 if (TREE_CODE (next) != SSA_NAME)
2122 return NULL;
2123
2124 if (!is_gimple_min_invariant (init))
2125 return NULL;
2126
2127 if (chain_of_csts_start (loop, next) != phi)
2128 return NULL;
2129
2130 return phi;
2131 }
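
/* As an illustrative sketch (a hypothetical loop, not taken from any
   particular testcase), in

     i = 0;
     do
       {
         ...
         i = (i + 3) & 15;
       }
     while (...);

   the value of i in the next iteration is derived from the value in the
   current one purely by operations with constants, and the initial value
   is the constant 0, so get_base_for returns the header phi node of i.  */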
2132
2133 /* Given an expression X, then
2134
2135 * if X is NULL_TREE, we return the constant BASE.
2136 * otherwise X is an SSA name, whose value in the considered loop is derived
2137 by a chain of operations with constants from a result of a phi node in
2138 the header of the loop. Then we return the value of X when the value of the
2139 result of this phi node is given by the constant BASE. */
2140
2141 static tree
2142 get_val_for (tree x, tree base)
2143 {
2144 gimple stmt;
2145
2146 gcc_assert (is_gimple_min_invariant (base));
2147
2148 if (!x)
2149 return base;
2150
2151 stmt = SSA_NAME_DEF_STMT (x);
2152 if (gimple_code (stmt) == GIMPLE_PHI)
2153 return base;
2154
2155 gcc_assert (is_gimple_assign (stmt));
2156
2157 /* STMT must be either an assignment of a single SSA name or an
2158 expression involving an SSA name and a constant. Try to fold that
2159 expression using the value for the SSA name. */
2160 if (gimple_assign_ssa_name_copy_p (stmt))
2161 return get_val_for (gimple_assign_rhs1 (stmt), base);
2162 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2163 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2164 {
2165 return fold_build1 (gimple_assign_rhs_code (stmt),
2166 gimple_expr_type (stmt),
2167 get_val_for (gimple_assign_rhs1 (stmt), base));
2168 }
2169 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2170 {
2171 tree rhs1 = gimple_assign_rhs1 (stmt);
2172 tree rhs2 = gimple_assign_rhs2 (stmt);
2173 if (TREE_CODE (rhs1) == SSA_NAME)
2174 rhs1 = get_val_for (rhs1, base);
2175 else if (TREE_CODE (rhs2) == SSA_NAME)
2176 rhs2 = get_val_for (rhs2, base);
2177 else
2178 gcc_unreachable ();
2179 return fold_build2 (gimple_assign_rhs_code (stmt),
2180 gimple_expr_type (stmt), rhs1, rhs2);
2181 }
2182 else
2183 gcc_unreachable ();
2184 }
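
/* As an illustrative sketch (hypothetical SSA names), if the loop body
   contains

     a_2 = a_1 + 4;
     b_3 = a_2 * 2;

   where a_1 is the result of the header phi node, then calling
   get_val_for (b_3, build_int_cst (integer_type_node, 5)) substitutes 5
   for a_1 and folds the chain to the constant (5 + 4) * 2 = 18.  */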
2185
2186
2187 /* Tries to count the number of iterations of LOOP until it exits through EXIT
2188 by brute force -- i.e. by determining the values of the operands of the
2189 condition at EXIT in the first few iterations of the loop (assuming that
2190 these values are constant) and determining the first one in which the
2191 condition is not satisfied. Returns the constant giving the number
2192 of iterations of LOOP if successful, chrec_dont_know otherwise. */
2193
2194 tree
2195 loop_niter_by_eval (struct loop *loop, edge exit)
2196 {
2197 tree acnd;
2198 tree op[2], val[2], next[2], aval[2];
2199 gimple phi, cond;
2200 unsigned i, j;
2201 enum tree_code cmp;
2202
2203 cond = last_stmt (exit->src);
2204 if (!cond || gimple_code (cond) != GIMPLE_COND)
2205 return chrec_dont_know;
2206
2207 cmp = gimple_cond_code (cond);
2208 if (exit->flags & EDGE_TRUE_VALUE)
2209 cmp = invert_tree_comparison (cmp, false);
2210
2211 switch (cmp)
2212 {
2213 case EQ_EXPR:
2214 case NE_EXPR:
2215 case GT_EXPR:
2216 case GE_EXPR:
2217 case LT_EXPR:
2218 case LE_EXPR:
2219 op[0] = gimple_cond_lhs (cond);
2220 op[1] = gimple_cond_rhs (cond);
2221 break;
2222
2223 default:
2224 return chrec_dont_know;
2225 }
2226
2227 for (j = 0; j < 2; j++)
2228 {
2229 if (is_gimple_min_invariant (op[j]))
2230 {
2231 val[j] = op[j];
2232 next[j] = NULL_TREE;
2233 op[j] = NULL_TREE;
2234 }
2235 else
2236 {
2237 phi = get_base_for (loop, op[j]);
2238 if (!phi)
2239 return chrec_dont_know;
2240 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2241 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2242 }
2243 }
2244
2245 /* Don't issue signed overflow warnings. */
2246 fold_defer_overflow_warnings ();
2247
2248 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
2249 {
2250 for (j = 0; j < 2; j++)
2251 aval[j] = get_val_for (op[j], val[j]);
2252
2253 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
2254 if (acnd && integer_zerop (acnd))
2255 {
2256 fold_undefer_and_ignore_overflow_warnings ();
2257 if (dump_file && (dump_flags & TDF_DETAILS))
2258 fprintf (dump_file,
2259 "Proved that loop %d iterates %d times using brute force.\n",
2260 loop->num, i);
2261 return build_int_cst (unsigned_type_node, i);
2262 }
2263
2264 for (j = 0; j < 2; j++)
2265 {
2266 val[j] = get_val_for (next[j], val[j]);
2267 if (!is_gimple_min_invariant (val[j]))
2268 {
2269 fold_undefer_and_ignore_overflow_warnings ();
2270 return chrec_dont_know;
2271 }
2272 }
2273 }
2274
2275 fold_undefer_and_ignore_overflow_warnings ();
2276
2277 return chrec_dont_know;
2278 }
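
/* An illustrative sketch of the brute-force evaluation above (made-up
   loop): if the condition for staying in the loop is (i & 0xffff) != 12,
   with i starting from 0 and advancing by i = i + 4, the operand values in
   successive iterations fold to 0, 4, 8, 12, ..., and the first iteration
   in which the staying condition folds to false gives the returned
   constant.  If no such iteration is found within MAX_ITERATIONS_TO_TRACK
   steps, or some value fails to fold to a constant, chrec_dont_know is
   returned.  */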
2279
2280 /* Finds the exit of the LOOP through which the loop exits after a constant
2281 number of iterations and stores the exit edge to *EXIT. The constant
2282 giving the number of iterations of LOOP is returned. The number of
2283 iterations is determined using loop_niter_by_eval (i.e. by brute force
2284 evaluation). If we are unable to find the exit for which loop_niter_by_eval
2285 determines the number of iterations, chrec_dont_know is returned. */
2286
2287 tree
2288 find_loop_niter_by_eval (struct loop *loop, edge *exit)
2289 {
2290 unsigned i;
2291 vec<edge> exits = get_loop_exit_edges (loop);
2292 edge ex;
2293 tree niter = NULL_TREE, aniter;
2294
2295 *exit = NULL;
2296
2297 /* Loops with multiple exits are expensive to handle and less important. */
2298 if (!flag_expensive_optimizations
2299 && exits.length () > 1)
2300 {
2301 exits.release ();
2302 return chrec_dont_know;
2303 }
2304
2305 FOR_EACH_VEC_ELT (exits, i, ex)
2306 {
2307 if (!just_once_each_iteration_p (loop, ex->src))
2308 continue;
2309
2310 aniter = loop_niter_by_eval (loop, ex);
2311 if (chrec_contains_undetermined (aniter))
2312 continue;
2313
2314 if (niter
2315 && !tree_int_cst_lt (aniter, niter))
2316 continue;
2317
2318 niter = aniter;
2319 *exit = ex;
2320 }
2321 exits.release ();
2322
2323 return niter ? niter : chrec_dont_know;
2324 }
2325
2326 /*
2327
2328 Analysis of upper bounds on number of iterations of a loop.
2329
2330 */
2331
2332 static double_int derive_constant_upper_bound_ops (tree, tree,
2333 enum tree_code, tree);
2334
2335 /* Returns a constant upper bound on the value of the right-hand side of
2336 an assignment statement STMT. */
2337
2338 static double_int
2339 derive_constant_upper_bound_assign (gimple stmt)
2340 {
2341 enum tree_code code = gimple_assign_rhs_code (stmt);
2342 tree op0 = gimple_assign_rhs1 (stmt);
2343 tree op1 = gimple_assign_rhs2 (stmt);
2344
2345 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
2346 op0, code, op1);
2347 }
2348
2349 /* Returns a constant upper bound on the value of expression VAL. VAL
2350 is considered to be unsigned. If its type is signed, its value must
2351 be nonnegative. */
2352
2353 static double_int
2354 derive_constant_upper_bound (tree val)
2355 {
2356 enum tree_code code;
2357 tree op0, op1;
2358
2359 extract_ops_from_tree (val, &code, &op0, &op1);
2360 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
2361 }
2362
2363 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
2364 whose type is TYPE. The expression is considered to be unsigned. If
2365 its type is signed, its value must be nonnegative. */
2366
2367 static double_int
2368 derive_constant_upper_bound_ops (tree type, tree op0,
2369 enum tree_code code, tree op1)
2370 {
2371 tree subtype, maxt;
2372 double_int bnd, max, mmax, cst;
2373 gimple stmt;
2374
2375 if (INTEGRAL_TYPE_P (type))
2376 maxt = TYPE_MAX_VALUE (type);
2377 else
2378 maxt = upper_bound_in_type (type, type);
2379
2380 max = tree_to_double_int (maxt);
2381
2382 switch (code)
2383 {
2384 case INTEGER_CST:
2385 return tree_to_double_int (op0);
2386
2387 CASE_CONVERT:
2388 subtype = TREE_TYPE (op0);
2389 if (!TYPE_UNSIGNED (subtype)
2390 /* If TYPE is also signed, the fact that VAL is nonnegative implies
2391 that OP0 is nonnegative. */
2392 && TYPE_UNSIGNED (type)
2393 && !tree_expr_nonnegative_p (op0))
2394 {
2395 /* If we cannot prove that the casted expression is nonnegative,
2396 we cannot establish more useful upper bound than the precision
2397 of the type gives us. */
2398 return max;
2399 }
2400
2401 /* We now know that op0 is a nonnegative value. Try deriving an upper
2402 bound for it. */
2403 bnd = derive_constant_upper_bound (op0);
2404
2405 /* If the bound does not fit in TYPE, max. value of TYPE could be
2406 attained. */
2407 if (max.ult (bnd))
2408 return max;
2409
2410 return bnd;
2411
2412 case PLUS_EXPR:
2413 case POINTER_PLUS_EXPR:
2414 case MINUS_EXPR:
2415 if (TREE_CODE (op1) != INTEGER_CST
2416 || !tree_expr_nonnegative_p (op0))
2417 return max;
2418
2419 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
2420 choose the most logical way to treat this constant regardless
2421 of the signedness of the type. */
2422 cst = tree_to_double_int (op1);
2423 cst = cst.sext (TYPE_PRECISION (type));
2424 if (code != MINUS_EXPR)
2425 cst = -cst;
2426
2427 bnd = derive_constant_upper_bound (op0);
2428
2429 if (cst.is_negative ())
2430 {
2431 cst = -cst;
2432 /* Avoid CST == 0x80000... */
2433 if (cst.is_negative ())
2434 return max;
2435
2436 /* OP0 + CST. We need to check that
2437 BND <= MAX (type) - CST. */
2438
2439 mmax = max - cst;
2440 if (bnd.ugt (mmax))
2441 return max;
2442
2443 return bnd + cst;
2444 }
2445 else
2446 {
2447 /* OP0 - CST, where CST >= 0.
2448
2449 If TYPE is signed, we have already verified that OP0 >= 0, and we
2450 know that the result is nonnegative. This implies that
2451 VAL <= BND - CST.
2452
2453 If TYPE is unsigned, we must additionally know that OP0 >= CST,
2454 otherwise the operation underflows.
2455 */
2456
2457 /* This should only happen if the type is unsigned; however, for
2458 buggy programs that use overflowing signed arithmetics even with
2459 -fno-wrapv, this condition may also be true for signed values. */
2460 if (bnd.ult (cst))
2461 return max;
2462
2463 if (TYPE_UNSIGNED (type))
2464 {
2465 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
2466 double_int_to_tree (type, cst));
2467 if (!tem || integer_nonzerop (tem))
2468 return max;
2469 }
2470
2471 bnd -= cst;
2472 }
2473
2474 return bnd;
2475
2476 case FLOOR_DIV_EXPR:
2477 case EXACT_DIV_EXPR:
2478 if (TREE_CODE (op1) != INTEGER_CST
2479 || tree_int_cst_sign_bit (op1))
2480 return max;
2481
2482 bnd = derive_constant_upper_bound (op0);
2483 return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);
2484
2485 case BIT_AND_EXPR:
2486 if (TREE_CODE (op1) != INTEGER_CST
2487 || tree_int_cst_sign_bit (op1))
2488 return max;
2489 return tree_to_double_int (op1);
2490
2491 case SSA_NAME:
2492 stmt = SSA_NAME_DEF_STMT (op0);
2493 if (gimple_code (stmt) != GIMPLE_ASSIGN
2494 || gimple_assign_lhs (stmt) != op0)
2495 return max;
2496 return derive_constant_upper_bound_assign (stmt);
2497
2498 default:
2499 return max;
2500 }
2501 }
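
/* Illustrative sketches of the derivation above (hypothetical unsigned
   expressions): for X & 0xff the BIT_AND_EXPR case gives the bound 255,
   for (X & 0xff) + 5 the PLUS_EXPR case gives 255 + 5 = 260, and for
   (X & 0xff) / 4 the FLOOR_DIV_EXPR case gives 255 / 4 = 63.  Whenever
   nothing better can be derived, the maximum value of the type is used as
   the (always safe) fallback.  */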
2502
2503 /* Records that every statement in LOOP is executed I_BOUND times.
2504 REALISTIC is true if I_BOUND is expected to be close to the real number
2505 of iterations. UPPER is true if we are sure the loop iterates at most
2506 I_BOUND times. */
2507
2508 void
2509 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
2510 bool upper)
2511 {
2512 /* Update the bounds only when there is no previous estimation, or when the
2513 current estimation is smaller. */
2514 if (upper
2515 && (!loop->any_upper_bound
2516 || i_bound.ult (loop->nb_iterations_upper_bound)))
2517 {
2518 loop->any_upper_bound = true;
2519 loop->nb_iterations_upper_bound = i_bound;
2520 }
2521 if (realistic
2522 && (!loop->any_estimate
2523 || i_bound.ult (loop->nb_iterations_estimate)))
2524 {
2525 loop->any_estimate = true;
2526 loop->nb_iterations_estimate = i_bound;
2527 }
2528
2529 /* If an upper bound is smaller than the realistic estimate of the
2530 number of iterations, use the upper bound instead. */
2531 if (loop->any_upper_bound
2532 && loop->any_estimate
2533 && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
2534 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
2535 }
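
/* A small illustrative sketch (made-up numbers): after recording an upper
   bound of 100 (REALISTIC false, UPPER true), the loop has
   nb_iterations_upper_bound == 100 and no estimate; recording a realistic
   estimate of 150 afterwards sets nb_iterations_estimate to 150, which the
   final step above immediately lowers to the recorded upper bound of
   100.  */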
2536
2537 /* Emit a -Waggressive-loop-optimizations warning if needed. */
2538
2539 static void
2540 do_warn_aggressive_loop_optimizations (struct loop *loop,
2541 double_int i_bound, gimple stmt)
2542 {
2543 /* Don't warn if the loop doesn't have known constant bound. */
2544 if (!loop->nb_iterations
2545 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
2546 || !warn_aggressive_loop_optimizations
2547 /* To avoid warning multiple times for the same loop,
2548 only start warning when we preserve loops. */
2549 || (cfun->curr_properties & PROP_loops) == 0
2550 /* Only warn once per loop. */
2551 || loop->warned_aggressive_loop_optimizations
2552 /* Only warn if undefined behavior gives us lower estimate than the
2553 known constant bound. */
2554 || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0
2555 /* And undefined behavior happens unconditionally. */
2556 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
2557 return;
2558
2559 edge e = single_exit (loop);
2560 if (e == NULL)
2561 return;
2562
2563 gimple estmt = last_stmt (e->src);
2564 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
2565 "iteration %E invokes undefined behavior",
2566 double_int_to_tree (TREE_TYPE (loop->nb_iterations),
2567 i_bound)))
2568 inform (gimple_location (estmt), "containing loop");
2569 loop->warned_aggressive_loop_optimizations = true;
2570 }
2571
2572 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
2573 is true if the loop is exited immediately after STMT, and this exit
2574 is taken at the latest when STMT is executed BOUND + 1 times.
2575 REALISTIC is true if BOUND is expected to be close to the real number
2576 of iterations. UPPER is true if we are sure the loop iterates at most
2577 BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
2578
2579 static void
2580 record_estimate (struct loop *loop, tree bound, double_int i_bound,
2581 gimple at_stmt, bool is_exit, bool realistic, bool upper)
2582 {
2583 double_int delta;
2584
2585 if (dump_file && (dump_flags & TDF_DETAILS))
2586 {
2587 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
2588 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
2589 fprintf (dump_file, " is %sexecuted at most ",
2590 upper ? "" : "probably ");
2591 print_generic_expr (dump_file, bound, TDF_SLIM);
2592 fprintf (dump_file, " (bounded by ");
2593 dump_double_int (dump_file, i_bound, true);
2594 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
2595 }
2596
2597 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
2598 real number of iterations. */
2599 if (TREE_CODE (bound) != INTEGER_CST)
2600 realistic = false;
2601 else
2602 gcc_checking_assert (i_bound == tree_to_double_int (bound));
2603 if (!upper && !realistic)
2604 return;
2605
2606 /* If we have a guaranteed upper bound, record it in the appropriate
2607 list, unless this is an !is_exit bound (i.e. undefined behavior in
2608 at_stmt) in a loop with known constant number of iterations. */
2609 if (upper
2610 && (is_exit
2611 || loop->nb_iterations == NULL_TREE
2612 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
2613 {
2614 struct nb_iter_bound *elt = ggc_alloc_nb_iter_bound ();
2615
2616 elt->bound = i_bound;
2617 elt->stmt = at_stmt;
2618 elt->is_exit = is_exit;
2619 elt->next = loop->bounds;
2620 loop->bounds = elt;
2621 }
2622
2623 /* If statement is executed on every path to the loop latch, we can directly
2624 infer the upper bound on the # of iterations of the loop. */
2625 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
2626 return;
2627
2628 /* Update the number of iteration estimates according to the bound.
2629 If at_stmt is an exit then the loop latch is executed at most BOUND times,
2630 otherwise it can be executed BOUND + 1 times. We will lower the estimate
2631 later if such a statement must be executed in the last iteration. */
2632 if (is_exit)
2633 delta = double_int_zero;
2634 else
2635 delta = double_int_one;
2636 i_bound += delta;
2637
2638 /* If an overflow occurred, ignore the result. */
2639 if (i_bound.ult (delta))
2640 return;
2641
2642 if (upper && !is_exit)
2643 do_warn_aggressive_loop_optimizations (loop, i_bound, at_stmt);
2644 record_niter_bound (loop, i_bound, realistic, upper);
2645 }
2646
2647 /* Record the estimate on number of iterations of LOOP based on the fact that
2648 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
2649 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
2650 estimated number of iterations is expected to be close to the real one.
2651 UPPER is true if we are sure the induction variable does not wrap. */
2652
2653 static void
2654 record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
2655 tree low, tree high, bool realistic, bool upper)
2656 {
2657 tree niter_bound, extreme, delta;
2658 tree type = TREE_TYPE (base), unsigned_type;
2659 double_int max;
2660
2661 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
2662 return;
2663
2664 if (dump_file && (dump_flags & TDF_DETAILS))
2665 {
2666 fprintf (dump_file, "Induction variable (");
2667 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
2668 fprintf (dump_file, ") ");
2669 print_generic_expr (dump_file, base, TDF_SLIM);
2670 fprintf (dump_file, " + ");
2671 print_generic_expr (dump_file, step, TDF_SLIM);
2672 fprintf (dump_file, " * iteration does not wrap in statement ");
2673 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2674 fprintf (dump_file, " in loop %d.\n", loop->num);
2675 }
2676
2677 unsigned_type = unsigned_type_for (type);
2678 base = fold_convert (unsigned_type, base);
2679 step = fold_convert (unsigned_type, step);
2680
2681 if (tree_int_cst_sign_bit (step))
2682 {
2683 extreme = fold_convert (unsigned_type, low);
2684 if (TREE_CODE (base) != INTEGER_CST)
2685 base = fold_convert (unsigned_type, high);
2686 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
2687 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
2688 }
2689 else
2690 {
2691 extreme = fold_convert (unsigned_type, high);
2692 if (TREE_CODE (base) != INTEGER_CST)
2693 base = fold_convert (unsigned_type, low);
2694 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
2695 }
2696
2697 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
2698 would get out of the range. */
2699 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
2700 max = derive_constant_upper_bound (niter_bound);
2701 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
2702 }
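
/* An illustrative sketch with made-up values: for an IV with BASE 0,
   STEP 4 and the known range <0, 40>, DELTA is 40 and NITER_BOUND folds to
   40 / 4 = 10, so the statement is recorded as executed at most 10 + 1 = 11
   times in the loop.  */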
2703
2704 /* Determine information about the number of iterations of a LOOP from the index
2705 IDX of a data reference accessed in a statement (passed in DTA). Whether the
2706 derived bound is reliable is decided from the way the access is executed.
2707 Callback for for_each_index. */
2708
2709 struct ilb_data
2710 {
2711 struct loop *loop;
2712 gimple stmt;
2713 };
2714
2715 static bool
2716 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
2717 {
2718 struct ilb_data *data = (struct ilb_data *) dta;
2719 tree ev, init, step;
2720 tree low, high, type, next;
2721 bool sign, upper = true, at_end = false;
2722 struct loop *loop = data->loop;
2723 bool reliable = true;
2724
2725 if (TREE_CODE (base) != ARRAY_REF)
2726 return true;
2727
2728 /* For arrays at the end of the structure, we are not guaranteed that they
2729 do not really extend over their declared size. However, for arrays of
2730 size greater than one, this is unlikely to be intended. */
2731 if (array_at_struct_end_p (base))
2732 {
2733 at_end = true;
2734 upper = false;
2735 }
2736
2737 struct loop *dloop = loop_containing_stmt (data->stmt);
2738 if (!dloop)
2739 return true;
2740
2741 ev = analyze_scalar_evolution (dloop, *idx);
2742 ev = instantiate_parameters (loop, ev);
2743 init = initial_condition (ev);
2744 step = evolution_part_in_loop_num (ev, loop->num);
2745
2746 if (!init
2747 || !step
2748 || TREE_CODE (step) != INTEGER_CST
2749 || integer_zerop (step)
2750 || tree_contains_chrecs (init, NULL)
2751 || chrec_contains_symbols_defined_in_loop (init, loop->num))
2752 return true;
2753
2754 low = array_ref_low_bound (base);
2755 high = array_ref_up_bound (base);
2756
2757 /* The case of nonconstant bounds could be handled, but it would be
2758 complicated. */
2759 if (TREE_CODE (low) != INTEGER_CST
2760 || !high
2761 || TREE_CODE (high) != INTEGER_CST)
2762 return true;
2763 sign = tree_int_cst_sign_bit (step);
2764 type = TREE_TYPE (step);
2765
2766 /* The array of length 1 at the end of a structure most likely extends
2767 beyond its bounds. */
2768 if (at_end
2769 && operand_equal_p (low, high, 0))
2770 return true;
2771
2772 /* In case the relevant bound of the array does not fit in type, or
2773 it does, but bound + step (in type) still belongs into the range of the
2774 array, the index may wrap and still stay within the range of the array
2775 (consider e.g. if the array is indexed by the full range of
2776 unsigned char).
2777
2778 To make things simpler, we require both bounds to fit into type, although
2779 there are cases where this would not be strictly necessary. */
2780 if (!int_fits_type_p (high, type)
2781 || !int_fits_type_p (low, type))
2782 return true;
2783 low = fold_convert (type, low);
2784 high = fold_convert (type, high);
2785
2786 if (sign)
2787 next = fold_binary (PLUS_EXPR, type, low, step);
2788 else
2789 next = fold_binary (PLUS_EXPR, type, high, step);
2790
2791 if (tree_int_cst_compare (low, next) <= 0
2792 && tree_int_cst_compare (next, high) <= 0)
2793 return true;
2794
2795 /* If the access is not executed on every iteration, we must ensure that overflow
2796 may not make the access valid later. */
2797 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
2798 && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
2799 step, data->stmt, loop, true))
2800 reliable = false;
2801
2802 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, reliable, upper);
2803 return true;
2804 }
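
/* An illustrative sketch (hypothetical access): for a[i] with "int a[10]"
   not being a trailing array in a structure, and i starting at 0 and
   stepping by 1, LOW is 0 and HIGH is 9, so record_nonwrapping_iv above
   derives that the access is executed at most 9 / 1 + 1 = 10 times.  */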
2805
2806 /* Determine information about the number of iterations of a LOOP from the bounds
2807 of arrays in the data reference REF accessed in STMT. */
2809
2810 static void
2811 infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
2812 {
2813 struct ilb_data data;
2814
2815 data.loop = loop;
2816 data.stmt = stmt;
2817 for_each_index (&ref, idx_infer_loop_bounds, &data);
2818 }
2819
2820 /* Determine information about the number of iterations of a LOOP from the way
2821 arrays are used in STMT. */
2823
2824 static void
2825 infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
2826 {
2827 if (is_gimple_assign (stmt))
2828 {
2829 tree op0 = gimple_assign_lhs (stmt);
2830 tree op1 = gimple_assign_rhs1 (stmt);
2831
2832 /* For each memory access, analyze its access function
2833 and record a bound on the loop iteration domain. */
2834 if (REFERENCE_CLASS_P (op0))
2835 infer_loop_bounds_from_ref (loop, stmt, op0);
2836
2837 if (REFERENCE_CLASS_P (op1))
2838 infer_loop_bounds_from_ref (loop, stmt, op1);
2839 }
2840 else if (is_gimple_call (stmt))
2841 {
2842 tree arg, lhs;
2843 unsigned i, n = gimple_call_num_args (stmt);
2844
2845 lhs = gimple_call_lhs (stmt);
2846 if (lhs && REFERENCE_CLASS_P (lhs))
2847 infer_loop_bounds_from_ref (loop, stmt, lhs);
2848
2849 for (i = 0; i < n; i++)
2850 {
2851 arg = gimple_call_arg (stmt, i);
2852 if (REFERENCE_CLASS_P (arg))
2853 infer_loop_bounds_from_ref (loop, stmt, arg);
2854 }
2855 }
2856 }
2857
2858 /* Determine information about number of iterations of a LOOP from the fact
2859 that pointer arithmetics in STMT does not overflow. */
2860
2861 static void
2862 infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
2863 {
2864 tree def, base, step, scev, type, low, high;
2865 tree var, ptr;
2866
2867 if (!is_gimple_assign (stmt)
2868 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
2869 return;
2870
2871 def = gimple_assign_lhs (stmt);
2872 if (TREE_CODE (def) != SSA_NAME)
2873 return;
2874
2875 type = TREE_TYPE (def);
2876 if (!nowrap_type_p (type))
2877 return;
2878
2879 ptr = gimple_assign_rhs1 (stmt);
2880 if (!expr_invariant_in_loop_p (loop, ptr))
2881 return;
2882
2883 var = gimple_assign_rhs2 (stmt);
2884 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
2885 return;
2886
2887 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2888 if (chrec_contains_undetermined (scev))
2889 return;
2890
2891 base = initial_condition_in_loop_num (scev, loop->num);
2892 step = evolution_part_in_loop_num (scev, loop->num);
2893
2894 if (!base || !step
2895 || TREE_CODE (step) != INTEGER_CST
2896 || tree_contains_chrecs (base, NULL)
2897 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2898 return;
2899
2900 low = lower_bound_in_type (type, type);
2901 high = upper_bound_in_type (type, type);
2902
2903 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
2904 produce a NULL pointer. The contrary would mean NULL points to an object,
2905 while NULL is supposed to compare unequal with the address of all objects.
2906 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
2907 NULL pointer since that would mean wrapping, which we assume here not to
2908 happen. So, we can exclude NULL from the valid range of pointer
2909 arithmetic. */
2910 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
2911 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
2912
2913 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2914 }
2915
2916 /* Determine information about number of iterations of a LOOP from the fact
2917 that signed arithmetics in STMT does not overflow. */
2918
2919 static void
2920 infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
2921 {
2922 tree def, base, step, scev, type, low, high;
2923
2924 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2925 return;
2926
2927 def = gimple_assign_lhs (stmt);
2928
2929 if (TREE_CODE (def) != SSA_NAME)
2930 return;
2931
2932 type = TREE_TYPE (def);
2933 if (!INTEGRAL_TYPE_P (type)
2934 || !TYPE_OVERFLOW_UNDEFINED (type))
2935 return;
2936
2937 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
2938 if (chrec_contains_undetermined (scev))
2939 return;
2940
2941 base = initial_condition_in_loop_num (scev, loop->num);
2942 step = evolution_part_in_loop_num (scev, loop->num);
2943
2944 if (!base || !step
2945 || TREE_CODE (step) != INTEGER_CST
2946 || tree_contains_chrecs (base, NULL)
2947 || chrec_contains_symbols_defined_in_loop (base, loop->num))
2948 return;
2949
2950 low = lower_bound_in_type (type, type);
2951 high = upper_bound_in_type (type, type);
2952
2953 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
2954 }
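
/* An illustrative sketch (hypothetical statement): with undefined signed
   overflow, an assignment c = c + 1 for a variable c of type signed char
   whose evolution starts at 0 gives LOW = -128 and HIGH = 127, so the
   statement is recorded as executed at most 127 + 1 = 128 times.  */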
2955
2956 /* The following analyzers extract information on the bounds
2957 of LOOP from the following undefined behaviors:
2958
2959 - data references should not access elements over the statically
2960 allocated size,
2961
2962 - signed variables should not overflow when flag_wrapv is not set.
2963 */
2964
2965 static void
2966 infer_loop_bounds_from_undefined (struct loop *loop)
2967 {
2968 unsigned i;
2969 basic_block *bbs;
2970 gimple_stmt_iterator bsi;
2971 basic_block bb;
2972 bool reliable;
2973
2974 bbs = get_loop_body (loop);
2975
2976 for (i = 0; i < loop->num_nodes; i++)
2977 {
2978 bb = bbs[i];
2979
2980 /* If BB is not executed in each iteration of the loop, we cannot
2981 use the operations in it to infer reliable upper bound on the
2982 # of iterations of the loop. However, we can use it as a guess.
2983 Reliable guesses come only from array bounds. */
2984 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
2985
2986 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
2987 {
2988 gimple stmt = gsi_stmt (bsi);
2989
2990 infer_loop_bounds_from_array (loop, stmt);
2991
2992 if (reliable)
2993 {
2994 infer_loop_bounds_from_signedness (loop, stmt);
2995 infer_loop_bounds_from_pointer_arith (loop, stmt);
2996 }
2997 }
2998
2999 }
3000
3001 free (bbs);
3002 }
3003
3004 /* Converts VAL to double_int. */
3005
3006 static double_int
3007 gcov_type_to_double_int (gcov_type val)
3008 {
3009 double_int ret;
3010
3011 ret.low = (unsigned HOST_WIDE_INT) val;
3012 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
3013 the size of type. */
3014 val >>= HOST_BITS_PER_WIDE_INT - 1;
3015 val >>= 1;
3016 ret.high = (unsigned HOST_WIDE_INT) val;
3017
3018 return ret;
3019 }
3020
3021 /* Compare double ints, callback for qsort. */
3022
3023 int
3024 double_int_cmp (const void *p1, const void *p2)
3025 {
3026 const double_int *d1 = (const double_int *)p1;
3027 const double_int *d2 = (const double_int *)p2;
3028 if (*d1 == *d2)
3029 return 0;
3030 if (d1->ult (*d2))
3031 return -1;
3032 return 1;
3033 }
3034
3035 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3036 Lookup by binary search. */
3037
3038 int
3039 bound_index (vec<double_int> bounds, double_int bound)
3040 {
3041 unsigned int end = bounds.length ();
3042 unsigned int begin = 0;
3043
3044 /* Find a matching index by means of a binary search. */
3045 while (begin != end)
3046 {
3047 unsigned int middle = (begin + end) / 2;
3048 double_int index = bounds[middle];
3049
3050 if (index == bound)
3051 return middle;
3052 else if (index.ult (bound))
3053 begin = middle + 1;
3054 else
3055 end = middle;
3056 }
3057 gcc_unreachable ();
3058 }
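
/* For instance, with BOUNDS holding the sorted values {2, 5, 9} (made-up
   numbers), bound_index (bounds, 5) returns 1.  The searched bound is
   expected to be present; a missing value ends in the gcc_unreachable
   above.  */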
3059
3060 /* We recorded loop bounds only for statements dominating loop latch (and thus
3061 executed each loop iteration). If there are any bounds on statements not
3062 dominating the loop latch we can improve the estimate by walking the loop
3063 body and seeing if every path from loop header to loop latch contains
3064 some bounded statement. */
3065
3066 static void
3067 discover_iteration_bound_by_body_walk (struct loop *loop)
3068 {
3069 pointer_map_t *bb_bounds;
3070 struct nb_iter_bound *elt;
3071 vec<double_int> bounds = vNULL;
3072 vec<vec<basic_block> > queues = vNULL;
3073 vec<basic_block> queue = vNULL;
3074 ptrdiff_t queue_index;
3075 ptrdiff_t latch_index = 0;
3076 pointer_map_t *block_priority;
3077
3078 /* Discover what bounds may interest us. */
3079 for (elt = loop->bounds; elt; elt = elt->next)
3080 {
3081 double_int bound = elt->bound;
3082
3083 /* An exit terminates the loop at the given iteration, while a non-exit bound
3084 produces an undefined effect in the next iteration. */
3085 if (!elt->is_exit)
3086 {
3087 bound += double_int_one;
3088 /* If an overflow occurred, ignore the result. */
3089 if (bound.is_zero ())
3090 continue;
3091 }
3092
3093 if (!loop->any_upper_bound
3094 || bound.ult (loop->nb_iterations_upper_bound))
3095 bounds.safe_push (bound);
3096 }
3097
3098 /* Exit early if there is nothing to do. */
3099 if (!bounds.exists ())
3100 return;
3101
3102 if (dump_file && (dump_flags & TDF_DETAILS))
3103 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3104
3105 /* Sort the bounds in increasing order. */
3106 qsort (bounds.address (), bounds.length (),
3107 sizeof (double_int), double_int_cmp);
3108
3109 /* For every basic block record the lowest bound that is guaranteed to
3110 terminate the loop. */
3111
3112 bb_bounds = pointer_map_create ();
3113 for (elt = loop->bounds; elt; elt = elt->next)
3114 {
3115 double_int bound = elt->bound;
3116 if (!elt->is_exit)
3117 {
3118 bound += double_int_one;
3119 /* If an overflow occurred, ignore the result. */
3120 if (bound.is_zero ())
3121 continue;
3122 }
3123
3124 if (!loop->any_upper_bound
3125 || bound.ult (loop->nb_iterations_upper_bound))
3126 {
3127 ptrdiff_t index = bound_index (bounds, bound);
3128 void **entry = pointer_map_contains (bb_bounds,
3129 gimple_bb (elt->stmt));
3130 if (!entry)
3131 *pointer_map_insert (bb_bounds,
3132 gimple_bb (elt->stmt)) = (void *)index;
3133 else if ((ptrdiff_t)*entry > index)
3134 *entry = (void *)index;
3135 }
3136 }
3137
3138 block_priority = pointer_map_create ();
3139
3140 /* Perform shortest path discovery loop->header ... loop->latch.
3141
3142 The "distance" is given by the smallest loop bound of basic block
3143 present in the path and we look for path with largest smallest bound
3144 on it.
3145
3146 To avoid the need for fibonacci heap on double ints we simply compress
3147 double ints into indexes to BOUNDS array and then represent the queue
3148 as arrays of queues for every index.
3149 Index of BOUNDS.length() means that the execution of given BB has
3150 no bounds determined.
3151
3152 VISITED is a pointer map translating basic block into smallest index
3153 it was inserted into the priority queue with. */
3154 latch_index = -1;
3155
3156 /* Start walk in loop header with index set to infinite bound. */
3157 queue_index = bounds.length ();
3158 queues.safe_grow_cleared (queue_index + 1);
3159 queue.safe_push (loop->header);
3160 queues[queue_index] = queue;
3161 *pointer_map_insert (block_priority, loop->header) = (void *)queue_index;
3162
3163 for (; queue_index >= 0; queue_index--)
3164 {
3165 if (latch_index < queue_index)
3166 {
3167 while (queues[queue_index].length ())
3168 {
3169 basic_block bb;
3170 ptrdiff_t bound_index = queue_index;
3171 void **entry;
3172 edge e;
3173 edge_iterator ei;
3174
3175 queue = queues[queue_index];
3176 bb = queue.pop ();
3177
3178 /* OK, we later inserted the BB with lower priority, skip it. */
3179 if ((ptrdiff_t)*pointer_map_contains (block_priority, bb) > queue_index)
3180 continue;
3181
3182 /* See if we can improve the bound. */
3183 entry = pointer_map_contains (bb_bounds, bb);
3184 if (entry && (ptrdiff_t)*entry < bound_index)
3185 bound_index = (ptrdiff_t)*entry;
3186
3187 /* Insert successors into the queue, watch for the latch edge
3188 and record the greatest index we saw. */
3189 FOR_EACH_EDGE (e, ei, bb->succs)
3190 {
3191 bool insert = false;
3192 void **entry;
3193
3194 if (loop_exit_edge_p (loop, e))
3195 continue;
3196
3197 if (e == loop_latch_edge (loop)
3198 && latch_index < bound_index)
3199 latch_index = bound_index;
3200 else if (!(entry = pointer_map_contains (block_priority, e->dest)))
3201 {
3202 insert = true;
3203 *pointer_map_insert (block_priority, e->dest) = (void *)bound_index;
3204 }
3205 else if ((ptrdiff_t)*entry < bound_index)
3206 {
3207 insert = true;
3208 *entry = (void *)bound_index;
3209 }
3210
3211 if (insert)
3212 queues[bound_index].safe_push (e->dest);
3213 }
3214 }
3215 }
3216 queues[queue_index].release ();
3217 }
3218
3219 gcc_assert (latch_index >= 0);
3220 if ((unsigned)latch_index < bounds.length ())
3221 {
3222 if (dump_file && (dump_flags & TDF_DETAILS))
3223 {
3224 fprintf (dump_file, "Found better loop bound ");
3225 dump_double_int (dump_file, bounds[latch_index], true);
3226 fprintf (dump_file, "\n");
3227 }
3228 record_niter_bound (loop, bounds[latch_index], false, true);
3229 }
3230
3231 queues.release ();
3232 bounds.release ();
3233 pointer_map_destroy (bb_bounds);
3234 pointer_map_destroy (block_priority);
3235 }
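
/* An illustrative sketch of the walk above (made-up bounds): if the loop
   body branches into two arms, one containing a statement that cannot be
   executed after iteration 10 and the other a statement that cannot be
   executed after iteration 20, then every header-to-latch path contains a
   bounded statement, and the path with the largest such bound still limits
   the loop, so a bound of 20 is recorded even though neither statement
   dominates the latch.  */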
3236
3237 /* See if every path across the loop goes through a statement that is known
3238 not to execute in the last iteration. In that case we can decrease the
3239 iteration count by 1. */
3240
3241 static void
3242 maybe_lower_iteration_bound (struct loop *loop)
3243 {
3244 pointer_set_t *not_executed_last_iteration = NULL;
3245 struct nb_iter_bound *elt;
3246 bool found_exit = false;
3247 vec<basic_block> queue = vNULL;
3248 bitmap visited;
3249
3250 /* Collect all statements with interesting (i.e. lower than
3251 nb_iterations_upper_bound) bound on them.
3252
3253 TODO: Due to the way record_estimate chooses estimates to store, the bounds
3254 will always be nb_iterations_upper_bound-1. We can change this to also
3255 record statements not dominating the loop latch and update the walk below
3256 to the shortest path algorithm. */
3257 for (elt = loop->bounds; elt; elt = elt->next)
3258 {
3259 if (!elt->is_exit
3260 && elt->bound.ult (loop->nb_iterations_upper_bound))
3261 {
3262 if (!not_executed_last_iteration)
3263 not_executed_last_iteration = pointer_set_create ();
3264 pointer_set_insert (not_executed_last_iteration, elt->stmt);
3265 }
3266 }
3267 if (!not_executed_last_iteration)
3268 return;
3269
3270 /* Start DFS walk in the loop header and see if we can reach the
3271 loop latch or any of the exits (including statements with side
3272 effects that may terminate the loop otherwise) without visiting
3273 any of the statements known to have undefined effect on the last
3274 iteration. */
3275 queue.safe_push (loop->header);
3276 visited = BITMAP_ALLOC (NULL);
3277 bitmap_set_bit (visited, loop->header->index);
3278 found_exit = false;
3279
3280 do
3281 {
3282 basic_block bb = queue.pop ();
3283 gimple_stmt_iterator gsi;
3284 bool stmt_found = false;
3285
3286 /* Loop for possible exits and statements bounding the execution. */
3287 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3288 {
3289 gimple stmt = gsi_stmt (gsi);
3290 if (pointer_set_contains (not_executed_last_iteration, stmt))
3291 {
3292 stmt_found = true;
3293 break;
3294 }
3295 if (gimple_has_side_effects (stmt))
3296 {
3297 found_exit = true;
3298 break;
3299 }
3300 }
3301 if (found_exit)
3302 break;
3303
3304 /* If no bounding statement is found, continue the walk. */
3305 if (!stmt_found)
3306 {
3307 edge e;
3308 edge_iterator ei;
3309
3310 FOR_EACH_EDGE (e, ei, bb->succs)
3311 {
3312 if (loop_exit_edge_p (loop, e)
3313 || e == loop_latch_edge (loop))
3314 {
3315 found_exit = true;
3316 break;
3317 }
3318 if (bitmap_set_bit (visited, e->dest->index))
3319 queue.safe_push (e->dest);
3320 }
3321 }
3322 }
3323 while (queue.length () && !found_exit);
3324
3325 /* If every path through the loop reaches a bounding statement before an exit,
3326 then we know the last iteration of the loop will have undefined effect
3327 and we can decrease the number of iterations. */
3328
3329 if (!found_exit)
3330 {
3331 if (dump_file && (dump_flags & TDF_DETAILS))
3332 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
3333 "undefined statement must be executed at the last iteration.\n");
3334 record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one,
3335 false, true);
3336 }
3337 BITMAP_FREE (visited);
3338 queue.release ();
3339 pointer_set_destroy (not_executed_last_iteration);
3340 }
3341
3342 /* Records estimates on numbers of iterations of LOOP. Estimates derived
3343 from undefined behavior are used only with -faggressive-loop-optimizations. */
3344
3345 void
3346 estimate_numbers_of_iterations_loop (struct loop *loop)
3347 {
3348 vec<edge> exits;
3349 tree niter, type;
3350 unsigned i;
3351 struct tree_niter_desc niter_desc;
3352 edge ex;
3353 double_int bound;
3354 edge likely_exit;
3355
3356 /* Give up if we already have tried to compute an estimation. */
3357 if (loop->estimate_state != EST_NOT_COMPUTED)
3358 return;
3359
3360 loop->estimate_state = EST_AVAILABLE;
3361 /* Force estimate computation but leave any existing upper bound in place. */
3362 loop->any_estimate = false;
3363
3364 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
3365 to be constant, we avoid undefined behavior implied bounds and instead
3366 diagnose those loops with -Waggressive-loop-optimizations. */
3367 number_of_latch_executions (loop);
3368
3369 exits = get_loop_exit_edges (loop);
3370 likely_exit = single_likely_exit (loop);
3371 FOR_EACH_VEC_ELT (exits, i, ex)
3372 {
3373 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
3374 continue;
3375
3376 niter = niter_desc.niter;
3377 type = TREE_TYPE (niter);
3378 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
3379 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
3380 build_int_cst (type, 0),
3381 niter);
3382 record_estimate (loop, niter, niter_desc.max,
3383 last_stmt (ex->src),
3384 true, ex == likely_exit, true);
3385 }
3386 exits.release ();
3387
3388 if (flag_aggressive_loop_optimizations)
3389 infer_loop_bounds_from_undefined (loop);
3390
3391 discover_iteration_bound_by_body_walk (loop);
3392
3393 maybe_lower_iteration_bound (loop);
3394
3395 /* If we have a measured profile, use it to estimate the number of
3396 iterations. */
3397 if (loop->header->count != 0)
3398 {
3399 gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
3400 bound = gcov_type_to_double_int (nit);
3401 record_niter_bound (loop, bound, true, false);
3402 }
3403
3404 /* If we know the exact number of iterations of this loop, try to
3405 not break code with undefined behavior by not recording smaller
3406 maximum number of iterations. */
3407 if (loop->nb_iterations
3408 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
3409 {
3410 loop->any_upper_bound = true;
3411 loop->nb_iterations_upper_bound
3412 = tree_to_double_int (loop->nb_iterations);
3413 }
3414 }
3415
3416 /* Sets NIT to the estimated number of executions of the latch of the
3417 LOOP. If we have no reliable estimate, the function returns false,
3418 otherwise returns true. */
3420
3421 bool
3422 estimated_loop_iterations (struct loop *loop, double_int *nit)
3423 {
3424 /* When SCEV information is available, try to update loop iterations
3425 estimate. Otherwise just return whatever we recorded earlier. */
3426 if (scev_initialized_p ())
3427 estimate_numbers_of_iterations_loop (loop);
3428
3429 /* Even if the bound is not recorded, possibly we can derive one from
3430 profile. */
3431 if (!loop->any_estimate)
3432 {
3433 if (loop->header->count)
3434 {
3435 *nit = gcov_type_to_double_int
3436 (expected_loop_iterations_unbounded (loop) + 1);
3437 return true;
3438 }
3439 return false;
3440 }
3441
3442 *nit = loop->nb_iterations_estimate;
3443 return true;
3444 }
3445
3446 /* Sets NIT to an upper bound for the maximum number of executions of the
3447 latch of the LOOP. If we have no reliable estimate, the function returns
3448 false, otherwise returns true. */
3449
3450 bool
3451 max_loop_iterations (struct loop *loop, double_int *nit)
3452 {
3453 /* When SCEV information is available, try to update loop iterations
3454 estimate. Otherwise just return whatever we recorded earlier. */
3455 if (scev_initialized_p ())
3456 estimate_numbers_of_iterations_loop (loop);
3457 if (!loop->any_upper_bound)
3458 return false;
3459
3460 *nit = loop->nb_iterations_upper_bound;
3461 return true;
3462 }
3463
3464 /* Similar to estimated_loop_iterations, but returns the estimate only
3465 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
3466 on the number of iterations of LOOP could not be derived, returns -1. */
3467
3468 HOST_WIDE_INT
3469 estimated_loop_iterations_int (struct loop *loop)
3470 {
3471 double_int nit;
3472 HOST_WIDE_INT hwi_nit;
3473
3474 if (!estimated_loop_iterations (loop, &nit))
3475 return -1;
3476
3477 if (!nit.fits_shwi ())
3478 return -1;
3479 hwi_nit = nit.to_shwi ();
3480
3481 return hwi_nit < 0 ? -1 : hwi_nit;
3482 }
3483
3484 /* Similar to max_loop_iterations, but returns the estimate only
3485 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
3486 on the number of iterations of LOOP could not be derived, returns -1. */
3487
3488 HOST_WIDE_INT
3489 max_loop_iterations_int (struct loop *loop)
3490 {
3491 double_int nit;
3492 HOST_WIDE_INT hwi_nit;
3493
3494 if (!max_loop_iterations (loop, &nit))
3495 return -1;
3496
3497 if (!nit.fits_shwi ())
3498 return -1;
3499 hwi_nit = nit.to_shwi ();
3500
3501 return hwi_nit < 0 ? -1 : hwi_nit;
3502 }
3503
3504 /* Returns an upper bound on the number of executions of statements
3505 in the LOOP. For statements before the loop exit, this exceeds
3506 the number of executions of the latch by one. */
3507
3508 HOST_WIDE_INT
3509 max_stmt_executions_int (struct loop *loop)
3510 {
3511 HOST_WIDE_INT nit = max_loop_iterations_int (loop);
3512 HOST_WIDE_INT snit;
3513
3514 if (nit == -1)
3515 return -1;
3516
3517 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3518
3519 /* If the computation overflows, return -1. */
3520 return snit < 0 ? -1 : snit;
3521 }
3522
3523 /* Returns an estimate for the number of executions of statements
3524 in the LOOP. For statements before the loop exit, this exceeds
3525 the number of executions of the latch by one. */
3526
3527 HOST_WIDE_INT
3528 estimated_stmt_executions_int (struct loop *loop)
3529 {
3530 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
3531 HOST_WIDE_INT snit;
3532
3533 if (nit == -1)
3534 return -1;
3535
3536 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
3537
3538 /* If the computation overflows, return -1. */
3539 return snit < 0 ? -1 : snit;
3540 }
3541
3542 /* Sets NIT to the maximum number of executions of the latch of the
3543 LOOP, plus one. If we have no reliable bound, the function returns
3544 false, otherwise returns true. */
3545
3546 bool
3547 max_stmt_executions (struct loop *loop, double_int *nit)
3548 {
3549 double_int nit_minus_one;
3550
3551 if (!max_loop_iterations (loop, nit))
3552 return false;
3553
3554 nit_minus_one = *nit;
3555
3556 *nit += double_int_one;
3557
3558 return (*nit).ugt (nit_minus_one);
3559 }
3560
3561 /* Sets NIT to the estimated number of executions of the latch of the
3562 LOOP, plus one. If we have no reliable estimate, the function returns
3563 false, otherwise returns true. */
3564
3565 bool
3566 estimated_stmt_executions (struct loop *loop, double_int *nit)
3567 {
3568 double_int nit_minus_one;
3569
3570 if (!estimated_loop_iterations (loop, nit))
3571 return false;
3572
3573 nit_minus_one = *nit;
3574
3575 *nit += double_int_one;
3576
3577 return (*nit).ugt (nit_minus_one);
3578 }
3579
3580 /* Records estimates on numbers of iterations of loops. */
3581
3582 void
3583 estimate_numbers_of_iterations (void)
3584 {
3585 loop_iterator li;
3586 struct loop *loop;
3587
3588 /* We don't want to issue signed overflow warnings while getting
3589 loop iteration estimates. */
3590 fold_defer_overflow_warnings ();
3591
3592 FOR_EACH_LOOP (li, loop, 0)
3593 {
3594 estimate_numbers_of_iterations_loop (loop);
3595 }
3596
3597 fold_undefer_and_ignore_overflow_warnings ();
3598 }
3599
3600 /* Returns true if statement S1 dominates statement S2. */
3601
3602 bool
3603 stmt_dominates_stmt_p (gimple s1, gimple s2)
3604 {
3605 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
3606
3607 if (!bb1
3608 || s1 == s2)
3609 return true;
3610
3611 if (bb1 == bb2)
3612 {
3613 gimple_stmt_iterator bsi;
3614
3615 if (gimple_code (s2) == GIMPLE_PHI)
3616 return false;
3617
3618 if (gimple_code (s1) == GIMPLE_PHI)
3619 return true;
3620
3621 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
3622 if (gsi_stmt (bsi) == s1)
3623 return true;
3624
3625 return false;
3626 }
3627
3628 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
3629 }
3630
3631 /* Returns true when we can prove that the number of executions of
3632 STMT in the loop is at most NITER, according to the bound on
3633 the number of executions of the statement NITER_BOUND->stmt recorded in
3634 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
3635
3636 ??? This code can become quite a CPU hog - we can have many bounds,
3637 and large basic blocks force stmt_dominates_stmt_p to be queried
3638 many times on a large basic block, so the whole thing is O(n^2)
3639 per scev_probably_wraps_p invocation (which can be done n times).
3640
3641 It would make more sense (and give better answers) to remember BB
3642 bounds computed by discover_iteration_bound_by_body_walk. */
3643
3644 static bool
3645 n_of_executions_at_most (gimple stmt,
3646 struct nb_iter_bound *niter_bound,
3647 tree niter)
3648 {
3649 double_int bound = niter_bound->bound;
3650 tree nit_type = TREE_TYPE (niter), e;
3651 enum tree_code cmp;
3652
3653 gcc_assert (TYPE_UNSIGNED (nit_type));
3654
3655 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
3656 the number of iterations is small. */
3657 if (!double_int_fits_to_tree_p (nit_type, bound))
3658 return false;
3659
3660 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
3661 times. This means that:
3662
3663 -- if NITER_BOUND->is_exit is true, then everything after
3664 it is executed at most NITER_BOUND->bound times.
3665
3666 -- If NITER_BOUND->is_exit is false, and we can prove that whenever STMT
3667 is executed, NITER_BOUND->stmt is executed as well in the same
3668 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
3669
3670 If we can determine that NITER_BOUND->stmt is always executed
3671 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
3672 We conclude that this is the case when both statements belong to the same
3673 basic block, STMT comes before NITER_BOUND->stmt, and there are no
3674 statements with side effects in between. */
3675
3676 if (niter_bound->is_exit)
3677 {
3678 if (stmt == niter_bound->stmt
3679 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3680 return false;
3681 cmp = GE_EXPR;
3682 }
3683 else
3684 {
3685 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
3686 {
3687 gimple_stmt_iterator bsi;
3688 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
3689 || gimple_code (stmt) == GIMPLE_PHI
3690 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
3691 return false;
3692
3693 /* By stmt_dominates_stmt_p we already know that STMT appears
3694 before NITER_BOUND->STMT. We still need to test that the loop
3695 cannot be terminated by a side effect in between. */
3696 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
3697 gsi_next (&bsi))
3698 if (gimple_has_side_effects (gsi_stmt (bsi)))
3699 return false;
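	  /* STMT may be executed one more time than NITER_BOUND->stmt (see
	     the comment above); bump the bound accordingly and give up if
	     the increment wraps around or no longer fits NIT_TYPE.  */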
3700 bound += double_int_one;
3701 if (bound.is_zero ()
3702 || !double_int_fits_to_tree_p (nit_type, bound))
3703 return false;
3704 }
3705 cmp = GT_EXPR;
3706 }
3707
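  /* STMT is executed at most BOUND times for GE_EXPR and at most BOUND + 1
     times for GT_EXPR; succeed only if the comparison of NITER against the
     bound folds to a nonzero constant.  */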
3708 e = fold_binary (cmp, boolean_type_node,
3709 niter, double_int_to_tree (nit_type, bound));
3710 return e && integer_nonzerop (e);
3711 }
3712
3713 /* Returns true if arithmetic in TYPE can be assumed not to wrap. */
3714
3715 bool
3716 nowrap_type_p (tree type)
3717 {
3718 if (INTEGRAL_TYPE_P (type)
3719 && TYPE_OVERFLOW_UNDEFINED (type))
3720 return true;
3721
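  /* Pointer arithmetic is likewise assumed never to wrap.  */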
3722 if (POINTER_TYPE_P (type))
3723 return true;
3724
3725 return false;
3726 }
3727
3728 /* Return false only when the induction variable BASE + STEP * I is
3729 known to not overflow: i.e. when the number of iterations is small
3730 enough with respect to the step and initial condition in order to
3731 keep the evolution confined in TYPE's bounds. Return true when the
3732 iv is known to overflow or when the property is not computable.
3733
3734 USE_OVERFLOW_SEMANTICS is true if this function should assume that
3735 the rules for overflow of the given language apply (e.g., that signed
3736 arithmetic in C does not overflow). */
3737
3738 bool
3739 scev_probably_wraps_p (tree base, tree step,
3740 gimple at_stmt, struct loop *loop,
3741 bool use_overflow_semantics)
3742 {
3743 tree delta, step_abs;
3744 tree unsigned_type, valid_niter;
3745 tree type = TREE_TYPE (step);
3746 tree e;
3747 double_int niter;
3748 struct nb_iter_bound *bound;
3749
3750 /* FIXME: We really need something like
3751 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
3752
3753 We used to test for the following situation that frequently appears
3754 during address arithmetic:
3755
3756 D.1621_13 = (long unsigned intD.4) D.1620_12;
3757 D.1622_14 = D.1621_13 * 8;
3758 D.1623_15 = (doubleD.29 *) D.1622_14;
3759
3760 And derived that the sequence corresponding to D_14
3761 can be proved to not wrap because it is used for computing a
3762 memory access; however, this is not really the case -- for example,
3763 if D_12 = (unsigned char) [254,+,1], then D_14 has values
3764 2032, 2040, 0, 8, ..., but the code is still legal. */
3765
3766 if (chrec_contains_undetermined (base)
3767 || chrec_contains_undetermined (step))
3768 return true;
3769
3770 if (integer_zerop (step))
3771 return false;
3772
3773 /* If we can use the fact that signed and pointer arithmetic does not
3774 wrap, we are done. */
3775 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
3776 return false;
3777
3778 /* To be able to use estimates on number of iterations of the loop,
3779 we must have an upper bound on the absolute value of the step. */
3780 if (TREE_CODE (step) != INTEGER_CST)
3781 return true;
3782
3783 /* Don't issue signed overflow warnings. */
3784 fold_defer_overflow_warnings ();
3785
3786 /* Otherwise, compute the number of iterations before we reach the
3787 bound of the type, and verify that the loop is exited before this
3788 occurs. */
3789 unsigned_type = unsigned_type_for (type);
3790 base = fold_convert (unsigned_type, base);
3791
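  /* Compute DELTA, the distance from BASE to the bound of the type that the
     iv moves towards (the minimum for a negative step, the maximum
     otherwise), and STEP_ABS, the absolute value of the step, both in the
     unsigned type.  */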
3792 if (tree_int_cst_sign_bit (step))
3793 {
3794 tree extreme = fold_convert (unsigned_type,
3795 lower_bound_in_type (type, type));
3796 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3797 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
3798 fold_convert (unsigned_type, step));
3799 }
3800 else
3801 {
3802 tree extreme = fold_convert (unsigned_type,
3803 upper_bound_in_type (type, type));
3804 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3805 step_abs = fold_convert (unsigned_type, step);
3806 }
3807
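  /* VALID_NITER is the number of steps the iv can make from BASE before it
     would leave the range of TYPE.  */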
3808 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
3809
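  /* Make sure the iteration estimates for LOOP have been computed before
     querying them below.  */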
3810 estimate_numbers_of_iterations_loop (loop);
3811
3812 if (max_loop_iterations (loop, &niter)
3813 && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter)
3814 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
3815 double_int_to_tree (TREE_TYPE (valid_niter),
3816 niter))) != NULL
3817 && integer_nonzerop (e))
3818 {
3819 fold_undefer_and_ignore_overflow_warnings ();
3820 return false;
3821 }
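  /* The overall estimate did not help; check whether one of the bounds
     recorded for individual statements limits the number of executions of
     AT_STMT sufficiently.  */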
3822 if (at_stmt)
3823 for (bound = loop->bounds; bound; bound = bound->next)
3824 {
3825 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
3826 {
3827 fold_undefer_and_ignore_overflow_warnings ();
3828 return false;
3829 }
3830 }
3831
3832 fold_undefer_and_ignore_overflow_warnings ();
3833
3834 /* At this point we still don't have a proof that the iv does not
3835 overflow: give up. */
3836 return true;
3837 }
3838
3839 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
3840
3841 void
3842 free_numbers_of_iterations_estimates_loop (struct loop *loop)
3843 {
3844 struct nb_iter_bound *bound, *next;
3845
3846 loop->nb_iterations = NULL;
3847 loop->estimate_state = EST_NOT_COMPUTED;
3848 for (bound = loop->bounds; bound; bound = next)
3849 {
3850 next = bound->next;
3851 ggc_free (bound);
3852 }
3853
3854 loop->bounds = NULL;
3855 }
3856
3857 /* Frees the information on upper bounds on numbers of iterations of loops. */
3858
3859 void
3860 free_numbers_of_iterations_estimates (void)
3861 {
3862 loop_iterator li;
3863 struct loop *loop;
3864
3865 FOR_EACH_LOOP (li, loop, 0)
3866 {
3867 free_numbers_of_iterations_estimates_loop (loop);
3868 }
3869 }
3870
3871 /* Substitute value VAL for ssa name NAME inside expressions held
3872 at LOOP. */
3873
3874 void
3875 substitute_in_loop_info (struct loop *loop, tree name, tree val)
3876 {
3877 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
3878 }