gcc/tree-ssa-loop-niter.c
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "stor-layout.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "intl.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-niter.h"
40 #include "tree-ssa-loop.h"
41 #include "cfgloop.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "tree-dfa.h"
45
46
47 /* The maximum number of dominator BBs we search for conditions
48 of loop header copies we use for simplifying a conditional
49 expression. */
50 #define MAX_DOMINATORS_TO_WALK 8
51
52 /*
53
54 Analysis of number of iterations of an affine exit test.
55
56 */
57
58 /* Bounds on some value, BELOW <= X <= UP. */
59
60 struct bounds
61 {
62 mpz_t below, up;
63 };
64
65 static bool number_of_iterations_popcount (loop_p loop, edge exit,
66 enum tree_code code,
67 class tree_niter_desc *niter);
68
69
70 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
71
72 static void
73 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
74 {
75 tree type = TREE_TYPE (expr);
76 tree op0, op1;
77 bool negate = false;
78
79 *var = expr;
80 mpz_set_ui (offset, 0);
81
82 switch (TREE_CODE (expr))
83 {
84 case MINUS_EXPR:
85 negate = true;
86 /* Fallthru. */
87
88 case PLUS_EXPR:
89 case POINTER_PLUS_EXPR:
90 op0 = TREE_OPERAND (expr, 0);
91 op1 = TREE_OPERAND (expr, 1);
92
93 if (TREE_CODE (op1) != INTEGER_CST)
94 break;
95
96 *var = op0;
97 /* Always sign extend the offset. */
98 wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
99 if (negate)
100 mpz_neg (offset, offset);
101 break;
102
103 case INTEGER_CST:
104 *var = build_int_cst_type (type, 0);
105 wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
106 break;
107
108 default:
109 break;
110 }
111 }
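
/* A minimal standalone sketch (not GCC code; guarded out of the build) of
   the split performed above, on a toy expression type instead of trees:
   "i + 5" yields (i, 5), "i - 5" yields (i, -5), a constant "7" yields
   (NULL, 7), and anything else is left as (expr, 0).  All names below are
   hypothetical.  */
#if 0
#include <stdio.h>

enum toy_code { TOY_VAR, TOY_CST, TOY_PLUS, TOY_MINUS };

struct toy_expr
{
  enum toy_code code;
  const char *name;		/* for TOY_VAR */
  long value;			/* for TOY_CST */
  struct toy_expr *op0, *op1;	/* for TOY_PLUS / TOY_MINUS */
};

static void
toy_split (struct toy_expr *expr, struct toy_expr **var, long *offset)
{
  *var = expr;
  *offset = 0;
  switch (expr->code)
    {
    case TOY_MINUS:
    case TOY_PLUS:
      if (expr->op1->code != TOY_CST)
	break;
      *var = expr->op0;
      *offset = (expr->code == TOY_MINUS
		 ? -expr->op1->value : expr->op1->value);
      break;
    case TOY_CST:
      *var = NULL;
      *offset = expr->value;
      break;
    default:
      break;
    }
}

int
main (void)
{
  struct toy_expr i = { TOY_VAR, "i", 0, NULL, NULL };
  struct toy_expr c = { TOY_CST, NULL, 5, NULL, NULL };
  struct toy_expr sum = { TOY_PLUS, NULL, 0, &i, &c };
  struct toy_expr *var;
  long off;
  toy_split (&sum, &var, &off);
  printf ("var = %s, offset = %ld\n", var ? var->name : "(null)", off);
  return 0;
}
#endif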
112
113 /* From condition C0 CMP C1 derives information regarding the value range
114 of VAR, which is of TYPE. Results are stored to BELOW and UP. */
115
116 static void
117 refine_value_range_using_guard (tree type, tree var,
118 tree c0, enum tree_code cmp, tree c1,
119 mpz_t below, mpz_t up)
120 {
121 tree varc0, varc1, ctype;
122 mpz_t offc0, offc1;
123 mpz_t mint, maxt, minc1, maxc1;
124 wide_int minv, maxv;
125 bool no_wrap = nowrap_type_p (type);
126 bool c0_ok, c1_ok;
127 signop sgn = TYPE_SIGN (type);
128
129 switch (cmp)
130 {
131 case LT_EXPR:
132 case LE_EXPR:
133 case GT_EXPR:
134 case GE_EXPR:
135 STRIP_SIGN_NOPS (c0);
136 STRIP_SIGN_NOPS (c1);
137 ctype = TREE_TYPE (c0);
138 if (!useless_type_conversion_p (ctype, type))
139 return;
140
141 break;
142
143 case EQ_EXPR:
144 /* We could derive quite precise information from EQ_EXPR; however,
145 such a guard is unlikely to appear, so we do not bother with
146 handling it. */
147 return;
148
149 case NE_EXPR:
150 /* NE_EXPR comparisons do not contain much useful information,
151 except for cases of comparing with bounds. */
152 if (TREE_CODE (c1) != INTEGER_CST
153 || !INTEGRAL_TYPE_P (type))
154 return;
155
156 /* Ensure that the condition speaks about an expression in the same
157 type as VAR. */
158 ctype = TREE_TYPE (c0);
159 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
160 return;
161 c0 = fold_convert (type, c0);
162 c1 = fold_convert (type, c1);
163
164 if (operand_equal_p (var, c0, 0))
165 {
166 mpz_t valc1;
167
168 /* Case of comparing VAR with its below/up bounds. */
169 mpz_init (valc1);
170 wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
171 if (mpz_cmp (valc1, below) == 0)
172 cmp = GT_EXPR;
173 if (mpz_cmp (valc1, up) == 0)
174 cmp = LT_EXPR;
175
176 mpz_clear (valc1);
177 }
178 else
179 {
180 /* Case of comparing with the bounds of the type. */
181 wide_int min = wi::min_value (type);
182 wide_int max = wi::max_value (type);
183
184 if (wi::to_wide (c1) == min)
185 cmp = GT_EXPR;
186 if (wi::to_wide (c1) == max)
187 cmp = LT_EXPR;
188 }
189
190 /* Quick return if no useful information. */
191 if (cmp == NE_EXPR)
192 return;
193
194 break;
195
196 default:
197 return;
198 }
199
200 mpz_init (offc0);
201 mpz_init (offc1);
202 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
203 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
204
205 /* We are only interested in comparisons of expressions based on VAR. */
206 if (operand_equal_p (var, varc1, 0))
207 {
208 std::swap (varc0, varc1);
209 mpz_swap (offc0, offc1);
210 cmp = swap_tree_comparison (cmp);
211 }
212 else if (!operand_equal_p (var, varc0, 0))
213 {
214 mpz_clear (offc0);
215 mpz_clear (offc1);
216 return;
217 }
218
219 mpz_init (mint);
220 mpz_init (maxt);
221 get_type_static_bounds (type, mint, maxt);
222 mpz_init (minc1);
223 mpz_init (maxc1);
224 /* Setup range information for varc1. */
225 if (integer_zerop (varc1))
226 {
227 wi::to_mpz (0, minc1, TYPE_SIGN (type));
228 wi::to_mpz (0, maxc1, TYPE_SIGN (type));
229 }
230 else if (TREE_CODE (varc1) == SSA_NAME
231 && INTEGRAL_TYPE_P (type)
232 && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
233 {
234 gcc_assert (wi::le_p (minv, maxv, sgn));
235 wi::to_mpz (minv, minc1, sgn);
236 wi::to_mpz (maxv, maxc1, sgn);
237 }
238 else
239 {
240 mpz_set (minc1, mint);
241 mpz_set (maxc1, maxt);
242 }
243
244 /* Compute valid range information for varc1 + offc1. Note that nothing
245 useful can be derived if it overflows or underflows. Overflow or
246 underflow could happen when:
247
248 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
249 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
250 mpz_add (minc1, minc1, offc1);
251 mpz_add (maxc1, maxc1, offc1);
252 c1_ok = (no_wrap
253 || mpz_sgn (offc1) == 0
254 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
255 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
256 if (!c1_ok)
257 goto end;
258
259 if (mpz_cmp (minc1, mint) < 0)
260 mpz_set (minc1, mint);
261 if (mpz_cmp (maxc1, maxt) > 0)
262 mpz_set (maxc1, maxt);
263
264 if (cmp == LT_EXPR)
265 {
266 cmp = LE_EXPR;
267 mpz_sub_ui (maxc1, maxc1, 1);
268 }
269 if (cmp == GT_EXPR)
270 {
271 cmp = GE_EXPR;
272 mpz_add_ui (minc1, minc1, 1);
273 }
274
275 /* Compute range information for varc0. If there is no overflow,
276 the condition implied that
277
278 (varc0) cmp (varc1 + offc1 - offc0)
279
280 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
281 or the below bound if cmp is GE_EXPR.
282
283 To prove there is no overflow/underflow, we need to check the
284 four cases below:
285 1) cmp == LE_EXPR && offc0 > 0
286
287 (varc0 + offc0) doesn't overflow
288 && (varc1 + offc1 - offc0) doesn't underflow
289
290 2) cmp == LE_EXPR && offc0 < 0
291
292 (varc0 + offc0) doesn't underflow
292 && (varc1 + offc1 - offc0) doesn't overflow
294
295 In this case, (varc0 + offc0) will never underflow if we can
296 prove (varc1 + offc1 - offc0) doesn't overflow.
297
298 3) cmp == GE_EXPR && offc0 < 0
299
300 (varc0 + offc0) doesn't underflow
301 && (varc1 + offc1 - offc0) doesn't overflow
302
303 4) cmp == GE_EXPR && offc0 > 0
304
305 (varc0 + offc0) doesn't overflow
306 && (varc1 + offc1 - offc0) doesn't underflow
307
308 In this case, (varc0 + offc0) will never overflow if we can
309 prove (varc1 + offc1 - offc0) doesn't underflow.
310
311 Note we only handle cases 2 and 4 in the code below. */
312
313 mpz_sub (minc1, minc1, offc0);
314 mpz_sub (maxc1, maxc1, offc0);
315 c0_ok = (no_wrap
316 || mpz_sgn (offc0) == 0
317 || (cmp == LE_EXPR
318 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
319 || (cmp == GE_EXPR
320 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
321 if (!c0_ok)
322 goto end;
323
324 if (cmp == LE_EXPR)
325 {
326 if (mpz_cmp (up, maxc1) > 0)
327 mpz_set (up, maxc1);
328 }
329 else
330 {
331 if (mpz_cmp (below, minc1) < 0)
332 mpz_set (below, minc1);
333 }
334
335 end:
336 mpz_clear (mint);
337 mpz_clear (maxt);
338 mpz_clear (minc1);
339 mpz_clear (maxc1);
340 mpz_clear (offc0);
341 mpz_clear (offc1);
342 }
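
/* A standalone sketch (not GCC code; guarded out of the build) of the
   simplest refinement performed above: a dominating guard "var < c1" with
   constant c1, no offsets and no overflow concerns, clamps the upper end
   of the range.  The helper name is hypothetical.  */
#if 0
#include <gmp.h>
#include <stdio.h>

static void
toy_refine_upper (mpz_t up, const mpz_t c1, int strict)
{
  mpz_t b;
  mpz_init_set (b, c1);
  if (strict)			/* var < c1  =>  var <= c1 - 1 */
    mpz_sub_ui (b, b, 1);
  if (mpz_cmp (up, b) > 0)
    mpz_set (up, b);
  mpz_clear (b);
}

int
main (void)
{
  mpz_t below, up, c1;
  mpz_init_set_si (below, 0);	/* start from the type's static bounds */
  mpz_init_set_si (up, 255);
  mpz_init_set_si (c1, 100);
  toy_refine_upper (up, c1, 1);	/* guard: var < 100 */
  gmp_printf ("[%Zd, %Zd]\n", below, up);	/* [0, 99] */
  mpz_clears (below, up, c1, NULL);
  return 0;
}
#endif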
343
344 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
345 in TYPE to MIN and MAX. */
346
347 static void
348 determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
349 mpz_t min, mpz_t max)
350 {
351 int cnt = 0;
352 mpz_t minm, maxm;
353 basic_block bb;
354 wide_int minv, maxv;
355 enum value_range_kind rtype = VR_VARYING;
356
357 /* If the expression is a constant, we know its value exactly. */
358 if (integer_zerop (var))
359 {
360 mpz_set (min, off);
361 mpz_set (max, off);
362 return;
363 }
364
365 get_type_static_bounds (type, min, max);
366
367 /* See if we have some range info from VRP. */
368 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
369 {
370 edge e = loop_preheader_edge (loop);
371 signop sgn = TYPE_SIGN (type);
372 gphi_iterator gsi;
373
374 /* Either for VAR itself... */
375 rtype = get_range_info (var, &minv, &maxv);
376 /* Or for PHI results in loop->header where VAR is used as
377 PHI argument from the loop preheader edge. */
378 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
379 {
380 gphi *phi = gsi.phi ();
381 wide_int minc, maxc;
382 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
383 && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
384 == VR_RANGE))
385 {
386 if (rtype != VR_RANGE)
387 {
388 rtype = VR_RANGE;
389 minv = minc;
390 maxv = maxc;
391 }
392 else
393 {
394 minv = wi::max (minv, minc, sgn);
395 maxv = wi::min (maxv, maxc, sgn);
396 /* If the PHI result range is inconsistent with
397 the VAR range, give up on looking at the PHI
398 results. This can happen if VR_UNDEFINED is
399 involved. */
400 if (wi::gt_p (minv, maxv, sgn))
401 {
402 rtype = get_range_info (var, &minv, &maxv);
403 break;
404 }
405 }
406 }
407 }
408 mpz_init (minm);
409 mpz_init (maxm);
410 if (rtype != VR_RANGE)
411 {
412 mpz_set (minm, min);
413 mpz_set (maxm, max);
414 }
415 else
416 {
417 gcc_assert (wi::le_p (minv, maxv, sgn));
418 wi::to_mpz (minv, minm, sgn);
419 wi::to_mpz (maxv, maxm, sgn);
420 }
421 /* Now walk the dominators of the loop header and use the entry
422 guards to refine the estimates. */
423 for (bb = loop->header;
424 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
425 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
426 {
427 edge e;
428 tree c0, c1;
429 gimple *cond;
430 enum tree_code cmp;
431
432 if (!single_pred_p (bb))
433 continue;
434 e = single_pred_edge (bb);
435
436 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
437 continue;
438
439 cond = last_stmt (e->src);
440 c0 = gimple_cond_lhs (cond);
441 cmp = gimple_cond_code (cond);
442 c1 = gimple_cond_rhs (cond);
443
444 if (e->flags & EDGE_FALSE_VALUE)
445 cmp = invert_tree_comparison (cmp, false);
446
447 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
448 ++cnt;
449 }
450
451 mpz_add (minm, minm, off);
452 mpz_add (maxm, maxm, off);
453 /* If the computation may not wrap or off is zero, then this
454 is always fine. If off is negative and minm isn't smaller
455 than the type's minimum, or off is positive and maxm isn't
456 bigger than the type's maximum, use the more precise range
457 too. */
458 if (nowrap_type_p (type)
459 || mpz_sgn (off) == 0
460 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
461 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
462 {
463 mpz_set (min, minm);
464 mpz_set (max, maxm);
465 mpz_clear (minm);
466 mpz_clear (maxm);
467 return;
468 }
469 mpz_clear (minm);
470 mpz_clear (maxm);
471 }
472
473 /* If the computation may wrap, we know nothing about the value, except for
474 the range of the type. */
475 if (!nowrap_type_p (type))
476 return;
477
478 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
479 add it to MIN, otherwise to MAX. */
480 if (mpz_sgn (off) < 0)
481 mpz_add (max, max, off);
482 else
483 mpz_add (min, min, off);
484 }
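
/* A standalone sketch (not GCC code; guarded out of the build) of the
   final adjustment above, in plain integers: once the type is known not
   to wrap, adding a positive OFF can only raise the minimum, and a
   negative OFF can only lower the maximum.  */
#if 0
#include <stdio.h>

int
main (void)
{
  long min = 0, max = 127;	/* static bounds of a hypothetical type */
  long off = 5;
  if (off < 0)
    max += off;
  else
    min += off;
  printf ("[%ld, %ld]\n", min, max);	/* [5, 127] */
  return 0;
}
#endif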
485
486 /* Stores the bounds on the difference of the values of the expressions
487 (var + X) and (var + Y), computed in TYPE, to BNDS. */
488
489 static void
490 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
491 bounds *bnds)
492 {
493 int rel = mpz_cmp (x, y);
494 bool may_wrap = !nowrap_type_p (type);
495 mpz_t m;
496
497 /* If X == Y, then the expressions are always equal.
498 If X > Y, there are the following possibilities:
499 a) neither of var + X and var + Y overflow or underflow, or both of
500 them do. Then their difference is X - Y.
501 b) var + X overflows, and var + Y does not. Then the values of the
502 expressions are var + X - M and var + Y, where M is the range of
503 the type, and their difference is X - Y - M.
504 c) var + Y underflows and var + X does not. Their difference again
505 is X - Y - M.
506 Therefore, if the arithmetic in TYPE does not overflow, then the
507 bounds are (X - Y, X - Y); otherwise they are (X - Y - M, X - Y).
508 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
509 (X - Y, X - Y + M). */
510
511 if (rel == 0)
512 {
513 mpz_set_ui (bnds->below, 0);
514 mpz_set_ui (bnds->up, 0);
515 return;
516 }
517
518 mpz_init (m);
519 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
520 mpz_add_ui (m, m, 1);
521 mpz_sub (bnds->up, x, y);
522 mpz_set (bnds->below, bnds->up);
523
524 if (may_wrap)
525 {
526 if (rel > 0)
527 mpz_sub (bnds->below, bnds->below, m);
528 else
529 mpz_add (bnds->up, bnds->up, m);
530 }
531
532 mpz_clear (m);
533 }
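
/* A worked instance of the case analysis above, as a standalone sketch
   (not GCC code; guarded out of the build): for (var + 10) - (var + 3)
   in a wrapping 8-bit type, M = 256 and X - Y = 7, so the bounds are
   (7 - 256, 7) = (-249, 7).  */
#if 0
#include <gmp.h>
#include <stdio.h>

int
main (void)
{
  mpz_t below, up, m;
  mpz_init_set_si (up, 10 - 3);
  mpz_init_set (below, up);
  mpz_init_set_ui (m, 256);
  mpz_sub (below, below, m);	/* X > Y and the type may wrap */
  gmp_printf ("(%Zd, %Zd)\n", below, up);	/* (-249, 7) */
  mpz_clears (below, up, m, NULL);
  return 0;
}
#endif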
534
535 /* From condition C0 CMP C1 derives information regarding the
536 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
537 and stores it to BNDS. */
538
539 static void
540 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
541 tree vary, mpz_t offy,
542 tree c0, enum tree_code cmp, tree c1,
543 bounds *bnds)
544 {
545 tree varc0, varc1, ctype;
546 mpz_t offc0, offc1, loffx, loffy, bnd;
547 bool lbound = false;
548 bool no_wrap = nowrap_type_p (type);
549 bool x_ok, y_ok;
550
551 switch (cmp)
552 {
553 case LT_EXPR:
554 case LE_EXPR:
555 case GT_EXPR:
556 case GE_EXPR:
557 STRIP_SIGN_NOPS (c0);
558 STRIP_SIGN_NOPS (c1);
559 ctype = TREE_TYPE (c0);
560 if (!useless_type_conversion_p (ctype, type))
561 return;
562
563 break;
564
565 case EQ_EXPR:
566 /* We could derive quite precise information from EQ_EXPR; however, such
567 a guard is unlikely to appear, so we do not bother with handling
568 it. */
569 return;
570
571 case NE_EXPR:
572 /* NE_EXPR comparisons do not contain much useful information, except
573 for the special case of comparing with the bounds of the type. */
574 if (TREE_CODE (c1) != INTEGER_CST
575 || !INTEGRAL_TYPE_P (type))
576 return;
577
578 /* Ensure that the condition speaks about an expression in the same type
579 as X and Y. */
580 ctype = TREE_TYPE (c0);
581 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
582 return;
583 c0 = fold_convert (type, c0);
584 c1 = fold_convert (type, c1);
585
586 if (TYPE_MIN_VALUE (type)
587 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
588 {
589 cmp = GT_EXPR;
590 break;
591 }
592 if (TYPE_MAX_VALUE (type)
593 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
594 {
595 cmp = LT_EXPR;
596 break;
597 }
598
599 return;
600 default:
601 return;
602 }
603
604 mpz_init (offc0);
605 mpz_init (offc1);
606 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
607 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
608
609 /* We are only interested in comparisons of expressions based on VARX and
610 VARY. TODO -- we might also be able to derive some bounds from
611 expressions containing just one of the variables. */
612
613 if (operand_equal_p (varx, varc1, 0))
614 {
615 std::swap (varc0, varc1);
616 mpz_swap (offc0, offc1);
617 cmp = swap_tree_comparison (cmp);
618 }
619
620 if (!operand_equal_p (varx, varc0, 0)
621 || !operand_equal_p (vary, varc1, 0))
622 goto end;
623
624 mpz_init_set (loffx, offx);
625 mpz_init_set (loffy, offy);
626
627 if (cmp == GT_EXPR || cmp == GE_EXPR)
628 {
629 std::swap (varx, vary);
630 mpz_swap (offc0, offc1);
631 mpz_swap (loffx, loffy);
632 cmp = swap_tree_comparison (cmp);
633 lbound = true;
634 }
635
636 /* If there is no overflow, the condition implies that
637
638 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
639
640 The overflows and underflows may complicate things a bit; each
641 overflow decreases the appropriate offset by M, and underflow
642 increases it by M. The above inequality would not necessarily be
643 true if
644
645 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
646 VARX + OFFC0 overflows, but VARX + OFFX does not.
647 This may only happen if OFFX < OFFC0.
648 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
649 VARY + OFFC1 underflows and VARY + OFFY does not.
650 This may only happen if OFFY > OFFC1. */
651
652 if (no_wrap)
653 {
654 x_ok = true;
655 y_ok = true;
656 }
657 else
658 {
659 x_ok = (integer_zerop (varx)
660 || mpz_cmp (loffx, offc0) >= 0);
661 y_ok = (integer_zerop (vary)
662 || mpz_cmp (loffy, offc1) <= 0);
663 }
664
665 if (x_ok && y_ok)
666 {
667 mpz_init (bnd);
668 mpz_sub (bnd, loffx, loffy);
669 mpz_add (bnd, bnd, offc1);
670 mpz_sub (bnd, bnd, offc0);
671
672 if (cmp == LT_EXPR)
673 mpz_sub_ui (bnd, bnd, 1);
674
675 if (lbound)
676 {
677 mpz_neg (bnd, bnd);
678 if (mpz_cmp (bnds->below, bnd) < 0)
679 mpz_set (bnds->below, bnd);
680 }
681 else
682 {
683 if (mpz_cmp (bnd, bnds->up) < 0)
684 mpz_set (bnds->up, bnd);
685 }
686 mpz_clear (bnd);
687 }
688
689 mpz_clear (loffx);
690 mpz_clear (loffy);
691 end:
692 mpz_clear (offc0);
693 mpz_clear (offc1);
694 }
695
696 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
697 The subtraction is considered to be performed in arbitrary precision,
698 without overflows.
699
700 We do not attempt to be too clever regarding the value ranges of X and
701 Y; most of the time, they are just integers or ssa names offset by an
702 integer. However, we try to use the information contained in the
703 comparisons before the loop (usually created by loop header copying). */
704
705 static void
706 bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
707 {
708 tree type = TREE_TYPE (x);
709 tree varx, vary;
710 mpz_t offx, offy;
711 mpz_t minx, maxx, miny, maxy;
712 int cnt = 0;
713 edge e;
714 basic_block bb;
715 tree c0, c1;
716 gimple *cond;
717 enum tree_code cmp;
718
719 /* Get rid of unnecessary casts, but preserve the value of
720 the expressions. */
721 STRIP_SIGN_NOPS (x);
722 STRIP_SIGN_NOPS (y);
723
724 mpz_init (bnds->below);
725 mpz_init (bnds->up);
726 mpz_init (offx);
727 mpz_init (offy);
728 split_to_var_and_offset (x, &varx, offx);
729 split_to_var_and_offset (y, &vary, offy);
730
731 if (!integer_zerop (varx)
732 && operand_equal_p (varx, vary, 0))
733 {
734 /* Special case VARX == VARY -- we just need to compare the
735 offsets. Matters are a bit more complicated when the
736 addition of the offsets may wrap. */
737 bound_difference_of_offsetted_base (type, offx, offy, bnds);
738 }
739 else
740 {
741 /* Otherwise, use the value ranges to determine the initial
742 estimates on below and up. */
743 mpz_init (minx);
744 mpz_init (maxx);
745 mpz_init (miny);
746 mpz_init (maxy);
747 determine_value_range (loop, type, varx, offx, minx, maxx);
748 determine_value_range (loop, type, vary, offy, miny, maxy);
749
750 mpz_sub (bnds->below, minx, maxy);
751 mpz_sub (bnds->up, maxx, miny);
752 mpz_clear (minx);
753 mpz_clear (maxx);
754 mpz_clear (miny);
755 mpz_clear (maxy);
756 }
757
758 /* If both X and Y are constants, we cannot get any more precise. */
759 if (integer_zerop (varx) && integer_zerop (vary))
760 goto end;
761
762 /* Now walk the dominators of the loop header and use the entry
763 guards to refine the estimates. */
764 for (bb = loop->header;
765 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
766 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
767 {
768 if (!single_pred_p (bb))
769 continue;
770 e = single_pred_edge (bb);
771
772 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
773 continue;
774
775 cond = last_stmt (e->src);
776 c0 = gimple_cond_lhs (cond);
777 cmp = gimple_cond_code (cond);
778 c1 = gimple_cond_rhs (cond);
779
780 if (e->flags & EDGE_FALSE_VALUE)
781 cmp = invert_tree_comparison (cmp, false);
782
783 refine_bounds_using_guard (type, varx, offx, vary, offy,
784 c0, cmp, c1, bnds);
785 ++cnt;
786 }
787
788 end:
789 mpz_clear (offx);
790 mpz_clear (offy);
791 }
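
/* A standalone sketch (not GCC code; guarded out of the build) of the
   initial estimate used above when the variable parts differ: from
   x in [minx, maxx] and y in [miny, maxy], x - y lies in
   [minx - maxy, maxx - miny].  */
#if 0
#include <stdio.h>

int
main (void)
{
  long minx = 0, maxx = 10, miny = 3, maxy = 5;
  printf ("x - y in [%ld, %ld]\n", minx - maxy, maxx - miny);	/* [-5, 7] */
  return 0;
}
#endif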
792
793 /* Update the bounds in BNDS that restrict the value of X to the bounds
794 that restrict the value of X + DELTA. X can be obtained as a
795 difference of two values in TYPE. */
796
797 static void
798 bounds_add (bounds *bnds, const widest_int &delta, tree type)
799 {
800 mpz_t mdelta, max;
801
802 mpz_init (mdelta);
803 wi::to_mpz (delta, mdelta, SIGNED);
804
805 mpz_init (max);
806 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
807
808 mpz_add (bnds->up, bnds->up, mdelta);
809 mpz_add (bnds->below, bnds->below, mdelta);
810
811 if (mpz_cmp (bnds->up, max) > 0)
812 mpz_set (bnds->up, max);
813
814 mpz_neg (max, max);
815 if (mpz_cmp (bnds->below, max) < 0)
816 mpz_set (bnds->below, max);
817
818 mpz_clear (mdelta);
819 mpz_clear (max);
820 }
821
822 /* Update the bounds in BNDS that restrict the value of X to the bounds
823 that restrict the value of -X. */
824
825 static void
826 bounds_negate (bounds *bnds)
827 {
828 mpz_t tmp;
829
830 mpz_init_set (tmp, bnds->up);
831 mpz_neg (bnds->up, bnds->below);
832 mpz_neg (bnds->below, tmp);
833 mpz_clear (tmp);
834 }
835
836 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
837
838 static tree
839 inverse (tree x, tree mask)
840 {
841 tree type = TREE_TYPE (x);
842 tree rslt;
843 unsigned ctr = tree_floor_log2 (mask);
844
845 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
846 {
847 unsigned HOST_WIDE_INT ix;
848 unsigned HOST_WIDE_INT imask;
849 unsigned HOST_WIDE_INT irslt = 1;
850
851 gcc_assert (cst_and_fits_in_hwi (x));
852 gcc_assert (cst_and_fits_in_hwi (mask));
853
854 ix = int_cst_value (x);
855 imask = int_cst_value (mask);
856
857 for (; ctr; ctr--)
858 {
859 irslt *= ix;
860 ix *= ix;
861 }
862 irslt &= imask;
863
864 rslt = build_int_cst_type (type, irslt);
865 }
866 else
867 {
868 rslt = build_int_cst (type, 1);
869 for (; ctr; ctr--)
870 {
871 rslt = int_const_binop (MULT_EXPR, rslt, x);
872 x = int_const_binop (MULT_EXPR, x, x);
873 }
874 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
875 }
876
877 return rslt;
878 }
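
/* A standalone sketch (not GCC code; guarded out of the build) of the
   same square-and-multiply scheme specialized to 64 bits: the loop
   computes x^(2^63 - 1), and since the order of every odd x modulo 2^64
   divides 2^62, x * x^(2^63 - 1) = x^(2^63) = 1.  */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t
inverse_u64 (uint64_t x)	/* X must be odd */
{
  uint64_t r = 1;
  for (unsigned ctr = 63; ctr; ctr--)
    {
      r *= x;
      x *= x;	/* unsigned wraparound is the mod-2^64 reduction */
    }
  return r;
}

int
main (void)
{
  uint64_t x = 12345;
  printf ("%d\n", (int) (x * inverse_u64 (x) == 1));	/* 1 */
  return 0;
}
#endif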
879
880 /* Derives the upper bound BND on the number of executions of loop with exit
881 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
882 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
884 that the loop ends through this exit, i.e., the induction variable
884 reaches the value of C.
885
886 The value C is equal to final - base, where final and base are the final and
887 initial value of the actual induction variable in the analysed loop. BNDS
888 bounds the value of this difference when computed in signed type with
889 unbounded range, while the computation of C is performed in an unsigned
890 type with the range matching the range of the type of the induction variable.
891 In particular, BNDS.up contains an upper bound on C in the following cases:
892 -- if the iv must reach its final value without overflow, i.e., if
893 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
894 -- if final >= base, which we know to hold when BNDS.below >= 0. */
895
896 static void
897 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
898 bounds *bnds, bool exit_must_be_taken)
899 {
900 widest_int max;
901 mpz_t d;
902 tree type = TREE_TYPE (c);
903 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
904 || mpz_sgn (bnds->below) >= 0);
905
906 if (integer_onep (s)
907 || (TREE_CODE (c) == INTEGER_CST
908 && TREE_CODE (s) == INTEGER_CST
909 && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
910 TYPE_SIGN (type)) == 0)
911 || (TYPE_OVERFLOW_UNDEFINED (type)
912 && multiple_of_p (type, c, s)))
913 {
914 /* If C is an exact multiple of S, then its value will be reached before
915 the induction variable overflows (unless the loop is exited in some
916 other way before). Note that the actual induction variable in the
917 loop (which ranges from base to final instead of from 0 to C) may
918 overflow, in which case BNDS.up will not be giving a correct upper
919 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
920 no_overflow = true;
921 exit_must_be_taken = true;
922 }
923
924 /* If the induction variable can overflow, the number of iterations is at
925 most the period of the control variable (or infinite, but in that case
926 the whole # of iterations analysis will fail). */
927 if (!no_overflow)
928 {
929 max = wi::mask <widest_int> (TYPE_PRECISION (type)
930 - wi::ctz (wi::to_wide (s)), false);
931 wi::to_mpz (max, bnd, UNSIGNED);
932 return;
933 }
934
935 /* Now we know that the induction variable does not overflow, so the loop
936 iterates at most (range of type / S) times. */
937 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
938
939 /* If the induction variable is guaranteed to reach the value of C before
940 overflow, ... */
941 if (exit_must_be_taken)
942 {
943 /* ... then we can strengthen this to C / S, and possibly we can use
944 the upper bound on C given by BNDS. */
945 if (TREE_CODE (c) == INTEGER_CST)
946 wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
947 else if (bnds_u_valid)
948 mpz_set (bnd, bnds->up);
949 }
950
951 mpz_init (d);
952 wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
953 mpz_fdiv_q (bnd, bnd, d);
954 mpz_clear (d);
955 }
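
/* A standalone check (not GCC code; guarded out of the build) of the
   "period of the control variable" bound above: with an 8-bit IV and
   step 4, the IV cycles through 2^(8 - ctz(4)) = 64 distinct values,
   so an exit test "i != c" can run at most 63 times before either
   exiting or repeating forever.  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint8_t i = 1;
  const uint8_t start = i;
  unsigned period = 0;
  do
    {
      i = (uint8_t) (i + 4);
      period++;
    }
  while (i != start);
  printf ("period = %u\n", period);	/* 64 */
  return 0;
}
#endif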
956
957 /* Determines number of iterations of loop whose ending condition
958 is IV <> FINAL. TYPE is the type of the iv. The number of
959 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
960 we know that the exit must be taken eventually, i.e., that the IV
961 reaches the value FINAL (we derived this earlier, and possibly set
962 NITER->assumptions to make sure this is the case). BNDS contains the
963 bounds on the difference FINAL - IV->base. */
964
965 static bool
966 number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
967 tree final, class tree_niter_desc *niter,
968 bool exit_must_be_taken, bounds *bnds)
969 {
970 tree niter_type = unsigned_type_for (type);
971 tree s, c, d, bits, assumption, tmp, bound;
972 mpz_t max;
973
974 niter->control = *iv;
975 niter->bound = final;
976 niter->cmp = NE_EXPR;
977
978 /* Rearrange the terms so that we get the inequality S * i <> C, with S
979 positive. Also cast everything to the unsigned type. If IV does
980 not overflow, BNDS bounds the value of C. Also, this is the
981 case if the computation |FINAL - IV->base| does not overflow, i.e.,
982 if BNDS->below in the result is nonnegative. */
983 if (tree_int_cst_sign_bit (iv->step))
984 {
985 s = fold_convert (niter_type,
986 fold_build1 (NEGATE_EXPR, type, iv->step));
987 c = fold_build2 (MINUS_EXPR, niter_type,
988 fold_convert (niter_type, iv->base),
989 fold_convert (niter_type, final));
990 bounds_negate (bnds);
991 }
992 else
993 {
994 s = fold_convert (niter_type, iv->step);
995 c = fold_build2 (MINUS_EXPR, niter_type,
996 fold_convert (niter_type, final),
997 fold_convert (niter_type, iv->base));
998 }
999
1000 mpz_init (max);
1001 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
1002 exit_must_be_taken);
1003 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
1004 TYPE_SIGN (niter_type));
1005 mpz_clear (max);
1006
1007 /* Compute no-overflow information for the control iv. This can be
1008 proven when the two conditions below are satisfied:
1009
1010 1) IV evaluates toward FINAL from the beginning, i.e.:
1011 base <= FINAL ; step > 0
1012 base >= FINAL ; step < 0
1013
1014 2) |FINAL - base| is an exact multiple of step.
1015
1016 Unfortunately, it's hard to prove the above conditions after the
1017 loop-ch pass, because a loop with exit condition (IV != FINAL) will
1018 usually be guarded by the initial condition (IV.base - IV.step != FINAL).
1019 In this case, we can alternatively try to prove the conditions below:
1020
1021 1') IV evaluates toward FINAL from the beginning, i.e.:
1022 new_base = base - step < FINAL ; step > 0
1023 && base - step doesn't underflow
1024 new_base = base - step > FINAL ; step < 0
1025 && base - step doesn't overflow
1026
1027 2') |FINAL - new_base| is an exact multiple of step.
1028
1029 See PR34114 for an example of loop-ch's impact, and PR72817 for an
1030 example of why condition 2') is necessary.
1031
1032 Note that for NE_EXPR, base equal to FINAL is a special case, in
1033 which the loop exits immediately and the iv does not overflow. */
1034 if (!niter->control.no_overflow
1035 && (integer_onep (s) || multiple_of_p (type, c, s)))
1036 {
1037 tree t, cond, new_c, relaxed_cond = boolean_false_node;
1038
1039 if (tree_int_cst_sign_bit (iv->step))
1040 {
1041 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1042 if (TREE_CODE (type) == INTEGER_TYPE)
1043 {
1044 /* Only when base - step doesn't overflow. */
1045 t = TYPE_MAX_VALUE (type);
1046 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1047 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1048 if (integer_nonzerop (t))
1049 {
1050 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1051 new_c = fold_build2 (MINUS_EXPR, niter_type,
1052 fold_convert (niter_type, t),
1053 fold_convert (niter_type, final));
1054 if (multiple_of_p (type, new_c, s))
1055 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1056 t, final);
1057 }
1058 }
1059 }
1060 else
1061 {
1062 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1063 if (TREE_CODE (type) == INTEGER_TYPE)
1064 {
1065 /* Only when base - step doesn't underflow. */
1066 t = TYPE_MIN_VALUE (type);
1067 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1068 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1069 if (integer_nonzerop (t))
1070 {
1071 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1072 new_c = fold_build2 (MINUS_EXPR, niter_type,
1073 fold_convert (niter_type, final),
1074 fold_convert (niter_type, t));
1075 if (multiple_of_p (type, new_c, s))
1076 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1077 t, final);
1078 }
1079 }
1080 }
1081
1082 t = simplify_using_initial_conditions (loop, cond);
1083 if (!t || !integer_onep (t))
1084 t = simplify_using_initial_conditions (loop, relaxed_cond);
1085
1086 if (t && integer_onep (t))
1087 niter->control.no_overflow = true;
1088 }
1089
1090 /* First the trivial cases -- when the step is 1. */
1091 if (integer_onep (s))
1092 {
1093 niter->niter = c;
1094 return true;
1095 }
1096 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1097 {
1098 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1099 return true;
1100 }
1101
1102 /* Let d = gcd (step, size of mode). If d does not divide c, the loop
1103 is infinite. Otherwise, the number of iterations is
1104 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1105 bits = num_ending_zeros (s);
1106 bound = build_low_bits_mask (niter_type,
1107 (TYPE_PRECISION (niter_type)
1108 - tree_to_uhwi (bits)));
1109
1110 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1111 build_int_cst (niter_type, 1), bits);
1112 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1113
1114 if (!exit_must_be_taken)
1115 {
1116 /* If we cannot assume that the exit is taken eventually, record the
1117 assumptions for divisibility of c. */
1118 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1119 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1120 assumption, build_int_cst (niter_type, 0));
1121 if (!integer_nonzerop (assumption))
1122 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1123 niter->assumptions, assumption);
1124 }
1125
1126 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1127 if (integer_onep (s))
1128 {
1129 niter->niter = c;
1130 }
1131 else
1132 {
1133 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1134 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1135 }
1136 return true;
1137 }
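
/* A standalone check (not GCC code; guarded out of the build) of the
   final formula above on an 8-bit example: for "i = 0; i != 10; i += 6"
   we have d = 2, c/d = 5, s/d = 3, and 3 has inverse 43 modulo 2^7, so
   niter = (5 * 43) & 127 = 87.  */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint8_t i = 0;
  unsigned n = 0;
  while (i != 10)
    {
      i = (uint8_t) (i + 6);
      n++;
    }
  printf ("%u %u\n", n, (5 * 43) & 127);	/* 87 87 */
  return 0;
}
#endif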
1138
1139 /* Checks whether we can determine the final value of the control variable
1140 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1141 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1142 of the step. The assumptions necessary to ensure that the computation
1143 of the final value does not overflow are recorded in NITER. If we
1144 find the final value, we adjust DELTA and return TRUE. Otherwise
1145 we return false. BNDS bounds the value of IV1->base - IV0->base,
1146 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1147 true if we know that the exit must be taken eventually. */
1148
1149 static bool
1150 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1151 class tree_niter_desc *niter,
1152 tree *delta, tree step,
1153 bool exit_must_be_taken, bounds *bnds)
1154 {
1155 tree niter_type = TREE_TYPE (step);
1156 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1157 tree tmod;
1158 mpz_t mmod;
1159 tree assumption = boolean_true_node, bound, noloop;
1160 bool ret = false, fv_comp_no_overflow;
1161 tree type1 = type;
1162 if (POINTER_TYPE_P (type))
1163 type1 = sizetype;
1164
1165 if (TREE_CODE (mod) != INTEGER_CST)
1166 return false;
1167 if (integer_nonzerop (mod))
1168 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
1169 tmod = fold_convert (type1, mod);
1170
1171 mpz_init (mmod);
1172 wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
1173 mpz_neg (mmod, mmod);
1174
1175 /* If the induction variable does not overflow and the exit is taken,
1176 then the computation of the final value does not overflow. This is
1177 also obviously the case if the new final value is equal to the
1178 current one. Finally, we postulate this for pointer type variables,
1179 as the code cannot rely on the object to which the pointer points being
1180 placed at the end of the address space (and more pragmatically,
1181 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
1182 if (integer_zerop (mod) || POINTER_TYPE_P (type))
1183 fv_comp_no_overflow = true;
1184 else if (!exit_must_be_taken)
1185 fv_comp_no_overflow = false;
1186 else
1187 fv_comp_no_overflow =
1188 (iv0->no_overflow && integer_nonzerop (iv0->step))
1189 || (iv1->no_overflow && integer_nonzerop (iv1->step));
1190
1191 if (integer_nonzerop (iv0->step))
1192 {
1193 /* The final value of the iv is iv1->base + MOD, assuming that this
1194 computation does not overflow, and that
1195 iv0->base <= iv1->base + MOD. */
1196 if (!fv_comp_no_overflow)
1197 {
1198 bound = fold_build2 (MINUS_EXPR, type1,
1199 TYPE_MAX_VALUE (type1), tmod);
1200 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1201 iv1->base, bound);
1202 if (integer_zerop (assumption))
1203 goto end;
1204 }
1205 if (mpz_cmp (mmod, bnds->below) < 0)
1206 noloop = boolean_false_node;
1207 else if (POINTER_TYPE_P (type))
1208 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1209 iv0->base,
1210 fold_build_pointer_plus (iv1->base, tmod));
1211 else
1212 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1213 iv0->base,
1214 fold_build2 (PLUS_EXPR, type1,
1215 iv1->base, tmod));
1216 }
1217 else
1218 {
1219 /* The final value of the iv is iv0->base - MOD, assuming that this
1220 computation does not overflow, and that
1221 iv0->base - MOD <= iv1->base. */
1222 if (!fv_comp_no_overflow)
1223 {
1224 bound = fold_build2 (PLUS_EXPR, type1,
1225 TYPE_MIN_VALUE (type1), tmod);
1226 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1227 iv0->base, bound);
1228 if (integer_zerop (assumption))
1229 goto end;
1230 }
1231 if (mpz_cmp (mmod, bnds->below) < 0)
1232 noloop = boolean_false_node;
1233 else if (POINTER_TYPE_P (type))
1234 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1235 fold_build_pointer_plus (iv0->base,
1236 fold_build1 (NEGATE_EXPR,
1237 type1, tmod)),
1238 iv1->base);
1239 else
1240 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1241 fold_build2 (MINUS_EXPR, type1,
1242 iv0->base, tmod),
1243 iv1->base);
1244 }
1245
1246 if (!integer_nonzerop (assumption))
1247 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1248 niter->assumptions,
1249 assumption);
1250 if (!integer_zerop (noloop))
1251 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1252 niter->may_be_zero,
1253 noloop);
1254 bounds_add (bnds, wi::to_widest (mod), type);
1255 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1256
1257 ret = true;
1258 end:
1259 mpz_clear (mmod);
1260 return ret;
1261 }
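
/* A standalone check (not GCC code; guarded out of the build) of the
   transformation above: "i = 0; i < 10; i += 4" has delta = 10 and
   mod = 4 - (10 % 4) = 2, so the final value becomes 10 + 2 = 12 and
   the loop is equivalent to "i != 12", giving (10 + 2) / 4 = 3
   iterations.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned n_lt = 0, n_ne = 0;
  for (unsigned i = 0; i < 10; i += 4)
    n_lt++;
  for (unsigned i = 0; i != 12; i += 4)
    n_ne++;
  printf ("%u %u\n", n_lt, n_ne);	/* 3 3 */
  return 0;
}
#endif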
1262
1263 /* Add assertions to NITER that ensure that the control variable of the loop
1264 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1265 are TYPE. Returns false if we can prove that there is an overflow, true
1266 otherwise. STEP is the absolute value of the step. */
1267
1268 static bool
1269 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1270 class tree_niter_desc *niter, tree step)
1271 {
1272 tree bound, d, assumption, diff;
1273 tree niter_type = TREE_TYPE (step);
1274
1275 if (integer_nonzerop (iv0->step))
1276 {
1277 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1278 if (iv0->no_overflow)
1279 return true;
1280
1281 /* If iv0->base is a constant, we can determine the last value before
1282 overflow precisely; otherwise we conservatively assume
1283 MAX - STEP + 1. */
1284
1285 if (TREE_CODE (iv0->base) == INTEGER_CST)
1286 {
1287 d = fold_build2 (MINUS_EXPR, niter_type,
1288 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1289 fold_convert (niter_type, iv0->base));
1290 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1291 }
1292 else
1293 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1294 build_int_cst (niter_type, 1));
1295 bound = fold_build2 (MINUS_EXPR, type,
1296 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1297 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1298 iv1->base, bound);
1299 }
1300 else
1301 {
1302 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1303 if (iv1->no_overflow)
1304 return true;
1305
1306 if (TREE_CODE (iv1->base) == INTEGER_CST)
1307 {
1308 d = fold_build2 (MINUS_EXPR, niter_type,
1309 fold_convert (niter_type, iv1->base),
1310 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1311 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1312 }
1313 else
1314 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1315 build_int_cst (niter_type, 1));
1316 bound = fold_build2 (PLUS_EXPR, type,
1317 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1318 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1319 iv0->base, bound);
1320 }
1321
1322 if (integer_zerop (assumption))
1323 return false;
1324 if (!integer_nonzerop (assumption))
1325 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1326 niter->assumptions, assumption);
1327
1328 iv0->no_overflow = true;
1329 iv1->no_overflow = true;
1330 return true;
1331 }
1332
1333 /* Add an assumption to NITER that a loop whose ending condition
1334 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1335 bounds the value of IV1->base - IV0->base. */
1336
1337 static void
1338 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1339 class tree_niter_desc *niter, bounds *bnds)
1340 {
1341 tree assumption = boolean_true_node, bound, diff;
1342 tree mbz, mbzl, mbzr, type1;
1343 bool rolls_p, no_overflow_p;
1344 widest_int dstep;
1345 mpz_t mstep, max;
1346
1347 /* We are going to compute the number of iterations as
1348 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1349 variant of TYPE. This formula only works if
1350
1351 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1352
1353 (where MAX is the maximum value of the unsigned variant of TYPE, and
1354 the computations in this formula are performed in full precision,
1355 i.e., without overflows).
1356
1357 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1358 we have a condition of the form iv0->base - step < iv1->base before the loop,
1359 and for loops iv0->base < iv1->base - step * i the condition
1360 iv0->base < iv1->base + step, due to loop header copying, which enables us
1361 to prove the lower bound.
1362
1363 The upper bound is more complicated. Unless the expressions for initial
1364 and final value themselves contain enough information, we usually cannot
1365 derive it from the context. */
1366
1367 /* First check whether the answer does not follow from the bounds we gathered
1368 before. */
1369 if (integer_nonzerop (iv0->step))
1370 dstep = wi::to_widest (iv0->step);
1371 else
1372 {
1373 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1374 dstep = -dstep;
1375 }
1376
1377 mpz_init (mstep);
1378 wi::to_mpz (dstep, mstep, UNSIGNED);
1379 mpz_neg (mstep, mstep);
1380 mpz_add_ui (mstep, mstep, 1);
1381
1382 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1383
1384 mpz_init (max);
1385 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1386 mpz_add (max, max, mstep);
1387 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1388 /* For pointers, only values lying inside a single object
1389 can be compared or manipulated by pointer arithmetic.
1390 GCC in general does not allow or handle objects larger
1391 than half of the address space, hence the upper bound
1392 is satisfied for pointers. */
1393 || POINTER_TYPE_P (type));
1394 mpz_clear (mstep);
1395 mpz_clear (max);
1396
1397 if (rolls_p && no_overflow_p)
1398 return;
1399
1400 type1 = type;
1401 if (POINTER_TYPE_P (type))
1402 type1 = sizetype;
1403
1404 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1405 we must be careful not to introduce overflow. */
1406
1407 if (integer_nonzerop (iv0->step))
1408 {
1409 diff = fold_build2 (MINUS_EXPR, type1,
1410 iv0->step, build_int_cst (type1, 1));
1411
1412 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1413 the address 0 never belongs to any object, we can assume this for
1414 pointers. */
1415 if (!POINTER_TYPE_P (type))
1416 {
1417 bound = fold_build2 (PLUS_EXPR, type1,
1418 TYPE_MIN_VALUE (type), diff);
1419 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1420 iv0->base, bound);
1421 }
1422
1423 /* And then we can compute iv0->base - diff, and compare it with
1424 iv1->base. */
1425 mbzl = fold_build2 (MINUS_EXPR, type1,
1426 fold_convert (type1, iv0->base), diff);
1427 mbzr = fold_convert (type1, iv1->base);
1428 }
1429 else
1430 {
1431 diff = fold_build2 (PLUS_EXPR, type1,
1432 iv1->step, build_int_cst (type1, 1));
1433
1434 if (!POINTER_TYPE_P (type))
1435 {
1436 bound = fold_build2 (PLUS_EXPR, type1,
1437 TYPE_MAX_VALUE (type), diff);
1438 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1439 iv1->base, bound);
1440 }
1441
1442 mbzl = fold_convert (type1, iv0->base);
1443 mbzr = fold_build2 (MINUS_EXPR, type1,
1444 fold_convert (type1, iv1->base), diff);
1445 }
1446
1447 if (!integer_nonzerop (assumption))
1448 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1449 niter->assumptions, assumption);
1450 if (!rolls_p)
1451 {
1452 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1453 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1454 niter->may_be_zero, mbz);
1455 }
1456 }
1457
1458 /* Determines number of iterations of loop whose ending condition
1459 is IV0 < IV1. TYPE is the type of the iv. The number of
1460 iterations is stored to NITER. BNDS bounds the difference
1461 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1462 that the exit must be taken eventually. */
1463
1464 static bool
1465 number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
1466 affine_iv *iv1, class tree_niter_desc *niter,
1467 bool exit_must_be_taken, bounds *bnds)
1468 {
1469 tree niter_type = unsigned_type_for (type);
1470 tree delta, step, s;
1471 mpz_t mstep, tmp;
1472
1473 if (integer_nonzerop (iv0->step))
1474 {
1475 niter->control = *iv0;
1476 niter->cmp = LT_EXPR;
1477 niter->bound = iv1->base;
1478 }
1479 else
1480 {
1481 niter->control = *iv1;
1482 niter->cmp = GT_EXPR;
1483 niter->bound = iv0->base;
1484 }
1485
1486 delta = fold_build2 (MINUS_EXPR, niter_type,
1487 fold_convert (niter_type, iv1->base),
1488 fold_convert (niter_type, iv0->base));
1489
1490 /* First handle the special case that the step is +-1. */
1491 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1492 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1493 {
1494 /* for (i = iv0->base; i < iv1->base; i++)
1495
1496 or
1497
1498 for (i = iv1->base; i > iv0->base; i--).
1499
1500 In both cases # of iterations is iv1->base - iv0->base, assuming that
1501 iv1->base >= iv0->base.
1502
1503 First try to derive a lower bound on the value of
1504 iv1->base - iv0->base, computed in full precision. If the difference
1505 is nonnegative, we are done, otherwise we must record the
1506 condition. */
1507
1508 if (mpz_sgn (bnds->below) < 0)
1509 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1510 iv1->base, iv0->base);
1511 niter->niter = delta;
1512 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1513 TYPE_SIGN (niter_type));
1514 niter->control.no_overflow = true;
1515 return true;
1516 }
1517
1518 if (integer_nonzerop (iv0->step))
1519 step = fold_convert (niter_type, iv0->step);
1520 else
1521 step = fold_convert (niter_type,
1522 fold_build1 (NEGATE_EXPR, type, iv1->step));
1523
1524 /* If we can determine the final value of the control iv exactly, we can
1525 transform the condition to != comparison. In particular, this will be
1526 the case if DELTA is constant. */
1527 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1528 exit_must_be_taken, bnds))
1529 {
1530 affine_iv zps;
1531
1532 zps.base = build_int_cst (niter_type, 0);
1533 zps.step = step;
1534 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1535 zps does not overflow. */
1536 zps.no_overflow = true;
1537
1538 return number_of_iterations_ne (loop, type, &zps,
1539 delta, niter, true, bnds);
1540 }
1541
1542 /* Make sure that the control iv does not overflow. */
1543 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1544 return false;
1545
1546 /* We determine the number of iterations as (delta + step - 1) / step. For
1547 this to work, we must know that iv1->base >= iv0->base - step + 1,
1548 otherwise the loop does not roll. */
1549 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1550
1551 s = fold_build2 (MINUS_EXPR, niter_type,
1552 step, build_int_cst (niter_type, 1));
1553 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1554 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1555
1556 mpz_init (mstep);
1557 mpz_init (tmp);
1558 wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
1559 mpz_add (tmp, bnds->up, mstep);
1560 mpz_sub_ui (tmp, tmp, 1);
1561 mpz_fdiv_q (tmp, tmp, mstep);
1562 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1563 TYPE_SIGN (niter_type));
1564 mpz_clear (mstep);
1565 mpz_clear (tmp);
1566
1567 return true;
1568 }
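
/* A standalone check (not GCC code; guarded out of the build) of the
   general formula above: for "i = 3; i < 14; i += 4", delta = 11 and
   niter = (11 + 4 - 1) / 4 = 3, computed in the unsigned type as the
   code above does.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned base = 3, bound = 14, step = 4;
  unsigned delta = bound - base;
  unsigned niter = (delta + step - 1) / step;
  unsigned n = 0;
  for (unsigned i = base; i < bound; i += step)
    n++;
  printf ("%u %u\n", niter, n);	/* 3 3 */
  return 0;
}
#endif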
1569
1570 /* Determines number of iterations of loop whose ending condition
1571 is IV0 <= IV1. TYPE is the type of the iv. The number of
1572 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1573 we know that this condition must eventually become false (we derived this
1574 earlier, and possibly set NITER->assumptions to make sure this
1575 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1576
1577 static bool
1578 number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
1579 affine_iv *iv1, class tree_niter_desc *niter,
1580 bool exit_must_be_taken, bounds *bnds)
1581 {
1582 tree assumption;
1583 tree type1 = type;
1584 if (POINTER_TYPE_P (type))
1585 type1 = sizetype;
1586
1587 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1588 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1589 value of the type. This we must know anyway, since if it is
1590 equal to this value, the loop rolls forever. We do not check
1591 this condition for pointer type ivs, as the code cannot rely on
1592 the object to which the pointer points being placed at the end of
1593 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1594 not defined for pointers). */
1595
1596 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1597 {
1598 if (integer_nonzerop (iv0->step))
1599 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1600 iv1->base, TYPE_MAX_VALUE (type));
1601 else
1602 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1603 iv0->base, TYPE_MIN_VALUE (type));
1604
1605 if (integer_zerop (assumption))
1606 return false;
1607 if (!integer_nonzerop (assumption))
1608 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1609 niter->assumptions, assumption);
1610 }
1611
1612 if (integer_nonzerop (iv0->step))
1613 {
1614 if (POINTER_TYPE_P (type))
1615 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1616 else
1617 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1618 build_int_cst (type1, 1));
1619 }
1620 else if (POINTER_TYPE_P (type))
1621 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1622 else
1623 iv0->base = fold_build2 (MINUS_EXPR, type1,
1624 iv0->base, build_int_cst (type1, 1));
1625
1626 bounds_add (bnds, 1, type1);
1627
1628 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1629 bnds);
1630 }
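
/* A standalone check (not GCC code; guarded out of the build) of the
   reduction above: "i <= 10" is handled as "i < 11", and both forms run
   the same number of times.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned a = 0, b = 0;
  for (unsigned i = 0; i <= 10; i++)
    a++;
  for (unsigned i = 0; i < 11; i++)
    b++;
  printf ("%u %u\n", a, b);	/* 11 11 */
  return 0;
}
#endif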
1631
1632 /* Dumps description of affine induction variable IV to FILE. */
1633
1634 static void
1635 dump_affine_iv (FILE *file, affine_iv *iv)
1636 {
1637 if (!integer_zerop (iv->step))
1638 fprintf (file, "[");
1639
1640 print_generic_expr (file, iv->base, TDF_SLIM);
1641
1642 if (!integer_zerop (iv->step))
1643 {
1644 fprintf (file, ", + , ");
1645 print_generic_expr (file, iv->step, TDF_SLIM);
1646 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1647 }
1648 }
1649
1650 /* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
1651 the condition for loop-until-wrap cases. For example:
1652 (unsigned){8, -1}_loop < 10 => {0, 1} != 9
1653 10 < (unsigned){0, max - 7}_loop => {0, 1} != 8
1654 Return true if the condition is successfully adjusted. */
1655
1656 static bool
1657 adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
1658 affine_iv *iv1)
1659 {
1660 /* Only support simple cases for the moment. */
1661 if (TREE_CODE (iv0->base) != INTEGER_CST
1662 || TREE_CODE (iv1->base) != INTEGER_CST)
1663 return false;
1664
1665 tree niter_type = unsigned_type_for (type), high, low;
1666 /* Case: i-- < 10. */
1667 if (integer_zerop (iv1->step))
1668 {
1669 /* TODO: Should handle case in which abs(step) != 1. */
1670 if (!integer_minus_onep (iv0->step))
1671 return false;
1672 /* Give up on infinite loop. */
1673 if (*code == LE_EXPR
1674 && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
1675 return false;
1676 high = fold_build2 (PLUS_EXPR, niter_type,
1677 fold_convert (niter_type, iv0->base),
1678 build_int_cst (niter_type, 1));
1679 low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
1680 }
1681 else if (integer_zerop (iv0->step))
1682 {
1683 /* TODO: Should handle case in which abs(step) != 1. */
1684 if (!integer_onep (iv1->step))
1685 return false;
1686 /* Give up on infinite loop. */
1687 if (*code == LE_EXPR
1688 && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
1689 return false;
1690 high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
1691 low = fold_build2 (MINUS_EXPR, niter_type,
1692 fold_convert (niter_type, iv1->base),
1693 build_int_cst (niter_type, 1));
1694 }
1695 else
1696 gcc_unreachable ();
1697
1698 iv0->base = low;
1699 iv0->step = fold_convert (niter_type, integer_one_node);
1700 iv1->base = high;
1701 iv1->step = build_int_cst (niter_type, 0);
1702 *code = NE_EXPR;
1703 return true;
1704 }
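
/* A standalone check (not GCC code; guarded out of the build) of the
   first example in the comment above: the wrapping unsigned loop
   "i = 8; i < 10; i--" exits when i wraps past 0 and runs 9 times,
   exactly like "j = 0; j != 9; j++".  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned n = 0, m = 0;
  for (unsigned i = 8; i < 10; i--)
    n++;
  for (unsigned j = 0; j != 9; j++)
    m++;
  printf ("%u %u\n", n, m);	/* 9 9 */
  return 0;
}
#endif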
1705
1706 /* Determine the number of iterations according to the condition (for staying
1707 inside the loop) which compares two induction variables using the comparison
1708 operator CODE. The induction variable on the left side of the comparison
1709 is IV0, the right-hand side is IV1. Both induction variables must have
1710 type TYPE, which must be an integer or pointer type. The steps of the
1711 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1712
1713 LOOP is the loop whose number of iterations we are determining.
1714
1715 ONLY_EXIT is true if we are sure this is the only way the loop could be
1716 exited (including possibly non-returning function calls, exceptions, etc.)
1717 -- in this case we can use the information whether the control induction
1718 variables can overflow or not in a more efficient way.
1719
1720 If EVERY_ITERATION is true, we know the test is executed on every iteration.
1721
1722 The results (number of iterations and assumptions as described in
1723 comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1724 Returns false if it fails to determine number of iterations, true if it
1725 was determined (possibly with some assumptions). */
1726
1727 static bool
1728 number_of_iterations_cond (class loop *loop,
1729 tree type, affine_iv *iv0, enum tree_code code,
1730 affine_iv *iv1, class tree_niter_desc *niter,
1731 bool only_exit, bool every_iteration)
1732 {
1733 bool exit_must_be_taken = false, ret;
1734 bounds bnds;
1735
1736 /* If the test is not executed on every iteration, wrapping may make the
1737 test pass again.
1738 TODO: the overflow case can still be used as an unreliable estimate
1739 of the upper bound, but we have no API to pass it down to the number
1740 of iterations code and, at present, it would not use it anyway. */
1741 if (!every_iteration
1742 && (!iv0->no_overflow || !iv1->no_overflow
1743 || code == NE_EXPR || code == EQ_EXPR))
1744 return false;
1745
1746 /* The meaning of these assumptions is this:
1747 if !assumptions
1748 then the rest of the information does not have to be valid
1749 if may_be_zero then the loop does not roll, even if
1750 niter != 0. */
1751 niter->assumptions = boolean_true_node;
1752 niter->may_be_zero = boolean_false_node;
1753 niter->niter = NULL_TREE;
1754 niter->max = 0;
1755 niter->bound = NULL_TREE;
1756 niter->cmp = ERROR_MARK;
1757
1758 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1759 the control variable is on lhs. */
1760 if (code == GE_EXPR || code == GT_EXPR
1761 || (code == NE_EXPR && integer_zerop (iv0->step)))
1762 {
1763 std::swap (iv0, iv1);
1764 code = swap_tree_comparison (code);
1765 }
1766
1767 if (POINTER_TYPE_P (type))
1768 {
1769 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1770 to the same object. If they do, the control variable cannot wrap
1771 (as wrapping around the bounds of memory will never return a pointer
1772 that would be guaranteed to point to the same object, even if we
1773 avoid undefined behavior by casting to size_t and back). */
1774 iv0->no_overflow = true;
1775 iv1->no_overflow = true;
1776 }
1777
1778 /* If the control induction variable does not overflow and the only exit
1779 from the loop is the one that we analyze, we know it must be taken
1780 eventually. */
1781 if (only_exit)
1782 {
1783 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1784 exit_must_be_taken = true;
1785 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1786 exit_must_be_taken = true;
1787 }
1788
1789 /* We can handle cases in which neither side of the comparison is
1790 invariant:
1791
1792 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1793 as if:
1794 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1795
1796 provided that either of the conditions below is satisfied:
1797
1798 a) the test is NE_EXPR;
1799 b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.
1800
1801 This rarely occurs in practice, but it is simple enough to manage. */
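/* For illustration (an assumed source-level example, not part of the
   original comment): in a loop like

     for (i = 0, j = n; i < j; i++, j--)
       ;

   we have iv0 = {0, +1} and iv1 = {n, -1}; the test is rewritten as
   {0, +2} < {n, 0}, so for positive n the loop iterates roughly
   n / 2 times.  */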
1802 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1803 {
1804 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1805 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1806 iv0->step, iv1->step);
1807
1808 /* No need to check the sign of the new step, since the code below
1809 takes care of it. */
1810 if (code != NE_EXPR
1811 && (TREE_CODE (step) != INTEGER_CST
1812 || !iv0->no_overflow || !iv1->no_overflow))
1813 return false;
1814
1815 iv0->step = step;
1816 if (!POINTER_TYPE_P (type))
1817 iv0->no_overflow = false;
1818
1819 iv1->step = build_int_cst (step_type, 0);
1820 iv1->no_overflow = true;
1821 }
1822
1823 /* If the result of the comparison is a constant, the loop is weird. More
1824 precise handling would be possible, but the situation is not common enough
1825 to waste time on it. */
1826 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1827 return false;
1828
1829 /* If the loop exits immediately, there is nothing to do. */
1830 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1831 if (tem && integer_zerop (tem))
1832 {
1833 if (!every_iteration)
1834 return false;
1835 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1836 niter->max = 0;
1837 return true;
1838 }
1839
1840 /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
1841 adjusting iv0, iv1 and code. */
1842 if (code != NE_EXPR
1843 && (tree_int_cst_sign_bit (iv0->step)
1844 || (!integer_zerop (iv1->step)
1845 && !tree_int_cst_sign_bit (iv1->step)))
1846 && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
1847 return false;
1848
1849 /* OK, now we know we have a sensible loop. Handle several cases, depending
1850 on which comparison operator is used. */
1851 bound_difference (loop, iv1->base, iv0->base, &bnds);
1852
1853 if (dump_file && (dump_flags & TDF_DETAILS))
1854 {
1855 fprintf (dump_file,
1856 "Analyzing # of iterations of loop %d\n", loop->num);
1857
1858 fprintf (dump_file, " exit condition ");
1859 dump_affine_iv (dump_file, iv0);
1860 fprintf (dump_file, " %s ",
1861 code == NE_EXPR ? "!="
1862 : code == LT_EXPR ? "<"
1863 : "<=");
1864 dump_affine_iv (dump_file, iv1);
1865 fprintf (dump_file, "\n");
1866
1867 fprintf (dump_file, " bounds on difference of bases: ");
1868 mpz_out_str (dump_file, 10, bnds.below);
1869 fprintf (dump_file, " ... ");
1870 mpz_out_str (dump_file, 10, bnds.up);
1871 fprintf (dump_file, "\n");
1872 }
1873
1874 switch (code)
1875 {
1876 case NE_EXPR:
1877 gcc_assert (integer_zerop (iv1->step));
1878 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1879 exit_must_be_taken, &bnds);
1880 break;
1881
1882 case LT_EXPR:
1883 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1884 exit_must_be_taken, &bnds);
1885 break;
1886
1887 case LE_EXPR:
1888 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1889 exit_must_be_taken, &bnds);
1890 break;
1891
1892 default:
1893 gcc_unreachable ();
1894 }
1895
1896 mpz_clear (bnds.up);
1897 mpz_clear (bnds.below);
1898
1899 if (dump_file && (dump_flags & TDF_DETAILS))
1900 {
1901 if (ret)
1902 {
1903 fprintf (dump_file, " result:\n");
1904 if (!integer_nonzerop (niter->assumptions))
1905 {
1906 fprintf (dump_file, " under assumptions ");
1907 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1908 fprintf (dump_file, "\n");
1909 }
1910
1911 if (!integer_zerop (niter->may_be_zero))
1912 {
1913 fprintf (dump_file, " zero if ");
1914 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1915 fprintf (dump_file, "\n");
1916 }
1917
1918 fprintf (dump_file, " # of iterations ");
1919 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1920 fprintf (dump_file, ", bounded by ");
1921 print_decu (niter->max, dump_file);
1922 fprintf (dump_file, "\n");
1923 }
1924 else
1925 fprintf (dump_file, " failed\n\n");
1926 }
1927 return ret;
1928 }
1929
1930 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1931 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1932 all SSA names are replaced with the result of calling the VALUEIZE
1933 function with the SSA name as argument. */
1934
1935 tree
1936 simplify_replace_tree (tree expr, tree old, tree new_tree,
1937 tree (*valueize) (tree, void*), void *context,
1938 bool do_fold)
1939 {
1940 unsigned i, n;
1941 tree ret = NULL_TREE, e, se;
1942
1943 if (!expr)
1944 return NULL_TREE;
1945
1946 /* Do not bother to replace constants. */
1947 if (CONSTANT_CLASS_P (expr))
1948 return expr;
1949
1950 if (valueize)
1951 {
1952 if (TREE_CODE (expr) == SSA_NAME)
1953 {
1954 new_tree = valueize (expr, context);
1955 if (new_tree != expr)
1956 return new_tree;
1957 }
1958 }
1959 else if (expr == old
1960 || operand_equal_p (expr, old, 0))
1961 return unshare_expr (new_tree);
1962
1963 if (!EXPR_P (expr))
1964 return expr;
1965
1966 n = TREE_OPERAND_LENGTH (expr);
1967 for (i = 0; i < n; i++)
1968 {
1969 e = TREE_OPERAND (expr, i);
1970 se = simplify_replace_tree (e, old, new_tree, valueize, context, do_fold);
1971 if (e == se)
1972 continue;
1973
1974 if (!ret)
1975 ret = copy_node (expr);
1976
1977 TREE_OPERAND (ret, i) = se;
1978 }
1979
1980 return (ret ? (do_fold ? fold (ret) : ret) : expr);
1981 }
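/* Usage sketch for simplify_replace_tree (hypothetical trees, not from
   the original sources): given EXPR representing a_1 + b_2, replacing
   b_2 by the constant 5 and folding yields a_1 + 5:

     tree five = build_int_cst (integer_type_node, 5);
     tree repl = simplify_replace_tree (expr, b_2, five);

   This is how the callers below substitute known-equal values into a
   condition before attempting to fold it.  */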
1982
1983 /* Expand definitions of ssa names in EXPR as long as they are simple
1984 enough, and return the new expression. If STOP is specified, stop
1985 expanding if EXPR equals to it. */
1986
1987 static tree
1988 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
1989 {
1990 unsigned i, n;
1991 tree ret = NULL_TREE, e, ee, e1;
1992 enum tree_code code;
1993 gimple *stmt;
1994
1995 if (expr == NULL_TREE)
1996 return expr;
1997
1998 if (is_gimple_min_invariant (expr))
1999 return expr;
2000
2001 code = TREE_CODE (expr);
2002 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2003 {
2004 n = TREE_OPERAND_LENGTH (expr);
2005 for (i = 0; i < n; i++)
2006 {
2007 e = TREE_OPERAND (expr, i);
2008 /* SCEV analysis feeds us a proper expression
2009 graph matching the SSA graph. Avoid turning it
2010 into a tree here, thus handle tree sharing
2011 properly.
2012 ??? The SSA walk below still turns the SSA graph
2013 into a tree but until we find a testcase do not
2014 introduce additional tree sharing here. */
2015 bool existed_p;
2016 tree &cee = cache.get_or_insert (e, &existed_p);
2017 if (existed_p)
2018 ee = cee;
2019 else
2020 {
2021 cee = e;
2022 ee = expand_simple_operations (e, stop, cache);
2023 if (ee != e)
2024 *cache.get (e) = ee;
2025 }
2026 if (e == ee)
2027 continue;
2028
2029 if (!ret)
2030 ret = copy_node (expr);
2031
2032 TREE_OPERAND (ret, i) = ee;
2033 }
2034
2035 if (!ret)
2036 return expr;
2037
2038 fold_defer_overflow_warnings ();
2039 ret = fold (ret);
2040 fold_undefer_and_ignore_overflow_warnings ();
2041 return ret;
2042 }
2043
2044 /* Stop if it's not an SSA name, or if it's the one we don't want to expand. */
2045 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2046 return expr;
2047
2048 stmt = SSA_NAME_DEF_STMT (expr);
2049 if (gimple_code (stmt) == GIMPLE_PHI)
2050 {
2051 basic_block src, dest;
2052
2053 if (gimple_phi_num_args (stmt) != 1)
2054 return expr;
2055 e = PHI_ARG_DEF (stmt, 0);
2056
2057 /* Avoid propagating through loop exit phi nodes, which
2058 could break loop-closed SSA form restrictions. */
2059 dest = gimple_bb (stmt);
2060 src = single_pred (dest);
2061 if (TREE_CODE (e) == SSA_NAME
2062 && src->loop_father != dest->loop_father)
2063 return expr;
2064
2065 return expand_simple_operations (e, stop, cache);
2066 }
2067 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2068 return expr;
2069
2070 /* Avoid expanding to expressions that contain SSA names that need
2071 to take part in abnormal coalescing. */
2072 ssa_op_iter iter;
2073 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2074 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2075 return expr;
2076
2077 e = gimple_assign_rhs1 (stmt);
2078 code = gimple_assign_rhs_code (stmt);
2079 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2080 {
2081 if (is_gimple_min_invariant (e))
2082 return e;
2083
2084 if (code == SSA_NAME)
2085 return expand_simple_operations (e, stop, cache);
2086 else if (code == ADDR_EXPR)
2087 {
2088 poly_int64 offset;
2089 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2090 &offset);
2091 if (base
2092 && TREE_CODE (base) == MEM_REF)
2093 {
2094 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2095 cache);
2096 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2097 wide_int_to_tree (sizetype,
2098 mem_ref_offset (base)
2099 + offset));
2100 }
2101 }
2102
2103 return expr;
2104 }
2105
2106 switch (code)
2107 {
2108 CASE_CONVERT:
2109 /* Casts are simple. */
2110 ee = expand_simple_operations (e, stop, cache);
2111 return fold_build1 (code, TREE_TYPE (expr), ee);
2112
2113 case PLUS_EXPR:
2114 case MINUS_EXPR:
2115 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2116 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2117 return expr;
2118 /* Fallthru. */
2119 case POINTER_PLUS_EXPR:
2120 /* And increments and decrements by a constant are simple. */
2121 e1 = gimple_assign_rhs2 (stmt);
2122 if (!is_gimple_min_invariant (e1))
2123 return expr;
2124
2125 ee = expand_simple_operations (e, stop, cache);
2126 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2127
2128 default:
2129 return expr;
2130 }
2131 }
2132
2133 tree
2134 expand_simple_operations (tree expr, tree stop)
2135 {
2136 hash_map<tree, tree> cache;
2137 return expand_simple_operations (expr, stop, cache);
2138 }
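/* For illustration (assumed GIMPLE, not from the original sources):
   given the definitions

     i_2 = n_1(D) + 1;
     b_3 = (unsigned int) i_2;

   expand_simple_operations (b_3) returns the folded expression
   (unsigned int) (n_1(D) + 1), so that an increment of the bound done
   before the loop becomes visible to the simplifications below.  */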
2139
2140 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2141 expression (or EXPR unchanged, if no simplification was possible). */
2142
2143 static tree
2144 tree_simplify_using_condition_1 (tree cond, tree expr)
2145 {
2146 bool changed;
2147 tree e, e0, e1, e2, notcond;
2148 enum tree_code code = TREE_CODE (expr);
2149
2150 if (code == INTEGER_CST)
2151 return expr;
2152
2153 if (code == TRUTH_OR_EXPR
2154 || code == TRUTH_AND_EXPR
2155 || code == COND_EXPR)
2156 {
2157 changed = false;
2158
2159 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2160 if (TREE_OPERAND (expr, 0) != e0)
2161 changed = true;
2162
2163 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2164 if (TREE_OPERAND (expr, 1) != e1)
2165 changed = true;
2166
2167 if (code == COND_EXPR)
2168 {
2169 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2170 if (TREE_OPERAND (expr, 2) != e2)
2171 changed = true;
2172 }
2173 else
2174 e2 = NULL_TREE;
2175
2176 if (changed)
2177 {
2178 if (code == COND_EXPR)
2179 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2180 else
2181 expr = fold_build2 (code, boolean_type_node, e0, e1);
2182 }
2183
2184 return expr;
2185 }
2186
2187 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2188 propagation, and vice versa. Fold does not handle this, since it is
2189 considered too expensive. */
2190 if (TREE_CODE (cond) == EQ_EXPR)
2191 {
2192 e0 = TREE_OPERAND (cond, 0);
2193 e1 = TREE_OPERAND (cond, 1);
2194
2195 /* We know that e0 == e1. Check whether we can simplify EXPR
2196 using this fact. */
2197 e = simplify_replace_tree (expr, e0, e1);
2198 if (integer_zerop (e) || integer_nonzerop (e))
2199 return e;
2200
2201 e = simplify_replace_tree (expr, e1, e0);
2202 if (integer_zerop (e) || integer_nonzerop (e))
2203 return e;
2204 }
2205 if (TREE_CODE (expr) == EQ_EXPR)
2206 {
2207 e0 = TREE_OPERAND (expr, 0);
2208 e1 = TREE_OPERAND (expr, 1);
2209
2210 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2211 e = simplify_replace_tree (cond, e0, e1);
2212 if (integer_zerop (e))
2213 return e;
2214 e = simplify_replace_tree (cond, e1, e0);
2215 if (integer_zerop (e))
2216 return e;
2217 }
2218 if (TREE_CODE (expr) == NE_EXPR)
2219 {
2220 e0 = TREE_OPERAND (expr, 0);
2221 e1 = TREE_OPERAND (expr, 1);
2222
2223 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2224 e = simplify_replace_tree (cond, e0, e1);
2225 if (integer_zerop (e))
2226 return boolean_true_node;
2227 e = simplify_replace_tree (cond, e1, e0);
2228 if (integer_zerop (e))
2229 return boolean_true_node;
2230 }
2231
2232 /* Check whether COND ==> EXPR. */
2233 notcond = invert_truthvalue (cond);
2234 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2235 if (e && integer_nonzerop (e))
2236 return e;
2237
2238 /* Check whether COND ==> not EXPR. */
2239 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2240 if (e && integer_zerop (e))
2241 return e;
2242
2243 return expr;
2244 }
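/* For illustration (an assumed example, not from the original
   sources): with COND = (n_5 == 10), EXPR = (n_5 < 16) simplifies to
   true by substituting 10 for n_5; with COND = (i_1 < n_5),
   EXPR = (i_1 == n_5) simplifies to false, because assuming
   i_1 == n_5 would contradict COND.  */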
2245
2246 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2247 expression (or EXPR unchanged, if no simplification was possible).
2248 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2249 of simple operations in definitions of ssa names in COND are expanded,
2250 so that things like casts or incrementing the value of the bound before
2251 the loop do not cause us to fail. */
2252
2253 static tree
2254 tree_simplify_using_condition (tree cond, tree expr)
2255 {
2256 cond = expand_simple_operations (cond);
2257
2258 return tree_simplify_using_condition_1 (cond, expr);
2259 }
2260
2261 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2262 Returns the simplified expression (or EXPR unchanged, if no
2263 simplification was possible). */
2264
2265 tree
2266 simplify_using_initial_conditions (class loop *loop, tree expr)
2267 {
2268 edge e;
2269 basic_block bb;
2270 gimple *stmt;
2271 tree cond, expanded, backup;
2272 int cnt = 0;
2273
2274 if (TREE_CODE (expr) == INTEGER_CST)
2275 return expr;
2276
2277 backup = expanded = expand_simple_operations (expr);
2278
2279 /* Limit walking the dominators to avoid quadratic behavior in
2280 the number of BBs times the number of loops in degenerate
2281 cases. */
2282 for (bb = loop->header;
2283 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2284 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2285 {
2286 if (!single_pred_p (bb))
2287 continue;
2288 e = single_pred_edge (bb);
2289
2290 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2291 continue;
2292
2293 stmt = last_stmt (e->src);
2294 cond = fold_build2 (gimple_cond_code (stmt),
2295 boolean_type_node,
2296 gimple_cond_lhs (stmt),
2297 gimple_cond_rhs (stmt));
2298 if (e->flags & EDGE_FALSE_VALUE)
2299 cond = invert_truthvalue (cond);
2300 expanded = tree_simplify_using_condition (cond, expanded);
2301 /* Return immediately if EXPR has simplified to a constant. */
2302 if (expanded
2303 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2304 return expanded;
2305
2306 ++cnt;
2307 }
2308
2309 /* Return the original expression if no simplification is done. */
2310 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2311 }
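/* For illustration (a hypothetical source fragment): when the loop is
   only reached under a guard,

     if (n_4 > 0)
       while (i_7 < n_4)
         ...

   an assumption such as n_4 >= 1 produced by the niter analysis is
   folded to true here, since the dominating condition n_4 > 0 is
   known to hold on entry to the loop.  */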
2312
2313 /* Tries to simplify EXPR using the evolutions of the loop invariants
2314 in the superloops of LOOP. Returns the simplified expression
2315 (or EXPR unchanged, if no simplification was possible). */
2316
2317 static tree
2318 simplify_using_outer_evolutions (class loop *loop, tree expr)
2319 {
2320 enum tree_code code = TREE_CODE (expr);
2321 bool changed;
2322 tree e, e0, e1, e2;
2323
2324 if (is_gimple_min_invariant (expr))
2325 return expr;
2326
2327 if (code == TRUTH_OR_EXPR
2328 || code == TRUTH_AND_EXPR
2329 || code == COND_EXPR)
2330 {
2331 changed = false;
2332
2333 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2334 if (TREE_OPERAND (expr, 0) != e0)
2335 changed = true;
2336
2337 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2338 if (TREE_OPERAND (expr, 1) != e1)
2339 changed = true;
2340
2341 if (code == COND_EXPR)
2342 {
2343 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2344 if (TREE_OPERAND (expr, 2) != e2)
2345 changed = true;
2346 }
2347 else
2348 e2 = NULL_TREE;
2349
2350 if (changed)
2351 {
2352 if (code == COND_EXPR)
2353 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2354 else
2355 expr = fold_build2 (code, boolean_type_node, e0, e1);
2356 }
2357
2358 return expr;
2359 }
2360
2361 e = instantiate_parameters (loop, expr);
2362 if (is_gimple_min_invariant (e))
2363 return e;
2364
2365 return expr;
2366 }
2367
2368 /* Returns true if EXIT is the only possible exit from LOOP; BODY is LOOP's body as obtained from get_loop_body. */
2369
2370 bool
2371 loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
2372 {
2373 gimple_stmt_iterator bsi;
2374 unsigned i;
2375
2376 if (exit != single_exit (loop))
2377 return false;
2378
2379 for (i = 0; i < loop->num_nodes; i++)
2380 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2381 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2382 return false;
2383
2384 return true;
2385 }
2386
2387 /* Stores a description of the number of iterations of LOOP derived from
2388 EXIT (an exit edge of LOOP) in NITER. Returns true if some useful
2389 information could be derived (and fields of NITER have meaning described
2390 in comments at class tree_niter_desc declaration), false otherwise.
2391 When EVERY_ITERATION is true, only tests that are known to be executed
2392 every iteration are considered (i.e. only a test that alone bounds the loop).
2393 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2394 it when returning true. */
2395
2396 bool
2397 number_of_iterations_exit_assumptions (class loop *loop, edge exit,
2398 class tree_niter_desc *niter,
2399 gcond **at_stmt, bool every_iteration,
2400 basic_block *body)
2401 {
2402 gimple *last;
2403 gcond *stmt;
2404 tree type;
2405 tree op0, op1;
2406 enum tree_code code;
2407 affine_iv iv0, iv1;
2408 bool safe;
2409
2410 /* Nothing to analyze if the loop is known to be infinite. */
2411 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2412 return false;
2413
2414 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2415
2416 if (every_iteration && !safe)
2417 return false;
2418
2419 niter->assumptions = boolean_false_node;
2420 niter->control.base = NULL_TREE;
2421 niter->control.step = NULL_TREE;
2422 niter->control.no_overflow = false;
2423 last = last_stmt (exit->src);
2424 if (!last)
2425 return false;
2426 stmt = dyn_cast <gcond *> (last);
2427 if (!stmt)
2428 return false;
2429
2430 /* We want the condition for staying inside the loop. */
2431 code = gimple_cond_code (stmt);
2432 if (exit->flags & EDGE_TRUE_VALUE)
2433 code = invert_tree_comparison (code, false);
2434
2435 switch (code)
2436 {
2437 case GT_EXPR:
2438 case GE_EXPR:
2439 case LT_EXPR:
2440 case LE_EXPR:
2441 case NE_EXPR:
2442 break;
2443
2444 default:
2445 return false;
2446 }
2447
2448 op0 = gimple_cond_lhs (stmt);
2449 op1 = gimple_cond_rhs (stmt);
2450 type = TREE_TYPE (op0);
2451
2452 if (TREE_CODE (type) != INTEGER_TYPE
2453 && !POINTER_TYPE_P (type))
2454 return false;
2455
2456 tree iv0_niters = NULL_TREE;
2457 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2458 op0, &iv0, safe ? &iv0_niters : NULL, false))
2459 return number_of_iterations_popcount (loop, exit, code, niter);
2460 tree iv1_niters = NULL_TREE;
2461 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2462 op1, &iv1, safe ? &iv1_niters : NULL, false))
2463 return false;
2464 /* Give up on the complicated case. */
2465 if (iv0_niters && iv1_niters)
2466 return false;
2467
2468 /* We don't want to see undefined signed overflow warnings while
2469 computing the number of iterations. */
2470 fold_defer_overflow_warnings ();
2471
2472 iv0.base = expand_simple_operations (iv0.base);
2473 iv1.base = expand_simple_operations (iv1.base);
2474 bool body_from_caller = true;
2475 if (!body)
2476 {
2477 body = get_loop_body (loop);
2478 body_from_caller = false;
2479 }
2480 bool only_exit_p = loop_only_exit_p (loop, body, exit);
2481 if (!body_from_caller)
2482 free (body);
2483 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2484 only_exit_p, safe))
2485 {
2486 fold_undefer_and_ignore_overflow_warnings ();
2487 return false;
2488 }
2489
2490 /* Incorporate additional assumption implied by control iv. */
2491 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2492 if (iv_niters)
2493 {
2494 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2495 fold_convert (TREE_TYPE (niter->niter),
2496 iv_niters));
2497
2498 if (!integer_nonzerop (assumption))
2499 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2500 niter->assumptions, assumption);
2501
2502 /* Refine upper bound if possible. */
2503 if (TREE_CODE (iv_niters) == INTEGER_CST
2504 && niter->max > wi::to_widest (iv_niters))
2505 niter->max = wi::to_widest (iv_niters);
2506 }
2507
2508 /* There are no assumptions if the loop is known to be finite. */
2509 if (!integer_zerop (niter->assumptions)
2510 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2511 niter->assumptions = boolean_true_node;
2512
2513 if (optimize >= 3)
2514 {
2515 niter->assumptions = simplify_using_outer_evolutions (loop,
2516 niter->assumptions);
2517 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2518 niter->may_be_zero);
2519 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2520 }
2521
2522 niter->assumptions
2523 = simplify_using_initial_conditions (loop,
2524 niter->assumptions);
2525 niter->may_be_zero
2526 = simplify_using_initial_conditions (loop,
2527 niter->may_be_zero);
2528
2529 fold_undefer_and_ignore_overflow_warnings ();
2530
2531 /* If NITER has simplified into a constant, update MAX. */
2532 if (TREE_CODE (niter->niter) == INTEGER_CST)
2533 niter->max = wi::to_widest (niter->niter);
2534
2535 if (at_stmt)
2536 *at_stmt = stmt;
2537
2538 return (!integer_zerop (niter->assumptions));
2539 }
2540
2541
2542 /* Utility function to check whether OP is defined by a stmt
2543 that computes VAL - 1. */
2544
2545 static bool
2546 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2547 {
2548 gimple *stmt;
2549 return (TREE_CODE (op) == SSA_NAME
2550 && (stmt = SSA_NAME_DEF_STMT (op))
2551 && is_gimple_assign (stmt)
2552 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2553 && val == gimple_assign_rhs1 (stmt)
2554 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2555 }
2556
2557
2558 /* See if LOOP is a popcount implementation; if so, determine NITER for the loop.
2559
2560 We match:
2561 <bb 2>
2562 goto <bb 4>
2563
2564 <bb 3>
2565 _1 = b_11 + -1
2566 b_6 = _1 & b_11
2567
2568 <bb 4>
2569 b_11 = PHI <b_5(D)(2), b_6(3)>
2570
2571 exit block
2572 if (b_11 != 0)
2573 goto <bb 3>
2574 else
2575 goto <bb 5>
2576
2577 OR we match copy-header version:
2578 if (b_5 != 0)
2579 goto <bb 3>
2580 else
2581 goto <bb 4>
2582
2583 <bb 3>
2584 b_11 = PHI <b_5(2), b_6(3)>
2585 _1 = b_11 + -1
2586 b_6 = _1 & b_11
2587
2588 exit block
2589 if (b_6 != 0)
2590 goto <bb 3>
2591 else
2592 goto <bb 4>
2593
2594 If the popcount pattern matched, update NITER accordingly,
2595 i.e., set NITER to __builtin_popcount (b), and return true;
2596 return false otherwise.
2597
2598 */
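/* For illustration (source-level view; an assumed example, not part
   of the original comment): the shape matched below is Kernighan's
   bit-counting loop,

     while (b)
       {
         b = b & (b - 1);
         count++;
       }

   whose body executes exactly __builtin_popcount (b) times, since
   each iteration clears the lowest set bit of b.  */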
2599
2600 static bool
2601 number_of_iterations_popcount (loop_p loop, edge exit,
2602 enum tree_code code,
2603 class tree_niter_desc *niter)
2604 {
2605 bool adjust = true;
2606 tree iter;
2607 HOST_WIDE_INT max;
2609 tree fn = NULL_TREE;
2610
2611 /* Check that the loop-terminating branch is of the form
2612 if (b != 0). */
2613 gimple *stmt = last_stmt (exit->src);
2614 if (!stmt
2615 || gimple_code (stmt) != GIMPLE_COND
2616 || code != NE_EXPR
2617 || !integer_zerop (gimple_cond_rhs (stmt))
2618 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2619 return false;
2620
2621 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2622
2623 /* Depending on whether copy-header was performed, the feeding PHI stmt
2624 might be in the loop header or the loop latch; handle both cases. */
2625 if (gimple_code (and_stmt) == GIMPLE_PHI
2626 && gimple_bb (and_stmt) == loop->header
2627 && gimple_phi_num_args (and_stmt) == 2
2628 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2629 loop_latch_edge (loop)->dest_idx))
2630 == SSA_NAME))
2631 {
2632 /* The SSA name used in the exit condition is defined by the PHI stmt
2633 b_11 = PHI <b_5(D)(2), b_6(3)>;
2634 from the PHI stmt, get the and_stmt
2635 b_6 = _1 & b_11. */
2636 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2637 and_stmt = SSA_NAME_DEF_STMT (t);
2638 adjust = false;
2639 }
2640
2641 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2642 if (!is_gimple_assign (and_stmt)
2643 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2644 return false;
2645
2646 tree b_11 = gimple_assign_rhs1 (and_stmt);
2647 tree _1 = gimple_assign_rhs2 (and_stmt);
2648
2649 /* Check that _1 is defined by b_11 + -1 (_1 = b_11 + -1).
2650 Also make sure that b_11 is the same in the and_stmt and in the stmt defining _1.
2651 Also canonicalize if _1 and b_11 are reversed. */
2652 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2653 std::swap (b_11, _1);
2654 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2655 ;
2656 else
2657 return false;
2658 /* Check the recurrence:
2659 ... = PHI <b_5(2), b_6(3)>. */
2660 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2661 if (gimple_code (phi) != GIMPLE_PHI
2662 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2663 || (gimple_assign_lhs (and_stmt)
2664 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2665 return false;
2666
2667 /* We found a match. Get the corresponding popcount builtin. */
2668 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2669 if (TYPE_PRECISION (TREE_TYPE (src)) <= TYPE_PRECISION (integer_type_node))
2670 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2671 else if (TYPE_PRECISION (TREE_TYPE (src))
2672 == TYPE_PRECISION (long_integer_type_node))
2673 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2674 else if (TYPE_PRECISION (TREE_TYPE (src))
2675 == TYPE_PRECISION (long_long_integer_type_node)
2676 || (TYPE_PRECISION (TREE_TYPE (src))
2677 == 2 * TYPE_PRECISION (long_long_integer_type_node)))
2678 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2679
2680 if (!fn)
2681 return false;
2682
2683 /* Update the NITER params accordingly. */
2684 tree utype = unsigned_type_for (TREE_TYPE (src));
2685 src = fold_convert (utype, src);
2686 if (TYPE_PRECISION (TREE_TYPE (src)) < TYPE_PRECISION (integer_type_node))
2687 src = fold_convert (unsigned_type_node, src);
2688 tree call;
2689 if (TYPE_PRECISION (TREE_TYPE (src))
2690 == 2 * TYPE_PRECISION (long_long_integer_type_node))
2691 {
2692 int prec = TYPE_PRECISION (long_long_integer_type_node);
2693 tree src1 = fold_convert (long_long_unsigned_type_node,
2694 fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
2695 unshare_expr (src),
2696 build_int_cst (integer_type_node,
2697 prec)));
2698 tree src2 = fold_convert (long_long_unsigned_type_node, src);
2699 call = build_call_expr (fn, 1, src1);
2700 call = fold_build2 (PLUS_EXPR, TREE_TYPE (call), call,
2701 build_call_expr (fn, 1, src2));
2702 call = fold_convert (utype, call);
2703 }
2704 else
2705 call = fold_convert (utype, build_call_expr (fn, 1, src));
2706 if (adjust)
2707 iter = fold_build2 (MINUS_EXPR, utype, call, build_int_cst (utype, 1));
2708 else
2709 iter = call;
2710
2711 if (TREE_CODE (call) == INTEGER_CST)
2712 max = tree_to_uhwi (call);
2713 else
2714 max = TYPE_PRECISION (TREE_TYPE (src));
2715 if (adjust)
2716 max = max - 1;
2717
2718 niter->niter = iter;
2719 niter->assumptions = boolean_true_node;
2720
2721 if (adjust)
2722 {
2723 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2724 build_zero_cst (TREE_TYPE (src)));
2725 niter->may_be_zero
2726 = simplify_using_initial_conditions (loop, may_be_zero);
2727 }
2728 else
2729 niter->may_be_zero = boolean_false_node;
2730
2731 niter->max = max;
2732 niter->bound = NULL_TREE;
2733 niter->cmp = ERROR_MARK;
2734 return true;
2735 }
2736
2737
2738 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2739 the niter information holds unconditionally. */
2740
2741 bool
2742 number_of_iterations_exit (class loop *loop, edge exit,
2743 class tree_niter_desc *niter,
2744 bool warn, bool every_iteration,
2745 basic_block *body)
2746 {
2747 gcond *stmt;
2748 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2749 &stmt, every_iteration, body))
2750 return false;
2751
2752 if (integer_nonzerop (niter->assumptions))
2753 return true;
2754
2755 if (warn && dump_enabled_p ())
2756 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2757 "missed loop optimization: niters analysis ends up "
2758 "with assumptions.\n");
2759
2760 return false;
2761 }
2762
2763 /* Try to determine the number of iterations of LOOP. If we succeed,
2764 an expression giving the number of iterations is returned and *EXIT is
2765 set to the edge from which the information was obtained. Otherwise
2766 chrec_dont_know is returned. */
2767
2768 tree
2769 find_loop_niter (class loop *loop, edge *exit)
2770 {
2771 unsigned i;
2772 auto_vec<edge> exits = get_loop_exit_edges (loop);
2773 edge ex;
2774 tree niter = NULL_TREE, aniter;
2775 class tree_niter_desc desc;
2776
2777 *exit = NULL;
2778 FOR_EACH_VEC_ELT (exits, i, ex)
2779 {
2780 if (!number_of_iterations_exit (loop, ex, &desc, false))
2781 continue;
2782
2783 if (integer_nonzerop (desc.may_be_zero))
2784 {
2785 /* We exit in the first iteration through this exit.
2786 We won't find anything better. */
2787 niter = build_int_cst (unsigned_type_node, 0);
2788 *exit = ex;
2789 break;
2790 }
2791
2792 if (!integer_zerop (desc.may_be_zero))
2793 continue;
2794
2795 aniter = desc.niter;
2796
2797 if (!niter)
2798 {
2799 /* Nothing recorded yet. */
2800 niter = aniter;
2801 *exit = ex;
2802 continue;
2803 }
2804
2805 /* Prefer constants, the lower the better. */
2806 if (TREE_CODE (aniter) != INTEGER_CST)
2807 continue;
2808
2809 if (TREE_CODE (niter) != INTEGER_CST)
2810 {
2811 niter = aniter;
2812 *exit = ex;
2813 continue;
2814 }
2815
2816 if (tree_int_cst_lt (aniter, niter))
2817 {
2818 niter = aniter;
2819 *exit = ex;
2820 continue;
2821 }
2822 }
2823
2824 return niter ? niter : chrec_dont_know;
2825 }
2826
2827 /* Return true if the loop is known to have a bounded number of iterations. */
2828
2829 bool
2830 finite_loop_p (class loop *loop)
2831 {
2832 widest_int nit;
2833 int flags;
2834
2835 flags = flags_from_decl_or_type (current_function_decl);
2836 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2837 {
2838 if (dump_file && (dump_flags & TDF_DETAILS))
2839 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2840 loop->num);
2841 return true;
2842 }
2843
2844 if (loop->any_upper_bound
2845 || max_loop_iterations (loop, &nit))
2846 {
2847 if (dump_file && (dump_flags & TDF_DETAILS))
2848 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2849 loop->num);
2850 return true;
2851 }
2852
2853 if (loop->finite_p)
2854 {
2855 unsigned i;
2856 auto_vec<edge> exits = get_loop_exit_edges (loop);
2857 edge ex;
2858
2859 /* If the loop has a normal exit, we can assume it will terminate. */
2860 FOR_EACH_VEC_ELT (exits, i, ex)
2861 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2862 {
2863 if (dump_file)
2864 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2865 "and -ffinite-loops is on.\n", loop->num);
2866 return true;
2867 }
2868 }
2869
2870 return false;
2871 }
2872
2873 /*
2874
2875 Analysis of a number of iterations of a loop by a brute-force evaluation.
2876
2877 */
2878
2879 /* Bound on the number of iterations we try to evaluate. */
2880
2881 #define MAX_ITERATIONS_TO_TRACK \
2882 ((unsigned) param_max_iterations_to_track)
2883
2884 /* Returns the phi node of LOOP from whose result the SSA name X is derived
2885 by a chain of operations in which all but exactly one of the
2886 operands are constants. */
2887
2888 static gphi *
2889 chain_of_csts_start (class loop *loop, tree x)
2890 {
2891 gimple *stmt = SSA_NAME_DEF_STMT (x);
2892 tree use;
2893 basic_block bb = gimple_bb (stmt);
2894 enum tree_code code;
2895
2896 if (!bb
2897 || !flow_bb_inside_loop_p (loop, bb))
2898 return NULL;
2899
2900 if (gimple_code (stmt) == GIMPLE_PHI)
2901 {
2902 if (bb == loop->header)
2903 return as_a <gphi *> (stmt);
2904
2905 return NULL;
2906 }
2907
2908 if (gimple_code (stmt) != GIMPLE_ASSIGN
2909 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2910 return NULL;
2911
2912 code = gimple_assign_rhs_code (stmt);
2913 if (gimple_references_memory_p (stmt)
2914 || TREE_CODE_CLASS (code) == tcc_reference
2915 || (code == ADDR_EXPR
2916 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2917 return NULL;
2918
2919 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2920 if (use == NULL_TREE)
2921 return NULL;
2922
2923 return chain_of_csts_start (loop, use);
2924 }
2925
2926 /* Determines whether the expression X is derived from the result of a phi node
2927 in the header of LOOP such that
2928
2929 * the derivation of X consists only of operations with constants
2930 * the initial value of the phi node is constant
2931 * the value of the phi node in the next iteration can be derived from the
2932 value in the current iteration by a chain of operations with constants,
2933 or is also a constant
2934
2935 If such phi node exists, it is returned, otherwise NULL is returned. */
2936
2937 static gphi *
2938 get_base_for (class loop *loop, tree x)
2939 {
2940 gphi *phi;
2941 tree init, next;
2942
2943 if (is_gimple_min_invariant (x))
2944 return NULL;
2945
2946 phi = chain_of_csts_start (loop, x);
2947 if (!phi)
2948 return NULL;
2949
2950 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2951 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2952
2953 if (!is_gimple_min_invariant (init))
2954 return NULL;
2955
2956 if (TREE_CODE (next) == SSA_NAME
2957 && chain_of_csts_start (loop, next) != phi)
2958 return NULL;
2959
2960 return phi;
2961 }
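/* For illustration (assumed GIMPLE): given

     i_1 = PHI <7(preheader), i_2(latch)>
     i_2 = i_1 * 3;

   get_base_for (loop, i_2) returns the PHI node, because i_2 is
   derived from its result purely by an operation with a constant and
   the initial value 7 is constant.  */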
2962
2963 /* Given an expression X, then
2964
2965 * if X is NULL_TREE, we return the constant BASE.
2966 * if X is a constant, we return the constant X.
2967 * otherwise X is an SSA name, whose value in the considered loop is derived
2968 by a chain of operations with constants from the result of a phi node in
2969 the header of the loop. Then we return the value of X when the value of the
2970 result of this phi node is given by the constant BASE. */
2971
2972 static tree
2973 get_val_for (tree x, tree base)
2974 {
2975 gimple *stmt;
2976
2977 gcc_checking_assert (is_gimple_min_invariant (base));
2978
2979 if (!x)
2980 return base;
2981 else if (is_gimple_min_invariant (x))
2982 return x;
2983
2984 stmt = SSA_NAME_DEF_STMT (x);
2985 if (gimple_code (stmt) == GIMPLE_PHI)
2986 return base;
2987
2988 gcc_checking_assert (is_gimple_assign (stmt));
2989
2990 /* STMT must be either an assignment of a single SSA name or an
2991 expression involving an SSA name and a constant. Try to fold that
2992 expression using the value for the SSA name. */
2993 if (gimple_assign_ssa_name_copy_p (stmt))
2994 return get_val_for (gimple_assign_rhs1 (stmt), base);
2995 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2996 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2997 return fold_build1 (gimple_assign_rhs_code (stmt),
2998 gimple_expr_type (stmt),
2999 get_val_for (gimple_assign_rhs1 (stmt), base));
3000 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
3001 {
3002 tree rhs1 = gimple_assign_rhs1 (stmt);
3003 tree rhs2 = gimple_assign_rhs2 (stmt);
3004 if (TREE_CODE (rhs1) == SSA_NAME)
3005 rhs1 = get_val_for (rhs1, base);
3006 else if (TREE_CODE (rhs2) == SSA_NAME)
3007 rhs2 = get_val_for (rhs2, base);
3008 else
3009 gcc_unreachable ();
3010 return fold_build2 (gimple_assign_rhs_code (stmt),
3011 gimple_expr_type (stmt), rhs1, rhs2);
3012 }
3013 else
3014 gcc_unreachable ();
3015 }
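/* For illustration (continuing the assumed example above): with the
   defining statement i_2 = i_1 * 3, the call

     get_val_for (i_2, build_int_cst (integer_type_node, 7))

   folds 7 * 3 and returns the constant 21, i.e. the value of i_2 in
   an iteration in which the PHI result is 7.  */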
3016
3017
3018 /* Tries to count the number of iterations of LOOP until it exits by EXIT
3019 by brute force -- i.e. by determining the values of the operands of the
3020 condition at EXIT in the first few iterations of the loop (assuming that
3021 these values are constant) and finding the first iteration in which the
3022 condition is not satisfied. Returns the constant giving the number
3023 of iterations of LOOP if successful, chrec_dont_know otherwise. */
3024
3025 tree
3026 loop_niter_by_eval (class loop *loop, edge exit)
3027 {
3028 tree acnd;
3029 tree op[2], val[2], next[2], aval[2];
3030 gphi *phi;
3031 gimple *cond;
3032 unsigned i, j;
3033 enum tree_code cmp;
3034
3035 cond = last_stmt (exit->src);
3036 if (!cond || gimple_code (cond) != GIMPLE_COND)
3037 return chrec_dont_know;
3038
3039 cmp = gimple_cond_code (cond);
3040 if (exit->flags & EDGE_TRUE_VALUE)
3041 cmp = invert_tree_comparison (cmp, false);
3042
3043 switch (cmp)
3044 {
3045 case EQ_EXPR:
3046 case NE_EXPR:
3047 case GT_EXPR:
3048 case GE_EXPR:
3049 case LT_EXPR:
3050 case LE_EXPR:
3051 op[0] = gimple_cond_lhs (cond);
3052 op[1] = gimple_cond_rhs (cond);
3053 break;
3054
3055 default:
3056 return chrec_dont_know;
3057 }
3058
3059 for (j = 0; j < 2; j++)
3060 {
3061 if (is_gimple_min_invariant (op[j]))
3062 {
3063 val[j] = op[j];
3064 next[j] = NULL_TREE;
3065 op[j] = NULL_TREE;
3066 }
3067 else
3068 {
3069 phi = get_base_for (loop, op[j]);
3070 if (!phi)
3071 return chrec_dont_know;
3072 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3073 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3074 }
3075 }
3076
3077 /* Don't issue signed overflow warnings. */
3078 fold_defer_overflow_warnings ();
3079
3080 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3081 {
3082 for (j = 0; j < 2; j++)
3083 aval[j] = get_val_for (op[j], val[j]);
3084
3085 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3086 if (acnd && integer_zerop (acnd))
3087 {
3088 fold_undefer_and_ignore_overflow_warnings ();
3089 if (dump_file && (dump_flags & TDF_DETAILS))
3090 fprintf (dump_file,
3091 "Proved that loop %d iterates %d times using brute force.\n",
3092 loop->num, i);
3093 return build_int_cst (unsigned_type_node, i);
3094 }
3095
3096 for (j = 0; j < 2; j++)
3097 {
3098 aval[j] = val[j];
3099 val[j] = get_val_for (next[j], val[j]);
3100 if (!is_gimple_min_invariant (val[j]))
3101 {
3102 fold_undefer_and_ignore_overflow_warnings ();
3103 return chrec_dont_know;
3104 }
3105 }
3106
3107 /* If the next iteration would use the same base values
3108 as the current one, there is no point looping further;
3109 all following iterations will be the same as this one. */
3110 if (val[0] == aval[0] && val[1] == aval[1])
3111 break;
3112 }
3113
3114 fold_undefer_and_ignore_overflow_warnings ();
3115
3116 return chrec_dont_know;
3117 }
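/* For illustration (an assumed example): in

     i = 1;
     do
       i *= 2;
     while (i != 64);

   the evolution of i is not affine, so the symbolic analysis gives
   up; but evaluating the exit test on the successive values
   2, 4, 8, ... lets loop_niter_by_eval find the first iteration in
   which i != 64 fails and return that constant.  */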
3118
3119 /* Finds the exit of LOOP through which the loop exits after a constant
3120 number of iterations and stores the exit edge to *EXIT. The constant
3121 giving the number of iterations of LOOP is returned. The number of
3122 iterations is determined using loop_niter_by_eval (i.e. by brute force
3123 evaluation). If we are unable to find an exit for which loop_niter_by_eval
3124 determines the number of iterations, chrec_dont_know is returned. */
3125
3126 tree
3127 find_loop_niter_by_eval (class loop *loop, edge *exit)
3128 {
3129 unsigned i;
3130 auto_vec<edge> exits = get_loop_exit_edges (loop);
3131 edge ex;
3132 tree niter = NULL_TREE, aniter;
3133
3134 *exit = NULL;
3135
3136 /* Loops with multiple exits are expensive to handle and less important. */
3137 if (!flag_expensive_optimizations
3138 && exits.length () > 1)
3139 return chrec_dont_know;
3140
3141 FOR_EACH_VEC_ELT (exits, i, ex)
3142 {
3143 if (!just_once_each_iteration_p (loop, ex->src))
3144 continue;
3145
3146 aniter = loop_niter_by_eval (loop, ex);
3147 if (chrec_contains_undetermined (aniter))
3148 continue;
3149
3150 if (niter
3151 && !tree_int_cst_lt (aniter, niter))
3152 continue;
3153
3154 niter = aniter;
3155 *exit = ex;
3156 }
3157
3158 return niter ? niter : chrec_dont_know;
3159 }
3160
3161 /*
3162
3163 Analysis of upper bounds on number of iterations of a loop.
3164
3165 */
3166
3167 static widest_int derive_constant_upper_bound_ops (tree, tree,
3168 enum tree_code, tree);
3169
3170 /* Returns a constant upper bound on the value of the right-hand side of
3171 an assignment statement STMT. */
3172
3173 static widest_int
3174 derive_constant_upper_bound_assign (gimple *stmt)
3175 {
3176 enum tree_code code = gimple_assign_rhs_code (stmt);
3177 tree op0 = gimple_assign_rhs1 (stmt);
3178 tree op1 = gimple_assign_rhs2 (stmt);
3179
3180 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3181 op0, code, op1);
3182 }
3183
3184 /* Returns a constant upper bound on the value of expression VAL. VAL
3185 is considered to be unsigned. If its type is signed, its value must
3186 be nonnegative. */
3187
3188 static widest_int
3189 derive_constant_upper_bound (tree val)
3190 {
3191 enum tree_code code;
3192 tree op0, op1, op2;
3193
3194 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3195 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3196 }
3197
3198 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3199 whose type is TYPE. The expression is considered to be unsigned. If
3200 its type is signed, its value must be nonnegative. */
3201
3202 static widest_int
3203 derive_constant_upper_bound_ops (tree type, tree op0,
3204 enum tree_code code, tree op1)
3205 {
3206 tree subtype, maxt;
3207 widest_int bnd, max, cst;
3208 gimple *stmt;
3209
3210 if (INTEGRAL_TYPE_P (type))
3211 maxt = TYPE_MAX_VALUE (type);
3212 else
3213 maxt = upper_bound_in_type (type, type);
3214
3215 max = wi::to_widest (maxt);
3216
3217 switch (code)
3218 {
3219 case INTEGER_CST:
3220 return wi::to_widest (op0);
3221
3222 CASE_CONVERT:
3223 subtype = TREE_TYPE (op0);
3224 if (!TYPE_UNSIGNED (subtype)
3225 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3226 that OP0 is nonnegative. */
3227 && TYPE_UNSIGNED (type)
3228 && !tree_expr_nonnegative_p (op0))
3229 {
3230 /* If we cannot prove that the casted expression is nonnegative,
3231 we cannot establish a more useful upper bound than the precision
3232 of the type gives us. */
3233 return max;
3234 }
3235
3236 /* We now know that op0 is a nonnegative value. Try deriving an upper
3237 bound for it. */
3238 bnd = derive_constant_upper_bound (op0);
3239
3240 /* If the bound does not fit in TYPE, the maximum value of TYPE could be
3241 attained. */
3242 if (wi::ltu_p (max, bnd))
3243 return max;
3244
3245 return bnd;
3246
3247 case PLUS_EXPR:
3248 case POINTER_PLUS_EXPR:
3249 case MINUS_EXPR:
3250 if (TREE_CODE (op1) != INTEGER_CST
3251 || !tree_expr_nonnegative_p (op0))
3252 return max;
3253
3254 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3255 choose the most logical way to treat this constant regardless
3256 of the signedness of the type. */
3257 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3258 if (code != MINUS_EXPR)
3259 cst = -cst;
3260
3261 bnd = derive_constant_upper_bound (op0);
3262
3263 if (wi::neg_p (cst))
3264 {
3265 cst = -cst;
3266 /* Avoid CST == 0x80000... */
3267 if (wi::neg_p (cst))
3268 return max;
3269
3270 /* OP0 + CST. We need to check that
3271 BND <= MAX (type) - CST. */
3272
3273 widest_int mmax = max - cst;
3274 if (wi::ltu_p (mmax, bnd))
3275 return max;
3276
3277 return bnd + cst;
3278 }
3279 else
3280 {
3281 /* OP0 - CST, where CST >= 0.
3282
3283 If TYPE is signed, we have already verified that OP0 >= 0, and we
3284 know that the result is nonnegative. This implies that
3285 VAL <= BND - CST.
3286
3287 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3288 otherwise the operation underflows.
3289 */
3290
3291 /* This should only happen if the type is unsigned; however, for
3292 buggy programs that use overflowing signed arithmetic even with
3293 -fno-wrapv, this condition may also be true for signed values. */
3294 if (wi::ltu_p (bnd, cst))
3295 return max;
3296
3297 if (TYPE_UNSIGNED (type))
3298 {
3299 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3300 wide_int_to_tree (type, cst));
3301 if (!tem || !integer_nonzerop (tem))
3302 return max;
3303 }
3304
3305 bnd -= cst;
3306 }
3307
3308 return bnd;
3309
3310 case FLOOR_DIV_EXPR:
3311 case EXACT_DIV_EXPR:
3312 if (TREE_CODE (op1) != INTEGER_CST
3313 || tree_int_cst_sign_bit (op1))
3314 return max;
3315
3316 bnd = derive_constant_upper_bound (op0);
3317 return wi::udiv_floor (bnd, wi::to_widest (op1));
3318
3319 case BIT_AND_EXPR:
3320 if (TREE_CODE (op1) != INTEGER_CST
3321 || tree_int_cst_sign_bit (op1))
3322 return max;
3323 return wi::to_widest (op1);
3324
3325 case SSA_NAME:
3326 stmt = SSA_NAME_DEF_STMT (op0);
3327 if (gimple_code (stmt) != GIMPLE_ASSIGN
3328 || gimple_assign_lhs (stmt) != op0)
3329 return max;
3330 return derive_constant_upper_bound_assign (stmt);
3331
3332 default:
3333 return max;
3334 }
3335 }
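/* For illustration (an assumed example with an unsigned type): for
   VAL = (v_1 & 63) + 5, the SSA_NAME and BIT_AND_EXPR cases bound
   v_1 & 63 by 63, and the PLUS_EXPR case then verifies that 63 does
   not exceed the maximum of the type minus 5 and returns the bound
   63 + 5 = 68.  */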
3336
3337 /* Emit a -Waggressive-loop-optimizations warning if needed. */
3338
3339 static void
3340 do_warn_aggressive_loop_optimizations (class loop *loop,
3341 widest_int i_bound, gimple *stmt)
3342 {
3343 /* Don't warn if the loop doesn't have a known constant bound. */
3344 if (!loop->nb_iterations
3345 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3346 || !warn_aggressive_loop_optimizations
3347 /* To avoid warning multiple times for the same loop,
3348 only start warning when we preserve loops. */
3349 || (cfun->curr_properties & PROP_loops) == 0
3350 /* Only warn once per loop. */
3351 || loop->warned_aggressive_loop_optimizations
3352 /* Only warn if undefined behavior gives us lower estimate than the
3353 known constant bound. */
3354 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3355 /* And undefined behavior happens unconditionally. */
3356 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3357 return;
3358
3359 edge e = single_exit (loop);
3360 if (e == NULL)
3361 return;
3362
3363 gimple *estmt = last_stmt (e->src);
3364 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3365 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3366 ? UNSIGNED : SIGNED);
3367 auto_diagnostic_group d;
3368 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3369 "iteration %s invokes undefined behavior", buf))
3370 inform (gimple_location (estmt), "within this loop");
3371 loop->warned_aggressive_loop_optimizations = true;
3372 }
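/* For illustration (an assumed example): in

     int a[4];
     for (i = 0; i < 10; i++)
       a[i] = i;

   the access a[i] makes every iteration past i == 3 undefined, which
   is below the known constant bound 10, so a warning of the form
   "iteration 4 invokes undefined behavior" would be emitted above.  */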
3373
3374 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3375 is true if the loop is exited immediately after AT_STMT, and this exit
3376 is taken at the latest when AT_STMT has been executed BOUND + 1 times.
3377 REALISTIC is true if BOUND is expected to be close to the real number
3378 of iterations. UPPER is true if we are sure the loop iterates at most
3379 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3380
3381 static void
3382 record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3383 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3384 {
3385 widest_int delta;
3386
3387 if (dump_file && (dump_flags & TDF_DETAILS))
3388 {
3389 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3390 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3391 fprintf (dump_file, " is %sexecuted at most ",
3392 upper ? "" : "probably ");
3393 print_generic_expr (dump_file, bound, TDF_SLIM);
3394 fprintf (dump_file, " (bounded by ");
3395 print_decu (i_bound, dump_file);
3396 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3397 }
3398
3399 /* If I_BOUND is just an estimate of BOUND, it is rarely close to the
3400 real number of iterations. */
3401 if (TREE_CODE (bound) != INTEGER_CST)
3402 realistic = false;
3403 else
3404 gcc_checking_assert (i_bound == wi::to_widest (bound));
3405
3406 /* If we have a guaranteed upper bound, record it in the appropriate
3407 list, unless this is an !is_exit bound (i.e. undefined behavior in
3408 at_stmt) in a loop with known constant number of iterations. */
3409 if (upper
3410 && (is_exit
3411 || loop->nb_iterations == NULL_TREE
3412 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3413 {
3414 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3415
3416 elt->bound = i_bound;
3417 elt->stmt = at_stmt;
3418 elt->is_exit = is_exit;
3419 elt->next = loop->bounds;
3420 loop->bounds = elt;
3421 }
3422
3423 /* Only if the statement is executed on every path to the loop latch can we
3424 directly infer the upper bound on the # of iterations of the loop. */
3425 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3426 upper = false;
3427
3428 /* Update the number of iteration estimates according to the bound.
3429 If AT_STMT is an exit then the loop latch is executed at most BOUND times,
3430 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3431 later if such a statement must be executed on the last iteration. */
3432 if (is_exit)
3433 delta = 0;
3434 else
3435 delta = 1;
3436 widest_int new_i_bound = i_bound + delta;
3437
3438 /* If an overflow occurred, ignore the result. */
3439 if (wi::ltu_p (new_i_bound, delta))
3440 return;
3441
3442 if (upper && !is_exit)
3443 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3444 record_niter_bound (loop, new_i_bound, realistic, upper);
3445 }
3446
3447 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3448 and doesn't overflow. */
3449
3450 static void
3451 record_control_iv (class loop *loop, class tree_niter_desc *niter)
3452 {
3453 struct control_iv *iv;
3454
3455 if (!niter->control.base || !niter->control.step)
3456 return;
3457
3458 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3459 return;
3460
3461 iv = ggc_alloc<control_iv> ();
3462 iv->base = niter->control.base;
3463 iv->step = niter->control.step;
3464 iv->next = loop->control_ivs;
3465 loop->control_ivs = iv;
3466
3467 return;
3468 }
3469
3470 /* This function returns TRUE if the conditions below are satisfied:
3471 1) VAR is an SSA variable.
3472 2) VAR is an IV:{base, step} in its defining loop.
3473 3) The IV doesn't overflow.
3474 4) Both base and step are integer constants.
3475 5) Base is the MIN/MAX value, depending on IS_MIN.
3476 The value of base is stored to INIT accordingly. */
3477
3478 static bool
3479 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3480 {
3481 if (TREE_CODE (var) != SSA_NAME)
3482 return false;
3483
3484 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3485 class loop *loop = loop_containing_stmt (def_stmt);
3486
3487 if (loop == NULL)
3488 return false;
3489
3490 affine_iv iv;
3491 if (!simple_iv (loop, loop, var, &iv, false))
3492 return false;
3493
3494 if (!iv.no_overflow)
3495 return false;
3496
3497 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3498 return false;
3499
3500 if (is_min == tree_int_cst_sign_bit (iv.step))
3501 return false;
3502
3503 *init = wi::to_wide (iv.base);
3504 return true;
3505 }
3506
3507 /* Record the estimate on number of iterations of LOOP based on the fact that
3508 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3509 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3510 estimated number of iterations is expected to be close to the real one.
3511 UPPER is true if we are sure the induction variable does not wrap. */
3512
3513 static void
3514 record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
3515 tree low, tree high, bool realistic, bool upper)
3516 {
3517 tree niter_bound, extreme, delta;
3518 tree type = TREE_TYPE (base), unsigned_type;
3519 tree orig_base = base;
3520
3521 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3522 return;
3523
3524 if (dump_file && (dump_flags & TDF_DETAILS))
3525 {
3526 fprintf (dump_file, "Induction variable (");
3527 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3528 fprintf (dump_file, ") ");
3529 print_generic_expr (dump_file, base, TDF_SLIM);
3530 fprintf (dump_file, " + ");
3531 print_generic_expr (dump_file, step, TDF_SLIM);
3532 fprintf (dump_file, " * iteration does not wrap in statement ");
3533 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3534 fprintf (dump_file, " in loop %d.\n", loop->num);
3535 }
3536
3537 unsigned_type = unsigned_type_for (type);
3538 base = fold_convert (unsigned_type, base);
3539 step = fold_convert (unsigned_type, step);
3540
3541 if (tree_int_cst_sign_bit (step))
3542 {
3543 wide_int min, max;
3544 extreme = fold_convert (unsigned_type, low);
3545 if (TREE_CODE (orig_base) == SSA_NAME
3546 && TREE_CODE (high) == INTEGER_CST
3547 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3548 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3549 || get_cst_init_from_scev (orig_base, &max, false))
3550 && wi::gts_p (wi::to_wide (high), max))
3551 base = wide_int_to_tree (unsigned_type, max);
3552 else if (TREE_CODE (base) != INTEGER_CST
3553 && dominated_by_p (CDI_DOMINATORS,
3554 loop->latch, gimple_bb (stmt)))
3555 base = fold_convert (unsigned_type, high);
3556 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3557 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3558 }
3559 else
3560 {
3561 wide_int min, max;
3562 extreme = fold_convert (unsigned_type, high);
3563 if (TREE_CODE (orig_base) == SSA_NAME
3564 && TREE_CODE (low) == INTEGER_CST
3565 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3566 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3567 || get_cst_init_from_scev (orig_base, &min, true))
3568 && wi::gts_p (min, wi::to_wide (low)))
3569 base = wide_int_to_tree (unsigned_type, min);
3570 else if (TREE_CODE (base) != INTEGER_CST
3571 && dominated_by_p (CDI_DOMINATORS,
3572 loop->latch, gimple_bb (stmt)))
3573 base = fold_convert (unsigned_type, low);
3574 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3575 }
3576
3577 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3578 would get out of the range. */
3579 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3580 widest_int max = derive_constant_upper_bound (niter_bound);
3581 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3582 }
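/* For illustration (an assumed example): for an unsigned IV with base
   0 and step 4 whose values are known to stay within <0, 250>, the
   code above computes delta = 250 - 0 and niter_bound = 250 / 4 = 62,
   so the statement is recorded as executing at most 62 + 1 = 63
   times.  */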
3583
3584 /* Determine information about the number of iterations of LOOP from the index
3585 IDX of a data reference accessed in STMT. Callback for
3586 for_each_index. */
3588
3589 struct ilb_data
3590 {
3591 class loop *loop;
3592 gimple *stmt;
3593 };
3594
3595 static bool
3596 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3597 {
3598 struct ilb_data *data = (struct ilb_data *) dta;
3599 tree ev, init, step;
3600 tree low, high, type, next;
3601 bool sign, upper = true, at_end = false;
3602 class loop *loop = data->loop;
3603
3604 if (TREE_CODE (base) != ARRAY_REF)
3605 return true;
3606
3607 /* For arrays at the end of the structure, we are not guaranteed that they
3608 do not really extend over their declared size. However, for arrays of
3609 size greater than one, this is unlikely to be intended. */
3610 if (array_at_struct_end_p (base))
3611 {
3612 at_end = true;
3613 upper = false;
3614 }
3615
3616 class loop *dloop = loop_containing_stmt (data->stmt);
3617 if (!dloop)
3618 return true;
3619
3620 ev = analyze_scalar_evolution (dloop, *idx);
3621 ev = instantiate_parameters (loop, ev);
3622 init = initial_condition (ev);
3623 step = evolution_part_in_loop_num (ev, loop->num);
3624
3625 if (!init
3626 || !step
3627 || TREE_CODE (step) != INTEGER_CST
3628 || integer_zerop (step)
3629 || tree_contains_chrecs (init, NULL)
3630 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3631 return true;
3632
3633 low = array_ref_low_bound (base);
3634 high = array_ref_up_bound (base);
3635
3636 /* The case of nonconstant bounds could be handled, but it would be
3637 complicated. */
3638 if (TREE_CODE (low) != INTEGER_CST
3639 || !high
3640 || TREE_CODE (high) != INTEGER_CST)
3641 return true;
3642 sign = tree_int_cst_sign_bit (step);
3643 type = TREE_TYPE (step);
3644
3645 /* An array of length 1 at the end of a structure most likely extends
3646 beyond its bounds. */
3647 if (at_end
3648 && operand_equal_p (low, high, 0))
3649 return true;
3650
3651 /* In case the relevant bound of the array does not fit in type, or
3652 it does, but bound + step (in type) still belongs into the range of the
3653 array, the index may wrap and still stay within the range of the array
3654 (consider e.g. if the array is indexed by the full range of
3655 unsigned char).
3656
3657 To make things simpler, we require both bounds to fit into type, although
3658 there are cases where this would not be strictly necessary. */
3659 if (!int_fits_type_p (high, type)
3660 || !int_fits_type_p (low, type))
3661 return true;
3662 low = fold_convert (type, low);
3663 high = fold_convert (type, high);
3664
3665 if (sign)
3666 next = fold_binary (PLUS_EXPR, type, low, step);
3667 else
3668 next = fold_binary (PLUS_EXPR, type, high, step);
3669
3670 if (tree_int_cst_compare (low, next) <= 0
3671 && tree_int_cst_compare (next, high) <= 0)
3672 return true;
3673
3674 /* If the access is not executed on every iteration, we must ensure that
3675 overflow cannot make the access valid again later. */
3676 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3677 && scev_probably_wraps_p (NULL_TREE,
3678 initial_condition_in_loop_num (ev, loop->num),
3679 step, data->stmt, loop, true))
3680 upper = false;
3681
3682 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3683 return true;
3684 }
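
/* For instance (a hypothetical testcase):

     int a[100];
     for (i = 0; i != n; i++)
       a[i] = 0;

   The index has evolution {0, +, 1}_1, LOW is 0 and HIGH is 99; since the
   access is undefined once i leaves [0, 99], the store can execute at most
   100 times, which bounds the number of iterations of the loop.  */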
3685
3686 /* Determine information about the number of iterations of LOOP from the
3687 bounds of arrays in the data reference REF accessed in STMT. */
3689
3690 static void
3691 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
3692 {
3693 struct ilb_data data;
3694
3695 data.loop = loop;
3696 data.stmt = stmt;
3697 for_each_index (&ref, idx_infer_loop_bounds, &data);
3698 }
3699
3700 /* Determine information about the number of iterations of LOOP from the
3701 way arrays are used in STMT. */
3703
3704 static void
3705 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
3706 {
3707 if (is_gimple_assign (stmt))
3708 {
3709 tree op0 = gimple_assign_lhs (stmt);
3710 tree op1 = gimple_assign_rhs1 (stmt);
3711
3712 /* For each memory access, analyze its access function
3713 and record a bound on the loop iteration domain. */
3714 if (REFERENCE_CLASS_P (op0))
3715 infer_loop_bounds_from_ref (loop, stmt, op0);
3716
3717 if (REFERENCE_CLASS_P (op1))
3718 infer_loop_bounds_from_ref (loop, stmt, op1);
3719 }
3720 else if (is_gimple_call (stmt))
3721 {
3722 tree arg, lhs;
3723 unsigned i, n = gimple_call_num_args (stmt);
3724
3725 lhs = gimple_call_lhs (stmt);
3726 if (lhs && REFERENCE_CLASS_P (lhs))
3727 infer_loop_bounds_from_ref (loop, stmt, lhs);
3728
3729 for (i = 0; i < n; i++)
3730 {
3731 arg = gimple_call_arg (stmt, i);
3732 if (REFERENCE_CLASS_P (arg))
3733 infer_loop_bounds_from_ref (loop, stmt, arg);
3734 }
3735 }
3736 }
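
/* E.g. for a hypothetical statement a[i_1] = b[i_2], both the store to
   a[i_1] and the load from b[i_2] are REFERENCE_CLASS_P operands, so each
   is handed to infer_loop_bounds_from_ref and may contribute its own
   bound on the iteration domain.  */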
3737
3738 /* Determine information about the number of iterations of LOOP from the
3739 fact that pointer arithmetic in STMT does not overflow. */
3740
3741 static void
3742 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
3743 {
3744 tree def, base, step, scev, type, low, high;
3745 tree var, ptr;
3746
3747 if (!is_gimple_assign (stmt)
3748 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3749 return;
3750
3751 def = gimple_assign_lhs (stmt);
3752 if (TREE_CODE (def) != SSA_NAME)
3753 return;
3754
3755 type = TREE_TYPE (def);
3756 if (!nowrap_type_p (type))
3757 return;
3758
3759 ptr = gimple_assign_rhs1 (stmt);
3760 if (!expr_invariant_in_loop_p (loop, ptr))
3761 return;
3762
3763 var = gimple_assign_rhs2 (stmt);
3764 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3765 return;
3766
3767 class loop *uloop = loop_containing_stmt (stmt);
3768 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3769 if (chrec_contains_undetermined (scev))
3770 return;
3771
3772 base = initial_condition_in_loop_num (scev, loop->num);
3773 step = evolution_part_in_loop_num (scev, loop->num);
3774
3775 if (!base || !step
3776 || TREE_CODE (step) != INTEGER_CST
3777 || tree_contains_chrecs (base, NULL)
3778 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3779 return;
3780
3781 low = lower_bound_in_type (type, type);
3782 high = upper_bound_in_type (type, type);
3783
3784 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3785 produce a NULL pointer. The contrary would mean NULL points to an object,
3786 while NULL is supposed to compare unequal with the address of all objects.
3787 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3788 NULL pointer since that would mean wrapping, which we assume here not to
3789 happen. So, we can exclude NULL from the valid range of pointer
3790 arithmetic. */
3791 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3792 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3793
3794 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3795 }
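
/* A sketch of the case handled above (hypothetical GIMPLE):

     # p_1 loop invariant
     q_2 = p_1 + i_3;   <-- POINTER_PLUS_EXPR

   Since pointer arithmetic is assumed not to wrap, the successive values
   of q_2 must stay within [LOW, HIGH]; with -fdelete-null-pointer-checks
   they must also avoid NULL, which is why LOW is raised from 0 to the
   alignment unit of the pointed-to type.  */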
3796
3797 /* Determine information about the number of iterations of LOOP from the
3798 fact that signed arithmetic in STMT does not overflow. */
3799
3800 static void
3801 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
3802 {
3803 tree def, base, step, scev, type, low, high;
3804
3805 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3806 return;
3807
3808 def = gimple_assign_lhs (stmt);
3809
3810 if (TREE_CODE (def) != SSA_NAME)
3811 return;
3812
3813 type = TREE_TYPE (def);
3814 if (!INTEGRAL_TYPE_P (type)
3815 || !TYPE_OVERFLOW_UNDEFINED (type))
3816 return;
3817
3818 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3819 if (chrec_contains_undetermined (scev))
3820 return;
3821
3822 base = initial_condition_in_loop_num (scev, loop->num);
3823 step = evolution_part_in_loop_num (scev, loop->num);
3824
3825 if (!base || !step
3826 || TREE_CODE (step) != INTEGER_CST
3827 || tree_contains_chrecs (base, NULL)
3828 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3829 return;
3830
3831 low = lower_bound_in_type (type, type);
3832 high = upper_bound_in_type (type, type);
3833 wide_int minv, maxv;
3834 if (get_range_info (def, &minv, &maxv) == VR_RANGE)
3835 {
3836 low = wide_int_to_tree (type, minv);
3837 high = wide_int_to_tree (type, maxv);
3838 }
3839
3840 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3841 }
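
/* For example (hypothetical GIMPLE, with signed overflow undefined):

     i_2 = i_1 + 1;   <-- signed IV with step 1

   The values of i_2 must stay within [INT_MIN, INT_MAX], or within the
   narrower value range recorded for i_2 if one is available, which again
   yields an upper bound on how many times the statement can execute.  */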
3842
3843 /* The following analyzers extract information on the bounds of LOOP
3844 from the following undefined behaviors:
3845
3846 - data references should not access elements over the statically
3847 allocated size,
3848
3849 - signed variables should not overflow when flag_wrapv is not set.
3850 */
3851
3852 static void
3853 infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
3854 {
3855 unsigned i;
3856 gimple_stmt_iterator bsi;
3857 basic_block bb;
3858 bool reliable;
3859
3860 for (i = 0; i < loop->num_nodes; i++)
3861 {
3862 bb = bbs[i];
3863
3864 /* If BB is not executed in each iteration of the loop, we cannot
3865 use the operations in it to infer a reliable upper bound on the
3866 number of iterations of the loop. However, we can use them as a
3867 guess. Reliable guesses come only from array bounds. */
3868 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3869
3870 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3871 {
3872 gimple *stmt = gsi_stmt (bsi);
3873
3874 infer_loop_bounds_from_array (loop, stmt);
3875
3876 if (reliable)
3877 {
3878 infer_loop_bounds_from_signedness (loop, stmt);
3879 infer_loop_bounds_from_pointer_arith (loop, stmt);
3880 }
3881 }
3882
3883 }
3884 }
3885
3886 /* Compare wide ints, callback for qsort. */
3887
3888 static int
3889 wide_int_cmp (const void *p1, const void *p2)
3890 {
3891 const widest_int *d1 = (const widest_int *) p1;
3892 const widest_int *d2 = (const widest_int *) p2;
3893 return wi::cmpu (*d1, *d2);
3894 }
3895
3896 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3897 Lookup by binary search. */
3898
3899 static int
3900 bound_index (vec<widest_int> bounds, const widest_int &bound)
3901 {
3902 unsigned int end = bounds.length ();
3903 unsigned int begin = 0;
3904
3905 /* Find a matching index by means of a binary search. */
3906 while (begin != end)
3907 {
3908 unsigned int middle = (begin + end) / 2;
3909 widest_int index = bounds[middle];
3910
3911 if (index == bound)
3912 return middle;
3913 else if (wi::ltu_p (index, bound))
3914 begin = middle + 1;
3915 else
3916 end = middle;
3917 }
3918 gcc_unreachable ();
3919 }
3920
3921 /* We recorded loop bounds only for statements dominating the loop latch (and
3922 thus executed in each loop iteration). If there are any bounds on statements not
3923 dominating the loop latch we can improve the estimate by walking the loop
3924 body and seeing if every path from loop header to loop latch contains
3925 some bounded statement. */
3926
3927 static void
3928 discover_iteration_bound_by_body_walk (class loop *loop)
3929 {
3930 class nb_iter_bound *elt;
3931 auto_vec<widest_int> bounds;
3932 vec<vec<basic_block> > queues = vNULL;
3933 vec<basic_block> queue = vNULL;
3934 ptrdiff_t queue_index;
3935 ptrdiff_t latch_index = 0;
3936
3937 /* Discover what bounds may interest us. */
3938 for (elt = loop->bounds; elt; elt = elt->next)
3939 {
3940 widest_int bound = elt->bound;
3941
3942 /* An exit terminates the loop at the given iteration, while a non-exit
3943 produces an undefined effect on the next iteration. */
3944 if (!elt->is_exit)
3945 {
3946 bound += 1;
3947 /* If an overflow occurred, ignore the result. */
3948 if (bound == 0)
3949 continue;
3950 }
3951
3952 if (!loop->any_upper_bound
3953 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3954 bounds.safe_push (bound);
3955 }
3956
3957 /* Exit early if there is nothing to do. */
3958 if (!bounds.exists ())
3959 return;
3960
3961 if (dump_file && (dump_flags & TDF_DETAILS))
3962 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3963
3964 /* Sort the bounds in increasing order. */
3965 bounds.qsort (wide_int_cmp);
3966
3967 /* For every basic block record the lowest bound that is guaranteed to
3968 terminate the loop. */
3969
3970 hash_map<basic_block, ptrdiff_t> bb_bounds;
3971 for (elt = loop->bounds; elt; elt = elt->next)
3972 {
3973 widest_int bound = elt->bound;
3974 if (!elt->is_exit)
3975 {
3976 bound += 1;
3977 /* If an overflow occurred, ignore the result. */
3978 if (bound == 0)
3979 continue;
3980 }
3981
3982 if (!loop->any_upper_bound
3983 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3984 {
3985 ptrdiff_t index = bound_index (bounds, bound);
3986 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3987 if (!entry)
3988 bb_bounds.put (gimple_bb (elt->stmt), index);
3989 else if ((ptrdiff_t)*entry > index)
3990 *entry = index;
3991 }
3992 }
3993
3994 hash_map<basic_block, ptrdiff_t> block_priority;
3995
3996 /* Perform shortest path discovery loop->header ... loop->latch.
3997
3998 The "distance" is given by the smallest loop bound of basic block
3999 present in the path and we look for path with largest smallest bound
4000 on it.
4001
4002 To avoid the need for fibonacci heap on double ints we simply compress
4003 double ints into indexes to BOUNDS array and then represent the queue
4004 as arrays of queues for every index.
4005 Index of BOUNDS.length() means that the execution of given BB has
4006 no bounds determined.
4007
4008 VISITED is a pointer map translating basic block into smallest index
4009 it was inserted into the priority queue with. */
4010 latch_index = -1;
4011
4012 /* Start walk in loop header with index set to infinite bound. */
4013 queue_index = bounds.length ();
4014 queues.safe_grow_cleared (queue_index + 1, true);
4015 queue.safe_push (loop->header);
4016 queues[queue_index] = queue;
4017 block_priority.put (loop->header, queue_index);
4018
4019 for (; queue_index >= 0; queue_index--)
4020 {
4021 if (latch_index < queue_index)
4022 {
4023 while (queues[queue_index].length ())
4024 {
4025 basic_block bb;
4026 ptrdiff_t bound_index = queue_index;
4027 edge e;
4028 edge_iterator ei;
4029
4030 queue = queues[queue_index];
4031 bb = queue.pop ();
4032
4033 /* OK, we later inserted the BB with a lower priority, skip it. */
4034 if (*block_priority.get (bb) > queue_index)
4035 continue;
4036
4037 /* See if we can improve the bound. */
4038 ptrdiff_t *entry = bb_bounds.get (bb);
4039 if (entry && *entry < bound_index)
4040 bound_index = *entry;
4041
4042 /* Insert successors into the queue, watch for the latch edge
4043 and record the greatest index we saw. */
4044 FOR_EACH_EDGE (e, ei, bb->succs)
4045 {
4046 bool insert = false;
4047
4048 if (loop_exit_edge_p (loop, e))
4049 continue;
4050
4051 if (e == loop_latch_edge (loop)
4052 && latch_index < bound_index)
4053 latch_index = bound_index;
4054 else if (!(entry = block_priority.get (e->dest)))
4055 {
4056 insert = true;
4057 block_priority.put (e->dest, bound_index);
4058 }
4059 else if (*entry < bound_index)
4060 {
4061 insert = true;
4062 *entry = bound_index;
4063 }
4064
4065 if (insert)
4066 queues[bound_index].safe_push (e->dest);
4067 }
4068 }
4069 }
4070 queues[queue_index].release ();
4071 }
4072
4073 gcc_assert (latch_index >= 0);
4074 if ((unsigned)latch_index < bounds.length ())
4075 {
4076 if (dump_file && (dump_flags & TDF_DETAILS))
4077 {
4078 fprintf (dump_file, "Found better loop bound ");
4079 print_decu (bounds[latch_index], dump_file);
4080 fprintf (dump_file, "\n");
4081 }
4082 record_niter_bound (loop, bounds[latch_index], false, true);
4083 }
4084
4085 queues.release ();
4086 }
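
/* A hypothetical example of what this walk improves:

     int a[100], b[100];
     for (i = 0; i < n; i++)
       if (i & 1)
         a[i] = 0;   <-- bounded, but the BB does not dominate the latch
       else
         b[i] = 0;   <-- likewise

   Neither bound alone limits the loop, yet every path from the header to
   the latch passes through one of the bounded statements, so the walk
   still derives an upper bound of about 100 iterations.  */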
4087
4088 /* See if every path across the loop goes through a statement that is known
4089 not to execute at the last iteration. In that case we can decrease the
4090 iteration count by 1. */
4091
4092 static void
4093 maybe_lower_iteration_bound (class loop *loop)
4094 {
4095 hash_set<gimple *> *not_executed_last_iteration = NULL;
4096 class nb_iter_bound *elt;
4097 bool found_exit = false;
4098 auto_vec<basic_block> queue;
4099 bitmap visited;
4100
4101 /* Collect all statements with an interesting (i.e. lower than
4102 nb_iterations_upper_bound) bound on them.
4103
4104 TODO: Due to the way record_estimate chooses which estimates to store,
4105 the bounds will always be nb_iterations_upper_bound-1. We can change
4106 this to also record statements not dominating the loop latch and update
4107 the walk below to the shortest path algorithm. */
4108 for (elt = loop->bounds; elt; elt = elt->next)
4109 {
4110 if (!elt->is_exit
4111 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4112 {
4113 if (!not_executed_last_iteration)
4114 not_executed_last_iteration = new hash_set<gimple *>;
4115 not_executed_last_iteration->add (elt->stmt);
4116 }
4117 }
4118 if (!not_executed_last_iteration)
4119 return;
4120
4121 /* Start DFS walk in the loop header and see if we can reach the
4122 loop latch or any of the exits (including statements with side
4123 effects that may terminate the loop otherwise) without visiting
4124 any of the statements known to have undefined effect on the last
4125 iteration. */
4126 queue.safe_push (loop->header);
4127 visited = BITMAP_ALLOC (NULL);
4128 bitmap_set_bit (visited, loop->header->index);
4129 found_exit = false;
4130
4131 do
4132 {
4133 basic_block bb = queue.pop ();
4134 gimple_stmt_iterator gsi;
4135 bool stmt_found = false;
4136
4137 /* Look for possible exits and statements bounding the execution. */
4138 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4139 {
4140 gimple *stmt = gsi_stmt (gsi);
4141 if (not_executed_last_iteration->contains (stmt))
4142 {
4143 stmt_found = true;
4144 break;
4145 }
4146 if (gimple_has_side_effects (stmt))
4147 {
4148 found_exit = true;
4149 break;
4150 }
4151 }
4152 if (found_exit)
4153 break;
4154
4155 /* If no bounding statement is found, continue the walk. */
4156 if (!stmt_found)
4157 {
4158 edge e;
4159 edge_iterator ei;
4160
4161 FOR_EACH_EDGE (e, ei, bb->succs)
4162 {
4163 if (loop_exit_edge_p (loop, e)
4164 || e == loop_latch_edge (loop))
4165 {
4166 found_exit = true;
4167 break;
4168 }
4169 if (bitmap_set_bit (visited, e->dest->index))
4170 queue.safe_push (e->dest);
4171 }
4172 }
4173 }
4174 while (queue.length () && !found_exit);
4175
4176 /* If every path through the loop reaches a bounding statement before an
4177 exit, then we know the last iteration of the loop has an undefined effect
4178 and we can decrease the number of iterations. */
4179
4180 if (!found_exit)
4181 {
4182 if (dump_file && (dump_flags & TDF_DETAILS))
4183 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4184 "undefined statement must be executed at the last iteration.\n");
4185 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4186 false, true);
4187 }
4188
4189 BITMAP_FREE (visited);
4190 delete not_executed_last_iteration;
4191 }
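
/* A hypothetical illustration:

     int a[100];
     for (i = 0; i <= 100; i++)   <-- the exit test allows 101 iterations
       a[i] = 0;                  <-- undefined when i == 100

   The access is executed on every path through the body and cannot run in
   the 101st iteration, so the recorded upper bound is decreased by one.  */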
4192
4193 /* Get expected upper bound for number of loop iterations for
4194 BUILT_IN_EXPECT_WITH_PROBABILITY for a condition COND. */
4195
4196 static tree
4197 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4198 {
4199 if (cond == NULL)
4200 return NULL_TREE;
4201
4202 tree lhs = gimple_cond_lhs (cond);
4203 if (TREE_CODE (lhs) != SSA_NAME)
4204 return NULL_TREE;
4205
4206 gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4207 gcall *def = dyn_cast<gcall *> (stmt);
4208 if (def == NULL)
4209 return NULL_TREE;
4210
4211 tree decl = gimple_call_fndecl (def);
4212 if (!decl
4213 || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4214 || gimple_call_num_args (stmt) != 3)
4215 return NULL_TREE;
4216
4217 tree c = gimple_call_arg (def, 1);
4218 tree condt = TREE_TYPE (lhs);
4219 tree res = fold_build2 (gimple_cond_code (cond),
4220 condt, c,
4221 gimple_cond_rhs (cond));
4222 if (TREE_CODE (res) != INTEGER_CST)
4223 return NULL_TREE;
4224
4225
4226 tree prob = gimple_call_arg (def, 2);
4227 tree t = TREE_TYPE (prob);
4228 tree one
4229 = build_real_from_int_cst (t,
4230 integer_one_node);
4231 if (integer_zerop (res))
4232 prob = fold_build2 (MINUS_EXPR, t, one, prob);
4233 tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4234 if (TREE_CODE (r) != REAL_CST)
4235 return NULL_TREE;
4236
4237 HOST_WIDE_INT probi
4238 = real_to_integer (TREE_REAL_CST_PTR (r));
4239 return build_int_cst (condt, probi);
4240 }
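
/* For instance (hypothetical GIMPLE):

     t_3 = __builtin_expect_with_probability (i_1 == n_2, 1, 0.01);
     if (t_3 != 0)
       goto exit;

   The exit condition is expected to hold with probability 0.01 in each
   iteration, so RES is nonzero, R folds to 1 / 0.01, and roughly 100 is
   returned as the expected upper bound on the number of iterations.  */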
4241
4242 /* Records estimates on the numbers of iterations of LOOP; estimates derived
4243 from undefined behavior are used only with flag_aggressive_loop_optimizations. */
4244
4245 void
4246 estimate_numbers_of_iterations (class loop *loop)
4247 {
4248 tree niter, type;
4249 unsigned i;
4250 class tree_niter_desc niter_desc;
4251 edge ex;
4252 widest_int bound;
4253 edge likely_exit;
4254
4255 /* Give up if we already have tried to compute an estimation. */
4256 if (loop->estimate_state != EST_NOT_COMPUTED)
4257 return;
4258
4259 loop->estimate_state = EST_AVAILABLE;
4260
4261 /* If we have a measured profile, use it to estimate the number of
4262 iterations. Normally this is recorded by branch_prob right after
4263 reading the profile. In case we however found a new loop, record the
4264 information here.
4265
4266 Explicitly check for profile status so we do not report
4267 wrong prediction hitrates for guessed loop iterations heuristics.
4268 Do not recompute already recorded bounds - we ought to be better on
4269 updating iteration bounds than updating profile in general and thus
4270 recomputing iteration bounds later in the compilation process will just
4271 introduce random roundoff errors. */
4272 if (!loop->any_estimate
4273 && loop->header->count.reliable_p ())
4274 {
4275 gcov_type nit = expected_loop_iterations_unbounded (loop);
4276 bound = gcov_type_to_wide_int (nit);
4277 record_niter_bound (loop, bound, true, false);
4278 }
4279
4280 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
4281 to be constant, we avoid undefined behavior implied bounds and instead
4282 diagnose those loops with -Waggressive-loop-optimizations. */
4283 number_of_latch_executions (loop);
4284
4285 basic_block *body = get_loop_body (loop);
4286 auto_vec<edge> exits = get_loop_exit_edges (loop, body);
4287 likely_exit = single_likely_exit (loop, exits);
4288 FOR_EACH_VEC_ELT (exits, i, ex)
4289 {
4290 if (ex == likely_exit)
4291 {
4292 gimple *stmt = last_stmt (ex->src);
4293 if (stmt != NULL)
4294 {
4295 gcond *cond = dyn_cast<gcond *> (stmt);
4296 tree niter_bound
4297 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4298 if (niter_bound != NULL_TREE)
4299 {
4300 widest_int max = derive_constant_upper_bound (niter_bound);
4301 record_estimate (loop, niter_bound, max, cond,
4302 true, true, false);
4303 }
4304 }
4305 }
4306
4307 if (!number_of_iterations_exit (loop, ex, &niter_desc,
4308 false, false, body))
4309 continue;
4310
4311 niter = niter_desc.niter;
4312 type = TREE_TYPE (niter);
4313 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4314 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4315 build_int_cst (type, 0),
4316 niter);
4317 record_estimate (loop, niter, niter_desc.max,
4318 last_stmt (ex->src),
4319 true, ex == likely_exit, true);
4320 record_control_iv (loop, &niter_desc);
4321 }
4322
4323 if (flag_aggressive_loop_optimizations)
4324 infer_loop_bounds_from_undefined (loop, body);
4325 free (body);
4326
4327 discover_iteration_bound_by_body_walk (loop);
4328
4329 maybe_lower_iteration_bound (loop);
4330
4331 /* If we know the exact number of iterations of this loop, try to
4332 not break code with undefined behavior by not recording smaller
4333 maximum number of iterations. */
4334 if (loop->nb_iterations
4335 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4336 {
4337 loop->any_upper_bound = true;
4338 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4339 }
4340 }
4341
4342 /* Sets NIT to the estimated number of executions of the latch of the
4343 LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
4344 large as the number of iterations. If we have no reliable estimate,
4345 the function returns false, otherwise returns true. */
4346
4347 bool
4348 estimated_loop_iterations (class loop *loop, widest_int *nit)
4349 {
4350 /* When SCEV information is available, try to update loop iterations
4351 estimate. Otherwise just return whatever we recorded earlier. */
4352 if (scev_initialized_p ())
4353 estimate_numbers_of_iterations (loop);
4354
4355 return (get_estimated_loop_iterations (loop, nit));
4356 }
4357
4358 /* Similar to estimated_loop_iterations, but returns the estimate only
4359 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4360 on the number of iterations of LOOP could not be derived, returns -1. */
4361
4362 HOST_WIDE_INT
4363 estimated_loop_iterations_int (class loop *loop)
4364 {
4365 widest_int nit;
4366 HOST_WIDE_INT hwi_nit;
4367
4368 if (!estimated_loop_iterations (loop, &nit))
4369 return -1;
4370
4371 if (!wi::fits_shwi_p (nit))
4372 return -1;
4373 hwi_nit = nit.to_shwi ();
4374
4375 return hwi_nit < 0 ? -1 : hwi_nit;
4376 }
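
/* A typical (hypothetical) caller pattern:

     HOST_WIDE_INT est = estimated_loop_iterations_int (loop);
     if (est >= 0 && est < 16)   <-- 16 is an arbitrary example cutoff
       ... transform loops known to be short ...

   A negative return value uniformly means "no usable estimate", so callers
   need not distinguish an unknown count from one exceeding HOST_WIDE_INT.  */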
4377
4378
4379 /* Sets NIT to an upper bound for the maximum number of executions of the
4380 latch of the LOOP. If we have no reliable estimate, the function returns
4381 false, otherwise returns true. */
4382
4383 bool
4384 max_loop_iterations (class loop *loop, widest_int *nit)
4385 {
4386 /* When SCEV information is available, try to update loop iterations
4387 estimate. Otherwise just return whatever we recorded earlier. */
4388 if (scev_initialized_p ())
4389 estimate_numbers_of_iterations (loop);
4390
4391 return get_max_loop_iterations (loop, nit);
4392 }
4393
4394 /* Similar to max_loop_iterations, but returns the estimate only
4395 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4396 on the number of iterations of LOOP could not be derived, returns -1. */
4397
4398 HOST_WIDE_INT
4399 max_loop_iterations_int (class loop *loop)
4400 {
4401 widest_int nit;
4402 HOST_WIDE_INT hwi_nit;
4403
4404 if (!max_loop_iterations (loop, &nit))
4405 return -1;
4406
4407 if (!wi::fits_shwi_p (nit))
4408 return -1;
4409 hwi_nit = nit.to_shwi ();
4410
4411 return hwi_nit < 0 ? -1 : hwi_nit;
4412 }
4413
4414 /* Sets NIT to a likely upper bound for the maximum number of executions of
4415 the latch of the LOOP. If we have no reliable estimate, the function returns
4416 false, otherwise returns true. */
4417
4418 bool
4419 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4420 {
4421 /* When SCEV information is available, try to update loop iterations
4422 estimate. Otherwise just return whatever we recorded earlier. */
4423 if (scev_initialized_p ())
4424 estimate_numbers_of_iterations (loop);
4425
4426 return get_likely_max_loop_iterations (loop, nit);
4427 }
4428
4429 /* Similar to likely_max_loop_iterations, but returns the estimate only
4430 if it fits into a HOST_WIDE_INT. If this is not the case, or the estimate
4431 on the number of iterations of LOOP could not be derived, returns -1. */
4432
4433 HOST_WIDE_INT
4434 likely_max_loop_iterations_int (class loop *loop)
4435 {
4436 widest_int nit;
4437 HOST_WIDE_INT hwi_nit;
4438
4439 if (!likely_max_loop_iterations (loop, &nit))
4440 return -1;
4441
4442 if (!wi::fits_shwi_p (nit))
4443 return -1;
4444 hwi_nit = nit.to_shwi ();
4445
4446 return hwi_nit < 0 ? -1 : hwi_nit;
4447 }
4448
4449 /* Returns an estimate for the number of executions of statements
4450 in the LOOP. For statements before the loop exit, this exceeds
4451 the number of executions of the latch by one. */
4452
4453 HOST_WIDE_INT
4454 estimated_stmt_executions_int (class loop *loop)
4455 {
4456 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4457 HOST_WIDE_INT snit;
4458
4459 if (nit == -1)
4460 return -1;
4461
4462 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4463
4464 /* If the computation overflows, return -1. */
4465 return snit < 0 ? -1 : snit;
4466 }
4467
4468 /* Sets NIT to the maximum number of executions of the latch of the
4469 LOOP, plus one. If we have no reliable estimate, the function returns
4470 false, otherwise returns true. */
4471
4472 bool
4473 max_stmt_executions (class loop *loop, widest_int *nit)
4474 {
4475 widest_int nit_minus_one;
4476
4477 if (!max_loop_iterations (loop, nit))
4478 return false;
4479
4480 nit_minus_one = *nit;
4481
4482 *nit += 1;
4483
4484 return wi::gtu_p (*nit, nit_minus_one);
4485 }
4486
4487 /* Sets NIT to the estimated maximum number of executions of the latch of the
4488 LOOP, plus one. If we have no likely estimate, the function returns
4489 false, otherwise returns true. */
4490
4491 bool
4492 likely_max_stmt_executions (class loop *loop, widest_int *nit)
4493 {
4494 widest_int nit_minus_one;
4495
4496 if (!likely_max_loop_iterations (loop, nit))
4497 return false;
4498
4499 nit_minus_one = *nit;
4500
4501 *nit += 1;
4502
4503 return wi::gtu_p (*nit, nit_minus_one);
4504 }
4505
4506 /* Sets NIT to the estimated number of executions of the latch of the
4507 LOOP, plus one. If we have no reliable estimate, the function returns
4508 false, otherwise returns true. */
4509
4510 bool
4511 estimated_stmt_executions (class loop *loop, widest_int *nit)
4512 {
4513 widest_int nit_minus_one;
4514
4515 if (!estimated_loop_iterations (loop, nit))
4516 return false;
4517
4518 nit_minus_one = *nit;
4519
4520 *nit += 1;
4521
4522 return wi::gtu_p (*nit, nit_minus_one);
4523 }
4524
4525 /* Records estimates on numbers of iterations of loops. */
4526
4527 void
4528 estimate_numbers_of_iterations (function *fn)
4529 {
4530 class loop *loop;
4531
4532 /* We don't want to issue signed overflow warnings while getting
4533 loop iteration estimates. */
4534 fold_defer_overflow_warnings ();
4535
4536 FOR_EACH_LOOP_FN (fn, loop, 0)
4537 estimate_numbers_of_iterations (loop);
4538
4539 fold_undefer_and_ignore_overflow_warnings ();
4540 }
4541
4542 /* Returns true if statement S1 dominates statement S2. */
4543
4544 bool
4545 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4546 {
4547 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4548
4549 if (!bb1
4550 || s1 == s2)
4551 return true;
4552
4553 if (bb1 == bb2)
4554 {
4555 gimple_stmt_iterator bsi;
4556
4557 if (gimple_code (s2) == GIMPLE_PHI)
4558 return false;
4559
4560 if (gimple_code (s1) == GIMPLE_PHI)
4561 return true;
4562
4563 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4564 if (gsi_stmt (bsi) == s1)
4565 return true;
4566
4567 return false;
4568 }
4569
4570 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4571 }
4572
4573 /* Returns true when we can prove that the number of executions of
4574 STMT in the loop is at most NITER, according to the bound on
4575 the number of executions of the statement NITER_BOUND->stmt recorded in
4576 NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
4577
4578 ??? This code can become quite a CPU hog - we can have many bounds,
4579 and a large basic block forces stmt_dominates_stmt_p to be queried
4580 many times, so the whole thing is O(n^2) for each
4581 scev_probably_wraps_p invocation (and that can happen n times).
4582
4583 It would make more sense (and give better answers) to remember BB
4584 bounds computed by discover_iteration_bound_by_body_walk. */
4585
4586 static bool
4587 n_of_executions_at_most (gimple *stmt,
4588 class nb_iter_bound *niter_bound,
4589 tree niter)
4590 {
4591 widest_int bound = niter_bound->bound;
4592 tree nit_type = TREE_TYPE (niter), e;
4593 enum tree_code cmp;
4594
4595 gcc_assert (TYPE_UNSIGNED (nit_type));
4596
4597 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4598 the number of iterations is small. */
4599 if (!wi::fits_to_tree_p (bound, nit_type))
4600 return false;
4601
4602 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4603 times. This means that:
4604
4605 -- if NITER_BOUND->is_exit is true, then everything after
4606 it is executed at most NITER_BOUND->bound times.
4607
4608 -- If NITER_BOUND->is_exit is false and we can prove that when STMT
4609 is executed, NITER_BOUND->stmt is executed as well in the same
4610 iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
4611
4612 If we can determine that NITER_BOUND->stmt is always executed
4613 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4614 We conclude this when both statements belong to the same
4615 basic block, STMT is before NITER_BOUND->stmt and there are no
4616 statements with side effects in between. */
4617
4618 if (niter_bound->is_exit)
4619 {
4620 if (stmt == niter_bound->stmt
4621 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4622 return false;
4623 cmp = GE_EXPR;
4624 }
4625 else
4626 {
4627 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4628 {
4629 gimple_stmt_iterator bsi;
4630 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4631 || gimple_code (stmt) == GIMPLE_PHI
4632 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4633 return false;
4634
4635 /* By stmt_dominates_stmt_p we already know that STMT appears
4636 before NITER_BOUND->STMT. Still need to test that the loop
4637 cannot be terminated by a side effect in between. */
4638 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4639 gsi_next (&bsi))
4640 if (gimple_has_side_effects (gsi_stmt (bsi)))
4641 return false;
4642 bound += 1;
4643 if (bound == 0
4644 || !wi::fits_to_tree_p (bound, nit_type))
4645 return false;
4646 }
4647 cmp = GT_EXPR;
4648 }
4649
4650 e = fold_binary (cmp, boolean_type_node,
4651 niter, wide_int_to_tree (nit_type, bound));
4652 return e && integer_nonzerop (e);
4653 }
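
/* Worked example of the final comparison (hypothetical numbers): for a
   non-exit NITER_BOUND with bound 10 whose statement dominates STMT, we
   require NITER > 10; if STMT instead merely precedes NITER_BOUND->stmt
   in the same basic block with no side effects in between, BOUND is first
   bumped to 11 to account for the extra iteration in which the undefined
   effect itself occurs.  */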
4654
4655 /* Returns true if the arithmetic in TYPE can be assumed not to wrap. */
4656
4657 bool
4658 nowrap_type_p (tree type)
4659 {
4660 if (ANY_INTEGRAL_TYPE_P (type)
4661 && TYPE_OVERFLOW_UNDEFINED (type))
4662 return true;
4663
4664 if (POINTER_TYPE_P (type))
4665 return true;
4666
4667 return false;
4668 }
4669
4670 /* Return true if we can prove LOOP is exited before evolution of induction
4671 variable {BASE, STEP} overflows with respect to its type bound. */
4672
4673 static bool
4674 loop_exits_before_overflow (tree base, tree step,
4675 gimple *at_stmt, class loop *loop)
4676 {
4677 widest_int niter;
4678 struct control_iv *civ;
4679 class nb_iter_bound *bound;
4680 tree e, delta, step_abs, unsigned_base;
4681 tree type = TREE_TYPE (step);
4682 tree unsigned_type, valid_niter;
4683
4684 /* Don't issue signed overflow warnings. */
4685 fold_defer_overflow_warnings ();
4686
4687 /* Compute the number of iterations before we reach the bound of the
4688 type, and verify that the loop is exited before this occurs. */
4689 unsigned_type = unsigned_type_for (type);
4690 unsigned_base = fold_convert (unsigned_type, base);
4691
4692 if (tree_int_cst_sign_bit (step))
4693 {
4694 tree extreme = fold_convert (unsigned_type,
4695 lower_bound_in_type (type, type));
4696 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4697 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4698 fold_convert (unsigned_type, step));
4699 }
4700 else
4701 {
4702 tree extreme = fold_convert (unsigned_type,
4703 upper_bound_in_type (type, type));
4704 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4705 step_abs = fold_convert (unsigned_type, step);
4706 }
4707
4708 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4709
4710 estimate_numbers_of_iterations (loop);
4711
4712 if (max_loop_iterations (loop, &niter)
4713 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4714 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4715 wide_int_to_tree (TREE_TYPE (valid_niter),
4716 niter))) != NULL
4717 && integer_nonzerop (e))
4718 {
4719 fold_undefer_and_ignore_overflow_warnings ();
4720 return true;
4721 }
4722 if (at_stmt)
4723 for (bound = loop->bounds; bound; bound = bound->next)
4724 {
4725 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4726 {
4727 fold_undefer_and_ignore_overflow_warnings ();
4728 return true;
4729 }
4730 }
4731 fold_undefer_and_ignore_overflow_warnings ();
4732
4733 /* Try to prove the loop is exited before {base, step} overflows with the
4734 help of analyzed loop control IV. This is done only for IVs with
4735 constant step because otherwise we don't have the information. */
4736 if (TREE_CODE (step) == INTEGER_CST)
4737 {
4738 for (civ = loop->control_ivs; civ; civ = civ->next)
4739 {
4740 enum tree_code code;
4741 tree civ_type = TREE_TYPE (civ->step);
4742
4743 /* Have to consider type difference because operand_equal_p ignores
4744 that for constants. */
4745 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4746 || element_precision (type) != element_precision (civ_type))
4747 continue;
4748
4749 /* Only consider control IV with same step. */
4750 if (!operand_equal_p (step, civ->step, 0))
4751 continue;
4752
4753 /* Done proving if this is a no-overflow control IV. */
4754 if (operand_equal_p (base, civ->base, 0))
4755 return true;
4756
4757 /* The control IV is recorded after expanding simple operations,
4758 so here we expand BASE and compare it too. */
4759 tree expanded_base = expand_simple_operations (base);
4760 if (operand_equal_p (expanded_base, civ->base, 0))
4761 return true;
4762
4763 /* If this is a before stepping control IV, in other words, we have
4764
4765 {civ_base, step} = {base + step, step}
4766
4767 Because civ {base + step, step} doesn't overflow during loop
4768 iterations, {base, step} will not overflow if we can prove the
4769 operation "base + step" does not overflow. Specifically, we try
4770 to prove that the conditions below are satisfied:
4771
4772 base <= UPPER_BOUND (type) - step ;;step > 0
4773 base >= LOWER_BOUND (type) - step ;;step < 0
4774
4775 by proving the reverse conditions are false using the loop's initial
4776 condition. */
4777 if (POINTER_TYPE_P (TREE_TYPE (base)))
4778 code = POINTER_PLUS_EXPR;
4779 else
4780 code = PLUS_EXPR;
4781
4782 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4783 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4784 expanded_base, step);
4785 if (operand_equal_p (stepped, civ->base, 0)
4786 || operand_equal_p (expanded_stepped, civ->base, 0))
4787 {
4788 tree extreme;
4789
4790 if (tree_int_cst_sign_bit (step))
4791 {
4792 code = LT_EXPR;
4793 extreme = lower_bound_in_type (type, type);
4794 }
4795 else
4796 {
4797 code = GT_EXPR;
4798 extreme = upper_bound_in_type (type, type);
4799 }
4800 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4801 e = fold_build2 (code, boolean_type_node, base, extreme);
4802 e = simplify_using_initial_conditions (loop, e);
4803 if (integer_zerop (e))
4804 return true;
4805 }
4806 }
4807 }
4808
4809 return false;
4810 }
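
/* Concretely (hypothetical numbers): for {BASE, STEP} = {100, 5} in a
   signed 8-bit type, VALID_NITER = (127 - 100) / 5 rounded down = 5. If
   max_loop_iterations shows that the latch runs at most 4 times, the IV
   stops at 100 + 4 * 5 = 120 <= 127, so it cannot overflow and the first
   test above returns true.  */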
4811
4812 /* VAR is a SCEV variable whose evolution part is the constant STEP. This
4813 function proves that VAR can't overflow by using value range info. If
4814 VAR's value range is [MIN, MAX], this can be proven by:
4815 MAX + step doesn't overflow ; if step > 0
4816 or
4817 MIN + step doesn't underflow ; if step < 0.
4818
4819 We can only do this if VAR is computed in every loop iteration, i.e., its
4820 definition has to dominate the loop latch. Consider the example below:
4821
4822 {
4823 unsigned int i;
4824
4825 <bb 3>:
4826
4827 <bb 4>:
4828 # RANGE [0, 4294967294] NONZERO 65535
4829 # i_21 = PHI <0(3), i_18(9)>
4830 if (i_21 != 0)
4831 goto <bb 6>;
4832 else
4833 goto <bb 8>;
4834
4835 <bb 6>:
4836 # RANGE [0, 65533] NONZERO 65535
4837 _6 = i_21 + 4294967295;
4838 # RANGE [0, 65533] NONZERO 65535
4839 _7 = (long unsigned int) _6;
4840 # RANGE [0, 524264] NONZERO 524280
4841 _8 = _7 * 8;
4842 # PT = nonlocal escaped
4843 _9 = a_14 + _8;
4844 *_9 = 0;
4845
4846 <bb 8>:
4847 # RANGE [1, 65535] NONZERO 65535
4848 i_18 = i_21 + 1;
4849 if (i_18 >= 65535)
4850 goto <bb 10>;
4851 else
4852 goto <bb 9>;
4853
4854 <bb 9>:
4855 goto <bb 4>;
4856
4857 <bb 10>:
4858 return;
4859 }
4860
4861 VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), so we
4862 can't use _6 to prove no-overflow for _7. In fact, var _7 takes the value
4863 sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime, rather
4864 than (4294967295, 4294967296, ...). */
4865
4866 static bool
4867 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
4868 {
4869 tree type;
4870 wide_int minv, maxv, diff, step_wi;
4871 enum value_range_kind rtype;
4872
4873 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4874 return false;
4875
4876 /* Check if VAR is evaluated in every loop iteration. That's not the case
4877 if VAR is a default definition or does not dominate the loop's latch. */
4878 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4879 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4880 return false;
4881
4882 rtype = get_range_info (var, &minv, &maxv);
4883 if (rtype != VR_RANGE)
4884 return false;
4885
4886 /* VAR is a SCEV whose evolution part is STEP and whose value range
4887 is [MIN, MAX]; we can prove it does not overflow when:
4888
4889 type_MAX - MAX >= step ; if step > 0
4890 MIN - type_MIN >= |step| ; if step < 0.
4891
4892 Otherwise VAR would have to take a value outside its range, which cannot happen. */
4893 step_wi = wi::to_wide (step);
4894 type = TREE_TYPE (var);
4895 if (tree_int_cst_sign_bit (step))
4896 {
4897 diff = minv - wi::to_wide (lower_bound_in_type (type, type));
4898 step_wi = - step_wi;
4899 }
4900 else
4901 diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
4902
4903 return (wi::geu_p (diff, step_wi));
4904 }
4905
4906 /* Return false only when the induction variable BASE + STEP * I is
4907 known not to overflow: i.e. when the number of iterations is small
4908 enough with respect to the step and initial condition in order to
4909 keep the evolution confined in TYPE's bounds. Return true when the
4910 iv is known to overflow or when the property is not computable.
4911
4912 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4913 the rules for overflow of the given language apply (e.g., that signed
4914 arithmetic in C does not overflow).
4915
4916 If VAR is an SSA variable, this function also returns false if VAR can
4917 be proven not to overflow with value range info. */
4918
4919 bool
4920 scev_probably_wraps_p (tree var, tree base, tree step,
4921 gimple *at_stmt, class loop *loop,
4922 bool use_overflow_semantics)
4923 {
4924 /* FIXME: We really need something like
4925 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4926
4927 We used to test for the following situation that frequently appears
4928 during address arithmetic:
4929
4930 D.1621_13 = (long unsigned intD.4) D.1620_12;
4931 D.1622_14 = D.1621_13 * 8;
4932 D.1623_15 = (doubleD.29 *) D.1622_14;
4933
4934 And derived that the sequence corresponding to D_14
4935 can be proved to not wrap because it is used for computing a
4936 memory access; however, this is not really the case -- for example,
4937 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4938 2032, 2040, 0, 8, ..., but the code is still legal. */
4939
4940 if (chrec_contains_undetermined (base)
4941 || chrec_contains_undetermined (step))
4942 return true;
4943
4944 if (integer_zerop (step))
4945 return false;
4946
4947 /* If we can use the fact that signed and pointer arithmetic does not
4948 wrap, we are done. */
4949 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4950 return false;
4951
4952 /* To be able to use estimates on number of iterations of the loop,
4953 we must have an upper bound on the absolute value of the step. */
4954 if (TREE_CODE (step) != INTEGER_CST)
4955 return true;
4956
4957 /* Check if VAR can be proven not to overflow with value range info. */
4958 if (var && TREE_CODE (var) == SSA_NAME
4959 && scev_var_range_cant_overflow (var, step, loop))
4960 return false;
4961
4962 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4963 return false;
4964
4965 /* At this point we still don't have a proof that the iv does not
4966 overflow: give up. */
4967 return true;
4968 }
4969
4970 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4971
4972 void
4973 free_numbers_of_iterations_estimates (class loop *loop)
4974 {
4975 struct control_iv *civ;
4976 class nb_iter_bound *bound;
4977
4978 loop->nb_iterations = NULL;
4979 loop->estimate_state = EST_NOT_COMPUTED;
4980 for (bound = loop->bounds; bound;)
4981 {
4982 class nb_iter_bound *next = bound->next;
4983 ggc_free (bound);
4984 bound = next;
4985 }
4986 loop->bounds = NULL;
4987
4988 for (civ = loop->control_ivs; civ;)
4989 {
4990 struct control_iv *next = civ->next;
4991 ggc_free (civ);
4992 civ = next;
4993 }
4994 loop->control_ivs = NULL;
4995 }
4996
4997 /* Frees the information on upper bounds on numbers of iterations of loops. */
4998
4999 void
5000 free_numbers_of_iterations_estimates (function *fn)
5001 {
5002 class loop *loop;
5003
5004 FOR_EACH_LOOP_FN (fn, loop, 0)
5005 free_numbers_of_iterations_estimates (loop);
5006 }
5007
5008 /* Substitute value VAL for ssa name NAME inside expressions held
5009 at LOOP. */
5010
5011 void
5012 substitute_in_loop_info (class loop *loop, tree name, tree val)
5013 {
5014 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
5015 }