tree-vrp (extract_range_from_binary_expr_1): Treat all divisions by zero as VR_UNDEFINED.
/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "intl.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "target.h"
#include "case-cfn-macros.h"
#include "params.h"
#include "alloc-pool.h"
#include "domwalk.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "attribs.h"
#include "vr-values.h"
#include "builtins.h"
#include "wide-int-range.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
	  && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

vec<edge> to_remove_edges;
vec<switch_update> to_update_switch_stmts;


/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
	  || (type_max != NULL_TREE
	      && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
	  || (type_min != NULL_TREE
	      && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_type
intersect_range_with_nonzero_bits (enum value_range_type vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
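
/* For example, given the VR_RANGE [1, 10] and NONZERO_BITS == 0b1100,
   the only values in the range with no bits outside the mask are 4
   and 8, so the result is the VR_RANGE [4, 8]:
   wi::round_up_for_mask (1, 0b1100) == 4 and
   wi::round_down_for_mask (10, 0b1100) == 8.  */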

/* Set value range VR to VR_UNDEFINED.  */

static inline void
set_value_range_to_undefined (value_range *vr)
{
  vr->type = VR_UNDEFINED;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to VR_VARYING.  */

void
set_value_range_to_varying (value_range *vr)
{
  vr->type = VR_VARYING;
  vr->min = vr->max = NULL_TREE;
  if (vr->equiv)
    bitmap_clear (vr->equiv);
}

/* Set value range VR to {T, MIN, MAX, EQUIV}.  */

void
set_value_range (value_range *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
  /* Check the validity of the range.  */
  if (flag_checking
      && (t == VR_RANGE || t == VR_ANTI_RANGE))
    {
      int cmp;

      gcc_assert (min && max);

      gcc_assert (!TREE_OVERFLOW_P (min) && !TREE_OVERFLOW_P (max));

      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
    }

  if (flag_checking
      && (t == VR_UNDEFINED || t == VR_VARYING))
    {
      gcc_assert (min == NULL_TREE && max == NULL_TREE);
      gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
    }

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}


/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* If MIN and MAX are in the wrong order, swap them; this also
     requires adjusting the VR type.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one-bit precision, if max < min the swapped range covers
	 all values, so for VR_RANGE it is varying and for VR_ANTI_RANGE
	 it is empty; drop to varying in both cases.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
	 values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
		     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set_value_range (vr, t, min, max, equiv);
}
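
/* For example, for an unsigned char type the wrapping VR_RANGE
   [250, 5], which stands for [250, 255] U [0, 5], is canonicalized
   here to the VR_ANTI_RANGE ~[6, 249].  */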

/* Copy value range FROM into value range TO.  */

void
copy_value_range (value_range *to, const value_range *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}

/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag.  */

void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
set_value_range_to_nonnull (value_range *vr, tree type)
{
  tree zero = build_int_cst (type, 0);
  set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
}


/* Set value range VR to a NULL range of type TYPE.  */

void
set_value_range_to_null (value_range *vr, tree type)
{
  set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
	  || ((!b1 || bitmap_empty_p (b1))
	      && (!b2 || bitmap_empty_p (b2)))
	  || (b1 && b2
	      && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is [0, 0].  */

static inline bool
range_is_null (const value_range *vr)
{
  return vr->type == VR_RANGE
	 && integer_zerop (vr->min)
	 && integer_zerop (vr->max);
}

/* Return true if max and min of VR are INTEGER_CST.  It need not be
   a singleton.  */

bool
range_int_cst_p (const value_range *vr)
{
  return (vr->type == VR_RANGE
	  && TREE_CODE (vr->max) == INTEGER_CST
	  && TREE_CODE (vr->min) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range *vr)
{
  return (range_int_cst_p (vr)
	  && tree_int_cst_equal (vr->min, vr->max));
}

/* Return true if value range VR involves at least one symbol.  */

bool
symbolic_range_p (const value_range *vr)
{
  return (!is_gimple_min_invariant (vr->min)
	  || !is_gimple_min_invariant (vr->max));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
	{
	  neg_ = (TREE_CODE (t) == MINUS_EXPR);
	  inv_ = TREE_OPERAND (t, 0);
	  t = TREE_OPERAND (t, 1);
	}
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	{
	  neg_ = false;
	  inv_ = TREE_OPERAND (t, 1);
	  t = TREE_OPERAND (t, 0);
	}
      else
	return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}
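
/* For example, for T == x_1 + 5 this returns x_1 with *NEG == false
   and *INV == 5; for T == 3 - x_1 it returns x_1 with *NEG == true
   and *INV == 3.  */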

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

   -2 if VAL1 and VAL2 cannot be compared at compile-time,
   -1 if VAL1 < VAL2,
   0 if VAL1 == VAL2,
   +1 if VAL1 > VAL2, and
   +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !TREE_NO_WARNING (val1))
	  && (!inv2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets TREE_NO_WARNING to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !TREE_NO_WARNING (val1))
	  && (!sym2 || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
	  != wi::cmp (diff, wi::to_wide (cst), sgn))
	{
	  const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
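
/* For example, when signed overflow is undefined, comparing x_1 + 10
   with x_1 + 3 returns +1 (and sets *STRICT_OVERFLOW_P): both values
   use the same symbol, so only the invariants 10 and 3 are compared.  */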

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}


/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
   0 if VAL is not inside [MIN, MAX],
   -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}


/* Return true if value ranges VR0 and VR1 have a non-empty
   intersection.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

static inline bool
value_ranges_intersect_p (const value_range *vr0, const value_range *vr1)
{
  /* The value ranges do not intersect if the maximum of the first range is
     less than the minimum of the second range or vice versa.
     When those relations are unknown, we can't do any better.  */
  if (operand_less_p (vr0->max, vr1->min) != 0)
    return false;
  if (operand_less_p (vr1->max, vr0->min) != 0)
    return false;
  return true;
}


/* Return TRUE if *VR includes the value zero.  */

bool
range_includes_zero_p (const value_range *vr)
{
  if (vr->type == VR_VARYING)
    return true;

  /* Ughh, we don't know.  We choose not to optimize.  */
  if (vr->type == VR_UNDEFINED)
    return true;

  tree zero = build_int_cst (TREE_TYPE (vr->min), 0);
  if (vr->type == VR_ANTI_RANGE)
    {
      int res = value_inside_range (zero, vr->min, vr->max);
      return res == 0 || res == -2;
    }
  return value_inside_range (zero, vr->min, vr->max) != 0;
}

/* Return true if *VR is known to only contain nonnegative values.  */

static inline bool
value_range_nonnegative_p (const value_range *vr)
{
  /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
     which would return a useful value should be encoded as a
     VR_RANGE.  */
  if (vr->type == VR_RANGE)
    {
      int result = compare_values (vr->min, integer_zero_node);
      return (result == 0 || result == 1);
    }

  return false;
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

tree
value_range_constant_singleton (const value_range *vr)
{
  if (vr->type == VR_RANGE
      && vrp_operand_equal_p (vr->min, vr->max)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

bool
vrp_set_zero_nonzero_bits (const tree expr_type,
			   const value_range *vr,
			   wide_int *may_be_nonzero,
			   wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
					wi::to_wide (vr->min),
					wi::to_wide (vr->max),
					*may_be_nonzero, *must_be_nonzero);
  return true;
}
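
/* For example, for the constant range [4, 5] (0b100 and 0b101) bit 2
   is set in both bounds and only bit 0 varies, so *MAY_BE_NONZERO is
   0b101 and *MUST_BE_NONZERO is 0b100.  */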

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range *ar,
			value_range *vr0, value_range *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max = wide_int_to_tree (type, wi::to_wide (ar->min) - 1);
    }
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min = wide_int_to_tree (type, wi::to_wide (ar->max) + 1);
      vr1->max = vrp_val_max (type);
    }
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
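
/* For example, for int the anti-range ~[0, 5] splits into
   *VR0 = [INT_MIN, -1] and *VR1 = [6, INT_MAX], while ~[INT_MIN, 5]
   needs only the single range *VR0 = [6, INT_MAX].  */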

/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */

static void inline
extract_range_into_wide_ints (const value_range *vr,
			      signop sign, unsigned prec,
			      wide_int &wmin, wide_int &wmax)
{
  if ((vr->type == VR_RANGE
       || vr->type == VR_ANTI_RANGE)
      && TREE_CODE (vr->min) == INTEGER_CST
      && TREE_CODE (vr->max) == INTEGER_CST)
    {
      wmin = wi::to_wide (vr->min);
      wmax = wi::to_wide (vr->max);
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_shift_undefined_p.  */

static inline bool
vrp_shift_undefined_p (const value_range &shifter, unsigned prec)
{
  tree type = TREE_TYPE (shifter.min);
  return wide_int_range_shift_undefined_p (TYPE_SIGN (type), prec,
					   wi::to_wide (shifter.min),
					   wi::to_wide (shifter.max));
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range *vr,
				      enum tree_code code,
				      const value_range *vr0,
				      const value_range *vr1)
{
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert (vr0->type == VR_RANGE && vr0->type == vr1->type);

  tree type = TREE_TYPE (vr0->min);
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min);
  wide_int vr0_ub = wi::to_wide (vr0->max);
  wide_int vr1_lb = wi::to_wide (vr1->min);
  wide_int vr1_ub = wi::to_wide (vr1->max);
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
					code, TYPE_SIGN (type), prec,
					vr0_lb, vr0_ub, vr1_lb, vr1_ub,
					overflow_undefined, overflow_wraps))
    set_and_canonicalize_value_range (vr, VR_RANGE,
				      wide_int_to_tree (type, res_lb),
				      wide_int_to_tree (type, res_ub), NULL);
  else
    set_value_range_to_varying (vr);
}
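
/* For example, MULT_EXPR of the int ranges [2, 3] and [10, 11] yields
   [20, 33]: the minimum and maximum of the four cross products
   2*10, 2*11, 3*10 and 3*11.  */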

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic part of OPn, if it has one.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
		       tree sym_op0, tree sym_op1,
		       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);
  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
				 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
	 undefined overflow.  */
      if (!minus_p
	  || neg_op1
	  || TYPE_OVERFLOW_WRAPS (type))
	bound = build_symbolic_expr (type, sym_op1,
				     neg_op1 ^ minus_p, bound);
      else
	bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is set to wi::OVF_UNDERFLOW if an underflow occurred,
   wi::OVF_OVERFLOW if an overflow occurred, or wi::OVF_NONE if
   neither occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
	       tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
	wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
	wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
	wi = wi::neg (wi::to_wide (op1), &ovf);
      else
	wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}
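
/* For example, combining INT_MAX and 1 with PLUS_EXPR in type int
   wraps: WI becomes INT_MIN and OVF is set to wi::OVF_OVERFLOW.  */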

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.
   wi::OVF_UNDERFLOW indicates underflow, wi::OVF_OVERFLOW indicates
   overflow, and wi::OVF_NONE indicates neither.  */

static void
set_value_range_with_overflow (value_range &vr,
			       tree type,
			       const wide_int &wmin, const wide_int &wmax,
			       wi::overflow_type min_ovf,
			       wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  vr.type = VR_RANGE;
  vr.equiv = NULL;
  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
	 range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
	{
	  /* No overflow or both overflow or underflow.  The
	     range kind stays VR_RANGE.  */
	  vr.min = wide_int_to_tree (type, tmin);
	  vr.max = wide_int_to_tree (type, tmax);
	}
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
	       || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
	{
	  /* Min underflow or max overflow.  The range kind
	     changes to VR_ANTI_RANGE.  */
	  bool covers = false;
	  wide_int tem = tmin;
	  vr.type = VR_ANTI_RANGE;
	  tmin = tmax + 1;
	  if (wi::cmp (tmin, tmax, sgn) < 0)
	    covers = true;
	  tmax = tem - 1;
	  if (wi::cmp (tmax, tem, sgn) > 0)
	    covers = true;
	  /* If the anti-range would cover nothing, drop to varying.
	     Likewise if the anti-range bounds are outside of the
	     type's values.  */
	  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
	    {
	      set_value_range_to_varying (&vr);
	      return;
	    }
	  vr.min = wide_int_to_tree (type, tmin);
	  vr.max = wide_int_to_tree (type, tmax);
	}
      else
	{
	  /* Other underflow and/or overflow, drop to VR_VARYING.  */
	  set_value_range_to_varying (&vr);
	  return;
	}
    }
  else
    {
      /* If overflow does not wrap, saturate to the type's min/max
	 value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      if (min_ovf == wi::OVF_UNDERFLOW)
	vr.min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
	vr.min = wide_int_to_tree (type, type_max);
      else
	vr.min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
	vr.max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
	vr.max = wide_int_to_tree (type, type_max);
      else
	vr.max = wide_int_to_tree (type, wmax);
    }
}
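
/* For example, for a wrapping unsigned char type, [250, 260] with
   MAX_OVF == wi::OVF_OVERFLOW truncates to the wrapped set
   [250, 255] U [0, 4], which is stored as the anti-range ~[5, 249].  */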

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr_1 (value_range *vr,
				  enum tree_code code, tree expr_type,
				  const value_range *vr0_,
				  const value_range *vr1_)
{
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range vr0 = *vr0_, vr1 = *vr1_;
  value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_meet.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR
      && vr0.type == VR_ANTI_RANGE
      && vr0.min == vr0.max
      && integer_zerop (vr0.min))
    {
      set_value_range_to_nonnull (vr, expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness.  If both are non-NULL, the result is non-NULL;
	     if both are NULL, the result is NULL; otherwise it is
	     varying.  */
	  if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (!range_includes_zero_p (&vr0)
	      || !range_includes_zero_p (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min;
      tree min_op1 = minus_p ? vr1.max : vr1.min;
      tree max_op0 = vr0.max;
      tree max_op1 = minus_p ? vr1.min : vr1.max;
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
	 single-symbolic ranges, try to compute the precise resulting range,
	 but only if we know that this resulting range will also be constant
	 or single-symbolic.  */
      if (vr0.type == VR_RANGE && vr1.type == VR_RANGE
	  && (TREE_CODE (min_op0) == INTEGER_CST
	      || (sym_min_op0
		  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
	  && (TREE_CODE (min_op1) == INTEGER_CST
	      || (sym_min_op1
		  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
	  && (!(sym_min_op0 && sym_min_op1)
	      || (sym_min_op0 == sym_min_op1
		  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
	  && (TREE_CODE (max_op0) == INTEGER_CST
	      || (sym_max_op0
		  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
	  && (TREE_CODE (max_op1) == INTEGER_CST
	      || (sym_max_op1
		  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
	  && (!(sym_max_op0 && sym_max_op1)
	      || (sym_max_op0 == sym_max_op1
		  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
	{
	  wide_int wmin, wmax;
	  wi::overflow_type min_ovf = wi::OVF_NONE;
	  wi::overflow_type max_ovf = wi::OVF_NONE;

	  /* Build the bounds.  */
	  combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
	  combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

	  /* If we have overflow for the constant part and the resulting
	     range will be symbolic, drop to VR_VARYING.  */
	  if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
	      || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* Adjust the range for possible overflow.  */
	  set_value_range_with_overflow (*vr, expr_type,
					 wmin, wmax, min_ovf, max_ovf);
	  if (vr->type == VR_VARYING)
	    return;

	  /* Build the symbolic bounds if needed.  */
	  adjust_symbolic_bound (vr->min, code, expr_type,
				 sym_min_op0, sym_min_op1,
				 neg_min_op0, neg_min_op1);
	  adjust_symbolic_bound (vr->max, code, expr_type,
				 sym_max_op0, sym_max_op1,
				 neg_max_op0, neg_max_op1);
	  /* ?? It would probably be cleaner to eliminate min/max/type
	     entirely and hold these values in VR directly.  */
	  min = vr->min;
	  max = vr->max;
	  type = vr->type;
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this.  */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      wide_int vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
				  vr0_min, vr0_max, vr1_min, vr1_max))
	set_value_range (vr, VR_RANGE,
			 wide_int_to_tree (expr_type, wmin),
			 wide_int_to_tree (expr_type, wmax), NULL);
      else
	set_value_range_to_varying (vr);
      return;
    }
  else if (code == MULT_EXPR)
    {
      if (!range_int_cst_p (&vr0)
	  || !range_int_cst_p (&vr1))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      if (range_int_cst_p (&vr1)
	  && !vrp_shift_undefined_p (vr1, prec))
	{
	  if (code == RSHIFT_EXPR)
	    {
	      /* Even if vr0 is VARYING or otherwise not usable, we can derive
		 useful ranges just from the shift count.  E.g.
		 x >> 63 for signed 64-bit x is always [-1, 0].  */
	      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
		{
		  vr0.type = type = VR_RANGE;
		  vr0.min = vrp_val_min (expr_type);
		  vr0.max = vrp_val_max (expr_type);
		}
	      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      wide_int res_lb, res_ub;
	      if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
					 wi::to_wide (vr0.min),
					 wi::to_wide (vr0.max),
					 wi::to_wide (vr1.min),
					 wi::to_wide (vr1.max),
					 TYPE_OVERFLOW_UNDEFINED (expr_type),
					 TYPE_OVERFLOW_WRAPS (expr_type)))
		{
		  min = wide_int_to_tree (expr_type, res_lb);
		  max = wide_int_to_tree (expr_type, res_ub);
		  set_and_canonicalize_value_range (vr, VR_RANGE,
						    min, max, NULL);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      wide_int dividend_min, dividend_max, divisor_min, divisor_max;
      wide_int wmin, wmax, extra_min, extra_max;
      bool extra_range_p;

      /* Special case explicit division by zero as undefined.  */
      if (range_is_null (&vr1))
	{
	  set_value_range_to_undefined (vr);
	  return;
	}

      /* First, normalize ranges into constants we can handle.  Note
	 that VR_ANTI_RANGE's of constants were already normalized
	 before arriving here.

	 NOTE: As a future improvement, we may be able to do better
	 with mixed symbolic (anti-)ranges like [0, A].  See note in
	 ranges_from_anti_range.  */
      extract_range_into_wide_ints (&vr0, sign, prec,
				    dividend_min, dividend_max);
      extract_range_into_wide_ints (&vr1, sign, prec,
				    divisor_min, divisor_max);
      if (!wide_int_range_div (wmin, wmax, code, sign, prec,
			       dividend_min, dividend_max,
			       divisor_min, divisor_max,
			       TYPE_OVERFLOW_UNDEFINED (expr_type),
			       TYPE_OVERFLOW_WRAPS (expr_type),
			       extra_range_p, extra_min, extra_max))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      set_value_range (vr, VR_RANGE,
		       wide_int_to_tree (expr_type, wmin),
		       wide_int_to_tree (expr_type, wmax), NULL);
      if (extra_range_p)
	{
	  value_range extra_range = VR_INITIALIZER;
	  set_value_range (&extra_range, VR_RANGE,
			   wide_int_to_tree (expr_type, extra_min),
			   wide_int_to_tree (expr_type, extra_max), NULL);
	  vrp_meet (vr, &extra_range);
	}
      return;
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      if (range_is_null (&vr1))
	{
	  set_value_range_to_undefined (vr);
	  return;
	}
      wide_int wmin, wmax, tmp;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      wide_int_range_trunc_mod (wmin, wmax, sign, prec,
				vr0_min, vr0_max, vr1_min, vr1_max);
      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
      set_value_range (vr, VR_RANGE, min, max, NULL);
      return;
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      vrp_set_zero_nonzero_bits (expr_type, &vr0,
				 &may_be_nonzero0, &must_be_nonzero0);
      vrp_set_zero_nonzero_bits (expr_type, &vr1,
				 &may_be_nonzero1, &must_be_nonzero1);
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (code == BIT_AND_EXPR)
	{
	  if (wide_int_range_bit_and (wmin, wmax, sign, prec,
				      vr0_min, vr0_max,
				      vr1_min, vr1_max,
				      must_be_nonzero0,
				      may_be_nonzero0,
				      must_be_nonzero1,
				      may_be_nonzero1))
	    {
	      min = wide_int_to_tree (expr_type, wmin);
	      max = wide_int_to_tree (expr_type, wmax);
	      set_value_range (vr, VR_RANGE, min, max, NULL);
	    }
	  else
	    set_value_range_to_varying (vr);
	  return;
	}
      else if (code == BIT_IOR_EXPR)
	{
	  if (wide_int_range_bit_ior (wmin, wmax, sign,
				      vr0_min, vr0_max,
				      vr1_min, vr1_max,
				      must_be_nonzero0,
				      may_be_nonzero0,
				      must_be_nonzero1,
				      may_be_nonzero1))
	    {
	      min = wide_int_to_tree (expr_type, wmin);
	      max = wide_int_to_tree (expr_type, wmax);
	      set_value_range (vr, VR_RANGE, min, max, NULL);
	    }
	  else
	    set_value_range_to_varying (vr);
	  return;
	}
      else if (code == BIT_XOR_EXPR)
	{
	  if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
				      must_be_nonzero0,
				      may_be_nonzero0,
				      must_be_nonzero1,
				      may_be_nonzero1))
	    {
	      min = wide_int_to_tree (expr_type, wmin);
	      max = wide_int_to_tree (expr_type, wmax);
	      set_value_range (vr, VR_RANGE, min, max, NULL);
	    }
	  else
	    set_value_range_to_varying (vr);
	  return;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (vrp_val_is_min (min) && vrp_val_is_max (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
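
/* For example, TRUNC_DIV_EXPR of [10, 20] by the null range [0, 0]
   yields VR_UNDEFINED rather than VR_VARYING, and TRUNC_MOD_EXPR
   behaves the same way: all divisions by zero are treated as
   undefined.  */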
1759
1760 /* Extract range information from a unary operation CODE based on
1761 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
1762 The resulting range is stored in *VR. */
1763
1764 void
1765 extract_range_from_unary_expr (value_range *vr,
1766 enum tree_code code, tree type,
1767 const value_range *vr0_, tree op0_type)
1768 {
1769 signop sign = TYPE_SIGN (type);
1770 unsigned int prec = TYPE_PRECISION (type);
1771 value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
1772
1773 /* VRP only operates on integral and pointer types. */
1774 if (!(INTEGRAL_TYPE_P (op0_type)
1775 || POINTER_TYPE_P (op0_type))
1776 || !(INTEGRAL_TYPE_P (type)
1777 || POINTER_TYPE_P (type)))
1778 {
1779 set_value_range_to_varying (vr);
1780 return;
1781 }
1782
1783 /* If VR0 is UNDEFINED, so is the result. */
1784 if (vr0.type == VR_UNDEFINED)
1785 {
1786 set_value_range_to_undefined (vr);
1787 return;
1788 }
1789
1790 /* Handle operations that we express in terms of others. */
1791 if (code == PAREN_EXPR || code == OBJ_TYPE_REF)
1792 {
1793 /* PAREN_EXPR and OBJ_TYPE_REF are simple copies. */
1794 copy_value_range (vr, &vr0);
1795 return;
1796 }
1797 else if (code == NEGATE_EXPR)
1798 {
1799 /* -X is simply 0 - X, so re-use existing code that also handles
1800 anti-ranges fine. */
1801 value_range zero = VR_INITIALIZER;
1802 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
1803 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
1804 return;
1805 }
1806 else if (code == BIT_NOT_EXPR)
1807 {
1808 /* ~X is simply -1 - X, so re-use existing code that also handles
1809 anti-ranges fine. */
1810 value_range minusone = VR_INITIALIZER;
1811 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
1812 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
1813 type, &minusone, &vr0);
1814 return;
1815 }
1816
1817 /* Now canonicalize anti-ranges to ranges when they are not symbolic
1818 and express op ~[] as (op []') U (op []''). */
1819 if (vr0.type == VR_ANTI_RANGE
1820 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
1821 {
1822 extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
1823 if (vrtem1.type != VR_UNDEFINED)
1824 {
1825 value_range vrres = VR_INITIALIZER;
1826 extract_range_from_unary_expr (&vrres, code, type,
1827 &vrtem1, op0_type);
1828 vrp_meet (vr, &vrres);
1829 }
1830 return;
1831 }
1832
1833 if (CONVERT_EXPR_CODE_P (code))
1834 {
1835 tree inner_type = op0_type;
1836 tree outer_type = type;
1837
1838 /* If the expression evaluates to a pointer, we are only interested in
1839 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
1840 if (POINTER_TYPE_P (type))
1841 {
1842 if (!range_includes_zero_p (&vr0))
1843 set_value_range_to_nonnull (vr, type);
1844 else if (range_is_null (&vr0))
1845 set_value_range_to_null (vr, type);
1846 else
1847 set_value_range_to_varying (vr);
1848 return;
1849 }
1850
1851 /* We normalize everything to a VR_RANGE, but for constant
1852 anti-ranges we must handle them by leaving the final result
1853 as an anti range. This allows us to convert things like
1854 ~[0,5] seamlessly. */
1855 value_range_type vr_type = VR_RANGE;
1856 if (vr0.type == VR_ANTI_RANGE
1857 && TREE_CODE (vr0.min) == INTEGER_CST
1858 && TREE_CODE (vr0.max) == INTEGER_CST)
1859 vr_type = VR_ANTI_RANGE;
1860
1861 /* NOTES: Previously we were returning VARYING for all symbolics, but
1862 we can do better by treating them as [TYPE_MIN, TYPE_MAX]. For
1863 example, converting [SYM, SYM] from INT to LONG UNSIGNED,
1864 we can return: ~[0x80000000, 0xffffffff7fffffff].
1865
1866 We were also failing to convert ~[0,0] from char* to unsigned,
1867 instead choosing to return VR_VARYING. Now we return ~[0,0]. */
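/* To see why: a symbolic INT range stands for [INT_MIN, INT_MAX];
sign-extending those values to 64 bits and reading them as LONG
UNSIGNED gives [0, 0x7fffffff] and
[0xffffffff80000000, 0xffffffffffffffff], whose union is exactly
the anti-range above. */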
1868 wide_int vr0_min, vr0_max, wmin, wmax;
1869 signop inner_sign = TYPE_SIGN (inner_type);
1870 signop outer_sign = TYPE_SIGN (outer_type);
1871 unsigned inner_prec = TYPE_PRECISION (inner_type);
1872 unsigned outer_prec = TYPE_PRECISION (outer_type);
1873 extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
1874 vr0_min, vr0_max);
1875 if (wide_int_range_convert (wmin, wmax,
1876 inner_sign, inner_prec,
1877 outer_sign, outer_prec,
1878 vr0_min, vr0_max))
1879 {
1880 tree min = wide_int_to_tree (outer_type, wmin);
1881 tree max = wide_int_to_tree (outer_type, wmax);
1882 set_and_canonicalize_value_range (vr, vr_type, min, max, NULL);
1883 }
1884 else
1885 set_value_range_to_varying (vr);
1886 return;
1887 }
1888 else if (code == ABS_EXPR)
1889 {
1890 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
1891 {
1892 set_value_range_to_varying (vr);
1893 return;
1894 }
1895 wide_int wmin, wmax;
1896 wide_int vr0_min, vr0_max;
1897 extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
1898 if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
1899 TYPE_OVERFLOW_UNDEFINED (type)))
1900 set_value_range (vr, VR_RANGE,
1901 wide_int_to_tree (type, wmin),
1902 wide_int_to_tree (type, wmax), NULL);
1903 else
1904 set_value_range_to_varying (vr);
1905 return;
1906 }
1907
1908 /* For unhandled operations fall back to varying. */
1909 set_value_range_to_varying (vr);
1910 return;
1911 }
1912
1913 /* Debugging dumps. */
1914
1915 void dump_value_range (FILE *, const value_range *);
1916 void debug_value_range (const value_range *);
1917 void dump_all_value_ranges (FILE *);
1918 void dump_vr_equiv (FILE *, bitmap);
1919 void debug_vr_equiv (bitmap);
1920
1921
1922 /* Dump value range VR to FILE. */
1923
1924 void
1925 dump_value_range (FILE *file, const value_range *vr)
1926 {
1927 if (vr == NULL)
1928 fprintf (file, "[]");
1929 else if (vr->type == VR_UNDEFINED)
1930 fprintf (file, "UNDEFINED");
1931 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
1932 {
1933 tree type = TREE_TYPE (vr->min);
1934
1935 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
1936
1937 if (INTEGRAL_TYPE_P (type)
1938 && !TYPE_UNSIGNED (type)
1939 && vrp_val_is_min (vr->min))
1940 fprintf (file, "-INF");
1941 else
1942 print_generic_expr (file, vr->min);
1943
1944 fprintf (file, ", ");
1945
1946 if (INTEGRAL_TYPE_P (type)
1947 && vrp_val_is_max (vr->max))
1948 fprintf (file, "+INF");
1949 else
1950 print_generic_expr (file, vr->max);
1951
1952 fprintf (file, "]");
1953
1954 if (vr->equiv)
1955 {
1956 bitmap_iterator bi;
1957 unsigned i, c = 0;
1958
1959 fprintf (file, " EQUIVALENCES: { ");
1960
1961 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
1962 {
1963 print_generic_expr (file, ssa_name (i));
1964 fprintf (file, " ");
1965 c++;
1966 }
1967
1968 fprintf (file, "} (%u elements)", c);
1969 }
1970 }
1971 else if (vr->type == VR_VARYING)
1972 fprintf (file, "VARYING");
1973 else
1974 fprintf (file, "INVALID RANGE");
1975 }
1976
1977
1978 /* Dump value range VR to stderr. */
1979
1980 DEBUG_FUNCTION void
1981 debug_value_range (const value_range *vr)
1982 {
1983 dump_value_range (stderr, vr);
1984 fprintf (stderr, "\n");
1985 }
1986
1987 void
1988 value_range::dump () const
1989 {
1990 debug_value_range (this);
1991 }
1992
1993
1994 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
1995 create a new SSA name N and return the assertion assignment
1996 'N = ASSERT_EXPR <V, V OP W>'. */
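/* For example, given V = x_3 and COND = 'x_3 > 10', this produces the
assignment 'x_4 = ASSERT_EXPR <x_3, x_3 > 10>', where x_4 is the fresh
name created by create_new_def_for below (the name x_4 is illustrative). */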
1997
1998 static gimple *
1999 build_assert_expr_for (tree cond, tree v)
2000 {
2001 tree a;
2002 gassign *assertion;
2003
2004 gcc_assert (TREE_CODE (v) == SSA_NAME
2005 && COMPARISON_CLASS_P (cond));
2006
2007 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
2008 assertion = gimple_build_assign (NULL_TREE, a);
2009
2010 /* The new ASSERT_EXPR creates a new SSA name that replaces the
2011 operand of the ASSERT_EXPR. Create it so that the new name and the
2012 old one are registered in the replacement table, letting us fix the
2013 SSA web after adding all the ASSERT_EXPRs. */
2014 tree new_def = create_new_def_for (v, assertion, NULL);
2015 /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
2016 given we have to be able to fully propagate those out to re-create
2017 valid SSA when removing the asserts. */
2018 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
2019 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
2020
2021 return assertion;
2022 }
2023
2024
2025 /* Return true if STMT is a GIMPLE_COND whose predicate involves
2026 floating point values. */
2027
2028 static inline bool
2029 fp_predicate (gimple *stmt)
2030 {
2031 GIMPLE_CHECK (stmt, GIMPLE_COND);
2032
2033 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
2034 }
2035
2036 /* If the range of values taken by OP can be inferred after STMT executes,
2037 return the comparison code (COMP_CODE_P) and value (VAL_P) that
2038 describe the inferred range. Return true if a range could be
2039 inferred. */
2040
2041 bool
2042 infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
2043 {
2044 *val_p = NULL_TREE;
2045 *comp_code_p = ERROR_MARK;
2046
2047 /* Do not attempt to infer anything in names that flow through
2048 abnormal edges. */
2049 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
2050 return false;
2051
2052 /* If STMT is the last statement of a basic block with no normal
2053 successors, there is no point inferring anything about any of its
2054 operands. We would not be able to find a proper insertion point
2055 for the assertion, anyway. */
2056 if (stmt_ends_bb_p (stmt))
2057 {
2058 edge_iterator ei;
2059 edge e;
2060
2061 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
2062 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
2063 break;
2064 if (e == NULL)
2065 return false;
2066 }
2067
2068 if (infer_nonnull_range (stmt, op))
2069 {
2070 *val_p = build_int_cst (TREE_TYPE (op), 0);
2071 *comp_code_p = NE_EXPR;
2072 return true;
2073 }
2074
2075 return false;
2076 }
2077
2078
2079 void dump_asserts_for (FILE *, tree);
2080 void debug_asserts_for (tree);
2081 void dump_all_asserts (FILE *);
2082 void debug_all_asserts (void);
2083
2084 /* Dump all the registered assertions for NAME to FILE. */
2085
2086 void
2087 dump_asserts_for (FILE *file, tree name)
2088 {
2089 assert_locus *loc;
2090
2091 fprintf (file, "Assertions to be inserted for ");
2092 print_generic_expr (file, name);
2093 fprintf (file, "\n");
2094
2095 loc = asserts_for[SSA_NAME_VERSION (name)];
2096 while (loc)
2097 {
2098 fprintf (file, "\t");
2099 print_gimple_stmt (file, gsi_stmt (loc->si), 0);
2100 fprintf (file, "\n\tBB #%d", loc->bb->index);
2101 if (loc->e)
2102 {
2103 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
2104 loc->e->dest->index);
2105 dump_edge_info (file, loc->e, dump_flags, 0);
2106 }
2107 fprintf (file, "\n\tPREDICATE: ");
2108 print_generic_expr (file, loc->expr);
2109 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
2110 print_generic_expr (file, loc->val);
2111 fprintf (file, "\n\n");
2112 loc = loc->next;
2113 }
2114
2115 fprintf (file, "\n");
2116 }
2117
2118
2119 /* Dump all the registered assertions for NAME to stderr. */
2120
2121 DEBUG_FUNCTION void
2122 debug_asserts_for (tree name)
2123 {
2124 dump_asserts_for (stderr, name);
2125 }
2126
2127
2128 /* Dump all the registered assertions for all the names to FILE. */
2129
2130 void
2131 dump_all_asserts (FILE *file)
2132 {
2133 unsigned i;
2134 bitmap_iterator bi;
2135
2136 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
2137 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
2138 dump_asserts_for (file, ssa_name (i));
2139 fprintf (file, "\n");
2140 }
2141
2142
2143 /* Dump all the registered assertions for all the names to stderr. */
2144
2145 DEBUG_FUNCTION void
2146 debug_all_asserts (void)
2147 {
2148 dump_all_asserts (stderr);
2149 }
2150
2151 /* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
2152
2153 static void
2154 add_assert_info (vec<assert_info> &asserts,
2155 tree name, tree expr, enum tree_code comp_code, tree val)
2156 {
2157 assert_info info;
2158 info.comp_code = comp_code;
2159 info.name = name;
2160 if (TREE_OVERFLOW_P (val))
2161 val = drop_tree_overflow (val);
2162 info.val = val;
2163 info.expr = expr;
2164 asserts.safe_push (info);
2165 }
2166
2167 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
2168 'EXPR COMP_CODE VAL' at a location that dominates block BB or
2169 E->DEST, then register this location as a possible insertion point
2170 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
2171
2172 BB, E and SI provide the exact insertion point for the new
2173 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
2174 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
2175 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
2176 must not be NULL. */
2177
2178 static void
2179 register_new_assert_for (tree name, tree expr,
2180 enum tree_code comp_code,
2181 tree val,
2182 basic_block bb,
2183 edge e,
2184 gimple_stmt_iterator si)
2185 {
2186 assert_locus *n, *loc, *last_loc;
2187 basic_block dest_bb;
2188
2189 gcc_checking_assert (bb == NULL || e == NULL);
2190
2191 if (e == NULL)
2192 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
2193 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
2194
2195 /* Never build an assert comparing against an integer constant with
2196 TREE_OVERFLOW set. This confuses our undefined overflow warning
2197 machinery. */
2198 if (TREE_OVERFLOW_P (val))
2199 val = drop_tree_overflow (val);
2200
2201 /* The new assertion A will be inserted at BB or E. We need to
2202 determine if the new location is dominated by a previously
2203 registered location for A. If we are doing an edge insertion,
2204 assume that A will be inserted at E->DEST. Note that this is not
2205 necessarily true.
2206
2207 If E is a critical edge, it will be split. But even if E is
2208 split, the new block will dominate the same set of blocks that
2209 E->DEST dominates.
2210
2211 The reverse, however, is not true: blocks dominated by E->DEST
2212 will not be dominated by the new block created to split E. So,
2213 if the insertion location is on a critical edge, we will not use
2214 the new location to move another assertion previously registered
2215 at a block dominated by E->DEST. */
2216 dest_bb = (bb) ? bb : e->dest;
2217
2218 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
2219 VAL at a block dominating DEST_BB, then we don't need to insert a new
2220 one. Similarly, if the same assertion already exists at a block
2221 dominated by DEST_BB and the new location is not on a critical
2222 edge, then update the existing location for the assertion (i.e.,
2223 move the assertion up in the dominance tree).
2224
2225 Note, this is implemented as a simple linked list because there
2226 should not be more than a handful of assertions registered per
2227 name. If this becomes a performance problem, a table hashed by
2228 COMP_CODE and VAL could be implemented. */
2229 loc = asserts_for[SSA_NAME_VERSION (name)];
2230 last_loc = loc;
2231 while (loc)
2232 {
2233 if (loc->comp_code == comp_code
2234 && (loc->val == val
2235 || operand_equal_p (loc->val, val, 0))
2236 && (loc->expr == expr
2237 || operand_equal_p (loc->expr, expr, 0)))
2238 {
2239 /* If E is not a critical edge and DEST_BB
2240 dominates the existing location for the assertion, move
2241 the assertion up in the dominance tree by updating its
2242 location information. */
2243 if ((e == NULL || !EDGE_CRITICAL_P (e))
2244 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
2245 {
2246 loc->bb = dest_bb;
2247 loc->e = e;
2248 loc->si = si;
2249 return;
2250 }
2251 }
2252
2253 /* Update the last node of the list and move to the next one. */
2254 last_loc = loc;
2255 loc = loc->next;
2256 }
2257
2258 /* If we didn't find an assertion already registered for
2259 NAME COMP_CODE VAL, add a new one at the end of the list of
2260 assertions associated with NAME. */
2261 n = XNEW (struct assert_locus);
2262 n->bb = dest_bb;
2263 n->e = e;
2264 n->si = si;
2265 n->comp_code = comp_code;
2266 n->val = val;
2267 n->expr = expr;
2268 n->next = NULL;
2269
2270 if (last_loc)
2271 last_loc->next = n;
2272 else
2273 asserts_for[SSA_NAME_VERSION (name)] = n;
2274
2275 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
2276 }
2277
2278 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
2279 Extract a suitable test code and value and store them into *CODE_P and
2280 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
2281
2282 If no extraction was possible, return FALSE, otherwise return TRUE.
2283
2284 If INVERT is true, then we invert the result stored into *CODE_P. */
2285
2286 static bool
2287 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
2288 tree cond_op0, tree cond_op1,
2289 bool invert, enum tree_code *code_p,
2290 tree *val_p)
2291 {
2292 enum tree_code comp_code;
2293 tree val;
2294
2295 /* We have a comparison of the form NAME COMP VAL
2296 or VAL COMP NAME. */
2297 if (name == cond_op1)
2298 {
2299 /* If the predicate is of the form VAL COMP NAME, flip
2300 COMP around because we need to register NAME as the
2301 first operand in the predicate. */
2302 comp_code = swap_tree_comparison (cond_code);
2303 val = cond_op0;
2304 }
2305 else if (name == cond_op0)
2306 {
2307 /* The comparison is of the form NAME COMP VAL, so the
2308 comparison code remains unchanged. */
2309 comp_code = cond_code;
2310 val = cond_op1;
2311 }
2312 else
2313 gcc_unreachable ();
2314
2315 /* Invert the comparison code as necessary. */
2316 if (invert)
2317 comp_code = invert_tree_comparison (comp_code, 0);
2318
2319 /* VRP only handles integral and pointer types. */
2320 if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
2321 && ! POINTER_TYPE_P (TREE_TYPE (val)))
2322 return false;
2323
2324 /* Do not register always-false predicates.
2325 FIXME: this works around a limitation in fold() when dealing with
2326 enumerations. Given 'enum { N1, N2 } x;', fold will not
2327 fold 'if (x > N2)' to 'if (0)'. */
2328 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
2329 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
2330 {
2331 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
2332 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
2333
2334 if (comp_code == GT_EXPR
2335 && (!max
2336 || compare_values (val, max) == 0))
2337 return false;
2338
2339 if (comp_code == LT_EXPR
2340 && (!min
2341 || compare_values (val, min) == 0))
2342 return false;
2343 }
2344 *code_p = comp_code;
2345 *val_p = val;
2346 return true;
2347 }
2348
2349 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
2350 (otherwise return VAL). VAL and MASK must be zero-extended for
2351 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
2352 (to transform signed values into unsigned) and at the end xor
2353 SGNBIT back. */
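/* For example, with PREC = 8, VAL = 5, MASK = 0b1100 and SGNBIT = 0,
the values satisfying (RES & MASK) == RES are 0, 4, 8 and 12; the
smallest one greater than 5 is 8, which is what the loop below finds. */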
2354
2355 static wide_int
2356 masked_increment (const wide_int &val_in, const wide_int &mask,
2357 const wide_int &sgnbit, unsigned int prec)
2358 {
2359 wide_int bit = wi::one (prec), res;
2360 unsigned int i;
2361
2362 wide_int val = val_in ^ sgnbit;
2363 for (i = 0; i < prec; i++, bit += bit)
2364 {
2365 res = mask;
2366 if ((res & bit) == 0)
2367 continue;
2368 res = bit - 1;
2369 res = wi::bit_and_not (val + bit, res);
2370 res &= mask;
2371 if (wi::gtu_p (res, val))
2372 return res ^ sgnbit;
2373 }
2374 return val ^ sgnbit;
2375 }
2376
2377 /* Helper for overflow_comparison_p
2378
2379 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2380 OP1's defining statement to see if it ultimately has the form
2381 OP0 CODE (OP0 PLUS INTEGER_CST)
2382
2383 If so, return TRUE indicating this is an overflow test and store into
2384 *NEW_CST an updated constant that can be used in a narrowed range test.
2385
2386 REVERSED indicates if the comparison was originally:
2387
2388 OP1 CODE' OP0.
2389
2390 This affects how we build the updated constant. */
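/* For instance, with unsigned char OP0, the test OP0 < OP0 + 10 is
false only when OP0 + 10 wraps, so it is equivalent to OP0 <= 245;
*NEW_CST is that 245, computed below as max - 10. */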
2391
2392 static bool
2393 overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
2394 bool follow_assert_exprs, bool reversed, tree *new_cst)
2395 {
2396 /* See if this is a relational operation between two SSA_NAMES with
2397 unsigned, overflow wrapping values. If so, check it more deeply. */
2398 if ((code == LT_EXPR || code == LE_EXPR
2399 || code == GE_EXPR || code == GT_EXPR)
2400 && TREE_CODE (op0) == SSA_NAME
2401 && TREE_CODE (op1) == SSA_NAME
2402 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
2403 && TYPE_UNSIGNED (TREE_TYPE (op0))
2404 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
2405 {
2406 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
2407
2408 /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
2409 if (follow_assert_exprs)
2410 {
2411 while (gimple_assign_single_p (op1_def)
2412 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
2413 {
2414 op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
2415 if (TREE_CODE (op1) != SSA_NAME)
2416 break;
2417 op1_def = SSA_NAME_DEF_STMT (op1);
2418 }
2419 }
2420
2421 /* Now look at the defining statement of OP1 to see if it adds
2422 or subtracts a nonzero constant from another operand. */
2423 if (op1_def
2424 && is_gimple_assign (op1_def)
2425 && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
2426 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
2427 && !integer_zerop (gimple_assign_rhs2 (op1_def)))
2428 {
2429 tree target = gimple_assign_rhs1 (op1_def);
2430
2431 /* If requested, follow ASSERT_EXPRs backwards for op0 looking
2432 for one where TARGET appears on the RHS. */
2433 if (follow_assert_exprs)
2434 {
2435 /* Now see if that "other operand" is op0, following the chain
2436 of ASSERT_EXPRs if necessary. */
2437 gimple *op0_def = SSA_NAME_DEF_STMT (op0);
2438 while (op0 != target
2439 && gimple_assign_single_p (op0_def)
2440 && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
2441 {
2442 op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
2443 if (TREE_CODE (op0) != SSA_NAME)
2444 break;
2445 op0_def = SSA_NAME_DEF_STMT (op0);
2446 }
2447 }
2448
2449 /* If we did not find our target SSA_NAME, then this is not
2450 an overflow test. */
2451 if (op0 != target)
2452 return false;
2453
2454 tree type = TREE_TYPE (op0);
2455 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
2456 tree inc = gimple_assign_rhs2 (op1_def);
2457 if (reversed)
2458 *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
2459 else
2460 *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
2461 return true;
2462 }
2463 }
2464 return false;
2465 }
2466
2467 /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
2468 OP1's defining statement to see if it ultimately has the form
2469 OP0 CODE (OP0 PLUS INTEGER_CST)
2470
2471 If so, return TRUE indicating this is an overflow test and store into
2472 *NEW_CST an updated constant that can be used in a narrowed range test.
2473
2474 These statements are left as-is in the IL to facilitate discovery of
2475 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
2476 the alternate range representation is often useful within VRP. */
2477
2478 bool
2479 overflow_comparison_p (tree_code code, tree name, tree val,
2480 bool use_equiv_p, tree *new_cst)
2481 {
2482 if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
2483 return true;
2484 return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
2485 use_equiv_p, true, new_cst);
2486 }
2487
2488
2489 /* Try to register an edge assertion for SSA name NAME on edge E for the
2490 condition described by COND_CODE, COND_OP0 and COND_OP1, which feeds the
2491 conditional jump ending E's source block. Invert the condition if INVERT. */
2492
2493 static void
2494 register_edge_assert_for_2 (tree name, edge e,
2495 enum tree_code cond_code,
2496 tree cond_op0, tree cond_op1, bool invert,
2497 vec<assert_info> &asserts)
2498 {
2499 tree val;
2500 enum tree_code comp_code;
2501
2502 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
2503 cond_op0,
2504 cond_op1,
2505 invert, &comp_code, &val))
2506 return;
2507
2508 /* Queue the assert. */
2509 tree x;
2510 if (overflow_comparison_p (comp_code, name, val, false, &x))
2511 {
2512 enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
2513 ? GT_EXPR : LE_EXPR);
2514 add_assert_info (asserts, name, name, new_code, x);
2515 }
2516 add_assert_info (asserts, name, name, comp_code, val);
2517
2518 /* In the case of NAME <= CST and NAME being defined as
2519 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
2520 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
2521 This catches range and anti-range tests. */
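/* For instance, from n_1 = (unsigned) n2_2 + 4 and the test n_1 <= 9,
the unsigned sum is at most 9 exactly when n2_2 is in [-4, 5],
matching the NAME2 >= -CST2 and NAME2 <= CST - CST2 bounds above
(SSA names here are illustrative). */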
2522 if ((comp_code == LE_EXPR
2523 || comp_code == GT_EXPR)
2524 && TREE_CODE (val) == INTEGER_CST
2525 && TYPE_UNSIGNED (TREE_TYPE (val)))
2526 {
2527 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2528 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
2529
2530 /* Extract CST2 from the (optional) addition. */
2531 if (is_gimple_assign (def_stmt)
2532 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
2533 {
2534 name2 = gimple_assign_rhs1 (def_stmt);
2535 cst2 = gimple_assign_rhs2 (def_stmt);
2536 if (TREE_CODE (name2) == SSA_NAME
2537 && TREE_CODE (cst2) == INTEGER_CST)
2538 def_stmt = SSA_NAME_DEF_STMT (name2);
2539 }
2540
2541 /* Extract NAME2 from the (optional) sign-changing cast. */
2542 if (gimple_assign_cast_p (def_stmt))
2543 {
2544 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
2545 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2546 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
2547 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
2548 name3 = gimple_assign_rhs1 (def_stmt);
2549 }
2550
2551 /* If name3 is used later, create an ASSERT_EXPR for it. */
2552 if (name3 != NULL_TREE
2553 && TREE_CODE (name3) == SSA_NAME
2554 && (cst2 == NULL_TREE
2555 || TREE_CODE (cst2) == INTEGER_CST)
2556 && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
2557 {
2558 tree tmp;
2559
2560 /* Build an expression for the range test. */
2561 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
2562 if (cst2 != NULL_TREE)
2563 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2564
2565 if (dump_file)
2566 {
2567 fprintf (dump_file, "Adding assert for ");
2568 print_generic_expr (dump_file, name3);
2569 fprintf (dump_file, " from ");
2570 print_generic_expr (dump_file, tmp);
2571 fprintf (dump_file, "\n");
2572 }
2573
2574 add_assert_info (asserts, name3, tmp, comp_code, val);
2575 }
2576
2577 /* If name2 is used later, create an ASSERT_EXPR for it. */
2578 if (name2 != NULL_TREE
2579 && TREE_CODE (name2) == SSA_NAME
2580 && TREE_CODE (cst2) == INTEGER_CST
2581 && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
2582 {
2583 tree tmp;
2584
2585 /* Build an expression for the range test. */
2586 tmp = name2;
2587 if (TREE_TYPE (name) != TREE_TYPE (name2))
2588 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
2589 if (cst2 != NULL_TREE)
2590 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
2591
2592 if (dump_file)
2593 {
2594 fprintf (dump_file, "Adding assert for ");
2595 print_generic_expr (dump_file, name2);
2596 fprintf (dump_file, " from ");
2597 print_generic_expr (dump_file, tmp);
2598 fprintf (dump_file, "\n");
2599 }
2600
2601 add_assert_info (asserts, name2, tmp, comp_code, val);
2602 }
2603 }
2604
2605 /* In the case of post-in/decrement tests like if (i++) ... and uses
2606 of the in/decremented value on the edge, the extra name we want to
2607 assert for is not on the def chain of the name compared. Instead
2608 it is in the set of use stmts.
2609 Similar cases happen for conversions that were simplified through
2610 fold_{sign_changed,widened}_comparison. */
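/* For example, for 'if (i_1 != 0)' with a use 'i_2 = i_1 + 1' whose
block dominates the source of the edge, the loop below registers the
derived assertion i_2 != 1 (names illustrative). */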
2611 if ((comp_code == NE_EXPR
2612 || comp_code == EQ_EXPR)
2613 && TREE_CODE (val) == INTEGER_CST)
2614 {
2615 imm_use_iterator ui;
2616 gimple *use_stmt;
2617 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
2618 {
2619 if (!is_gimple_assign (use_stmt))
2620 continue;
2621
2622 /* Only consider use-stmts in blocks that dominate the source of E. */
2623 if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
2624 continue;
2625
2626 tree name2 = gimple_assign_lhs (use_stmt);
2627 if (TREE_CODE (name2) != SSA_NAME)
2628 continue;
2629
2630 enum tree_code code = gimple_assign_rhs_code (use_stmt);
2631 tree cst;
2632 if (code == PLUS_EXPR
2633 || code == MINUS_EXPR)
2634 {
2635 cst = gimple_assign_rhs2 (use_stmt);
2636 if (TREE_CODE (cst) != INTEGER_CST)
2637 continue;
2638 cst = int_const_binop (code, val, cst);
2639 }
2640 else if (CONVERT_EXPR_CODE_P (code))
2641 {
2642 /* For truncating conversions we cannot record
2643 an inequality. */
2644 if (comp_code == NE_EXPR
2645 && (TYPE_PRECISION (TREE_TYPE (name2))
2646 < TYPE_PRECISION (TREE_TYPE (name))))
2647 continue;
2648 cst = fold_convert (TREE_TYPE (name2), val);
2649 }
2650 else
2651 continue;
2652
2653 if (TREE_OVERFLOW_P (cst))
2654 cst = drop_tree_overflow (cst);
2655 add_assert_info (asserts, name2, name2, comp_code, cst);
2656 }
2657 }
2658
2659 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
2660 && TREE_CODE (val) == INTEGER_CST)
2661 {
2662 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
2663 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
2664 tree val2 = NULL_TREE;
2665 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
2666 wide_int mask = wi::zero (prec);
2667 unsigned int nprec = prec;
2668 enum tree_code rhs_code = ERROR_MARK;
2669
2670 if (is_gimple_assign (def_stmt))
2671 rhs_code = gimple_assign_rhs_code (def_stmt);
2672
2673 /* In the case of NAME != CST1 where NAME = A +- CST2 we can
2674 assert that A != CST1 -+ CST2. */
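/* E.g., from n_1 = a_2 + 7 and the test n_1 == 13 we can assert
a_2 == 6 (names illustrative). */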
2675 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
2676 && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
2677 {
2678 tree op0 = gimple_assign_rhs1 (def_stmt);
2679 tree op1 = gimple_assign_rhs2 (def_stmt);
2680 if (TREE_CODE (op0) == SSA_NAME
2681 && TREE_CODE (op1) == INTEGER_CST)
2682 {
2683 enum tree_code reverse_op = (rhs_code == PLUS_EXPR
2684 ? MINUS_EXPR : PLUS_EXPR);
2685 op1 = int_const_binop (reverse_op, val, op1);
2686 if (TREE_OVERFLOW (op1))
2687 op1 = drop_tree_overflow (op1);
2688 add_assert_info (asserts, op0, op0, comp_code, op1);
2689 }
2690 }
2691
2692 /* Add asserts for NAME cmp CST and NAME being defined
2693 as NAME = (int) NAME2. */
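/* The signed test is rewritten as an unsigned one by adding the bias
-INT_MIN: e.g., for n_1 = (int) u_2 with 32-bit operands, n_1 <= 100
becomes u_2 + 0x80000000 <= 0x80000064 (names illustrative). */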
2694 if (!TYPE_UNSIGNED (TREE_TYPE (val))
2695 && (comp_code == LE_EXPR || comp_code == LT_EXPR
2696 || comp_code == GT_EXPR || comp_code == GE_EXPR)
2697 && gimple_assign_cast_p (def_stmt))
2698 {
2699 name2 = gimple_assign_rhs1 (def_stmt);
2700 if (CONVERT_EXPR_CODE_P (rhs_code)
2701 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2702 && TYPE_UNSIGNED (TREE_TYPE (name2))
2703 && prec == TYPE_PRECISION (TREE_TYPE (name2))
2704 && (comp_code == LE_EXPR || comp_code == GT_EXPR
2705 || !tree_int_cst_equal (val,
2706 TYPE_MIN_VALUE (TREE_TYPE (val)))))
2707 {
2708 tree tmp, cst;
2709 enum tree_code new_comp_code = comp_code;
2710
2711 cst = fold_convert (TREE_TYPE (name2),
2712 TYPE_MIN_VALUE (TREE_TYPE (val)));
2713 /* Build an expression for the range test. */
2714 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
2715 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
2716 fold_convert (TREE_TYPE (name2), val));
2717 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2718 {
2719 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
2720 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
2721 build_int_cst (TREE_TYPE (name2), 1));
2722 }
2723
2724 if (dump_file)
2725 {
2726 fprintf (dump_file, "Adding assert for ");
2727 print_generic_expr (dump_file, name2);
2728 fprintf (dump_file, " from ");
2729 print_generic_expr (dump_file, tmp);
2730 fprintf (dump_file, "\n");
2731 }
2732
2733 add_assert_info (asserts, name2, tmp, new_comp_code, cst);
2734 }
2735 }
2736
2737 /* Add asserts for NAME cmp CST and NAME being defined as
2738 NAME = NAME2 >> CST2.
2739
2740 Extract CST2 from the right shift. */
2741 if (rhs_code == RSHIFT_EXPR)
2742 {
2743 name2 = gimple_assign_rhs1 (def_stmt);
2744 cst2 = gimple_assign_rhs2 (def_stmt);
2745 if (TREE_CODE (name2) == SSA_NAME
2746 && tree_fits_uhwi_p (cst2)
2747 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2748 && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
2749 && type_has_mode_precision_p (TREE_TYPE (val)))
2750 {
2751 mask = wi::mask (tree_to_uhwi (cst2), false, prec);
2752 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
2753 }
2754 }
2755 if (val2 != NULL_TREE
2756 && TREE_CODE (val2) == INTEGER_CST
2757 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
2758 TREE_TYPE (val),
2759 val2, cst2), val))
2760 {
2761 enum tree_code new_comp_code = comp_code;
2762 tree tmp, new_val;
2763
2764 tmp = name2;
2765 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
2766 {
2767 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
2768 {
2769 tree type = build_nonstandard_integer_type (prec, 1);
2770 tmp = build1 (NOP_EXPR, type, name2);
2771 val2 = fold_convert (type, val2);
2772 }
2773 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
2774 new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
2775 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
2776 }
2777 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
2778 {
2779 wide_int minval
2780 = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2781 new_val = val2;
2782 if (minval == wi::to_wide (new_val))
2783 new_val = NULL_TREE;
2784 }
2785 else
2786 {
2787 wide_int maxval
2788 = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
2789 mask |= wi::to_wide (val2);
2790 if (wi::eq_p (mask, maxval))
2791 new_val = NULL_TREE;
2792 else
2793 new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
2794 }
2795
2796 if (new_val)
2797 {
2798 if (dump_file)
2799 {
2800 fprintf (dump_file, "Adding assert for ");
2801 print_generic_expr (dump_file, name2);
2802 fprintf (dump_file, " from ");
2803 print_generic_expr (dump_file, tmp);
2804 fprintf (dump_file, "\n");
2805 }
2806
2807 add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
2808 }
2809 }
2810
2811 /* Add asserts for NAME cmp CST and NAME being defined as
2812 NAME = NAME2 & CST2.
2813
2814 Extract CST2 from the and.
2815
2816 Also handle
2817 NAME = (unsigned) NAME2;
2818 casts where NAME's type is unsigned and has smaller precision
2819 than NAME2's type as if it was NAME = NAME2 & MASK. */
2820 names[0] = NULL_TREE;
2821 names[1] = NULL_TREE;
2822 cst2 = NULL_TREE;
2823 if (rhs_code == BIT_AND_EXPR
2824 || (CONVERT_EXPR_CODE_P (rhs_code)
2825 && INTEGRAL_TYPE_P (TREE_TYPE (val))
2826 && TYPE_UNSIGNED (TREE_TYPE (val))
2827 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
2828 > prec))
2829 {
2830 name2 = gimple_assign_rhs1 (def_stmt);
2831 if (rhs_code == BIT_AND_EXPR)
2832 cst2 = gimple_assign_rhs2 (def_stmt);
2833 else
2834 {
2835 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
2836 nprec = TYPE_PRECISION (TREE_TYPE (name2));
2837 }
2838 if (TREE_CODE (name2) == SSA_NAME
2839 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
2840 && TREE_CODE (cst2) == INTEGER_CST
2841 && !integer_zerop (cst2)
2842 && (nprec > 1
2843 || TYPE_UNSIGNED (TREE_TYPE (val))))
2844 {
2845 gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
2846 if (gimple_assign_cast_p (def_stmt2))
2847 {
2848 names[1] = gimple_assign_rhs1 (def_stmt2);
2849 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
2850 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
2851 || (TYPE_PRECISION (TREE_TYPE (name2))
2852 != TYPE_PRECISION (TREE_TYPE (names[1]))))
2853 names[1] = NULL_TREE;
2854 }
2855 names[0] = name2;
2856 }
2857 }
2858 if (names[0] || names[1])
2859 {
2860 wide_int minv, maxv, valv, cst2v;
2861 wide_int tem, sgnbit;
2862 bool valid_p = false, valn, cst2n;
2863 enum tree_code ccode = comp_code;
2864
2865 valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
2866 cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
2867 valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
2868 cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
2869 /* If CST2 doesn't have its most significant bit set,
2870 but VAL is negative, we have a comparison like
2871 if ((x & 0x123) > -4) (always true). Just give up. */
2872 if (!cst2n && valn)
2873 ccode = ERROR_MARK;
2874 if (cst2n)
2875 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2876 else
2877 sgnbit = wi::zero (nprec);
2878 minv = valv & cst2v;
2879 switch (ccode)
2880 {
2881 case EQ_EXPR:
2882 /* Minimum unsigned value for equality is VAL & CST2
2883 (should be equal to VAL, otherwise we probably should
2884 have folded the comparison into false) and
2885 maximum unsigned value is VAL | ~CST2. */
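/* E.g., (x & 0xf0) == 0x30 bounds x to [0x30, 0x3f] in 8-bit
precision: MINV computed above is 0x30 & 0xf0 and MAXV below
is 0x30 | ~0xf0. */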
2886 maxv = valv | ~cst2v;
2887 valid_p = true;
2888 break;
2889
2890 case NE_EXPR:
2891 tem = valv | ~cst2v;
2892 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
2893 if (valv == 0)
2894 {
2895 cst2n = false;
2896 sgnbit = wi::zero (nprec);
2897 goto gt_expr;
2898 }
2899 /* If (VAL | ~CST2) is all ones, handle it as
2900 (X & CST2) < VAL. */
2901 if (tem == -1)
2902 {
2903 cst2n = false;
2904 valn = false;
2905 sgnbit = wi::zero (nprec);
2906 goto lt_expr;
2907 }
2908 if (!cst2n && wi::neg_p (cst2v))
2909 sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
2910 if (sgnbit != 0)
2911 {
2912 if (valv == sgnbit)
2913 {
2914 cst2n = true;
2915 valn = true;
2916 goto gt_expr;
2917 }
2918 if (tem == wi::mask (nprec - 1, false, nprec))
2919 {
2920 cst2n = true;
2921 goto lt_expr;
2922 }
2923 if (!cst2n)
2924 sgnbit = wi::zero (nprec);
2925 }
2926 break;
2927
2928 case GE_EXPR:
2929 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
2930 is VAL and maximum unsigned value is ~0. For signed
2931 comparison, if CST2 doesn't have most significant bit
2932 set, handle it similarly. If CST2 has MSB set,
2933 the minimum is the same, and maximum is ~0U/2. */
2934 if (minv != valv)
2935 {
2936 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
2937 VAL. */
2938 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2939 if (minv == valv)
2940 break;
2941 }
2942 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2943 valid_p = true;
2944 break;
2945
2946 case GT_EXPR:
2947 gt_expr:
2948 /* Find out smallest MINV where MINV > VAL
2949 && (MINV & CST2) == MINV, if any. If VAL is signed and
2950 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
2951 minv = masked_increment (valv, cst2v, sgnbit, nprec);
2952 if (minv == valv)
2953 break;
2954 maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
2955 valid_p = true;
2956 break;
2957
2958 case LE_EXPR:
2959 /* Minimum unsigned value for <= is 0 and maximum
2960 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
2961 Otherwise, find smallest VAL2 where VAL2 > VAL
2962 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2963 as maximum.
2964 For signed comparison, if CST2 doesn't have most
2965 significant bit set, handle it similarly. If CST2 has
2966 MSB set, the maximum is the same and minimum is INT_MIN. */
2967 if (minv == valv)
2968 maxv = valv;
2969 else
2970 {
2971 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
2972 if (maxv == valv)
2973 break;
2974 maxv -= 1;
2975 }
2976 maxv |= ~cst2v;
2977 minv = sgnbit;
2978 valid_p = true;
2979 break;
2980
2981 case LT_EXPR:
2982 lt_expr:
2983 /* Minimum unsigned value for < is 0 and maximum
2984 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
2985 Otherwise, find smallest VAL2 where VAL2 > VAL
2986 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2987 as maximum.
2988 For signed comparison, if CST2 doesn't have most
2989 significant bit set, handle it similarly. If CST2 has
2990 MSB set, the maximum is the same and minimum is INT_MIN. */
2991 if (minv == valv)
2992 {
2993 if (valv == sgnbit)
2994 break;
2995 maxv = valv;
2996 }
2997 else
2998 {
2999 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
3000 if (maxv == valv)
3001 break;
3002 }
3003 maxv -= 1;
3004 maxv |= ~cst2v;
3005 minv = sgnbit;
3006 valid_p = true;
3007 break;
3008
3009 default:
3010 break;
3011 }
3012 if (valid_p
3013 && (maxv - minv) != -1)
3014 {
3015 tree tmp, new_val, type;
3016 int i;
3017
3018 for (i = 0; i < 2; i++)
3019 if (names[i])
3020 {
3021 wide_int maxv2 = maxv;
3022 tmp = names[i];
3023 type = TREE_TYPE (names[i]);
3024 if (!TYPE_UNSIGNED (type))
3025 {
3026 type = build_nonstandard_integer_type (nprec, 1);
3027 tmp = build1 (NOP_EXPR, type, names[i]);
3028 }
3029 if (minv != 0)
3030 {
3031 tmp = build2 (PLUS_EXPR, type, tmp,
3032 wide_int_to_tree (type, -minv));
3033 maxv2 = maxv - minv;
3034 }
3035 new_val = wide_int_to_tree (type, maxv2);
3036
3037 if (dump_file)
3038 {
3039 fprintf (dump_file, "Adding assert for ");
3040 print_generic_expr (dump_file, names[i]);
3041 fprintf (dump_file, " from ");
3042 print_generic_expr (dump_file, tmp);
3043 fprintf (dump_file, "\n");
3044 }
3045
3046 add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
3047 }
3048 }
3049 }
3050 }
3051 }
3052
3053 /* OP is an operand of a truth value expression which is known to have
3054 a particular value. Register any asserts for OP and for any
3055 operands in OP's defining statement.
3056
3057 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
3058 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
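/* For instance, 'if (a_1 && b_2)' is gimplified as t_3 = a_1 & b_2
followed by a test of t_3; on the edge where t_3 is known to be
nonzero, the BIT_AND_EXPR case below recurses and records that both
a_1 and b_2 are nonzero (names illustrative). */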
3059
3060 static void
3061 register_edge_assert_for_1 (tree op, enum tree_code code,
3062 edge e, vec<assert_info> &asserts)
3063 {
3064 gimple *op_def;
3065 tree val;
3066 enum tree_code rhs_code;
3067
3068 /* We only care about SSA_NAMEs. */
3069 if (TREE_CODE (op) != SSA_NAME)
3070 return;
3071
3072 /* We know that OP will have a zero or nonzero value. */
3073 val = build_int_cst (TREE_TYPE (op), 0);
3074 add_assert_info (asserts, op, op, code, val);
3075
3076 /* Now look at how OP is set. If it's set from a comparison,
3077 a truth operation or some bit operations, then we may be able
3078 to register information about the operands of that assignment. */
3079 op_def = SSA_NAME_DEF_STMT (op);
3080 if (gimple_code (op_def) != GIMPLE_ASSIGN)
3081 return;
3082
3083 rhs_code = gimple_assign_rhs_code (op_def);
3084
3085 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
3086 {
3087 bool invert = (code == EQ_EXPR ? true : false);
3088 tree op0 = gimple_assign_rhs1 (op_def);
3089 tree op1 = gimple_assign_rhs2 (op_def);
3090
3091 if (TREE_CODE (op0) == SSA_NAME)
3092 register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
3093 if (TREE_CODE (op1) == SSA_NAME)
3094 register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
3095 }
3096 else if ((code == NE_EXPR
3097 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
3098 || (code == EQ_EXPR
3099 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
3100 {
3101 /* Recurse on each operand. */
3102 tree op0 = gimple_assign_rhs1 (op_def);
3103 tree op1 = gimple_assign_rhs2 (op_def);
3104 if (TREE_CODE (op0) == SSA_NAME
3105 && has_single_use (op0))
3106 register_edge_assert_for_1 (op0, code, e, asserts);
3107 if (TREE_CODE (op1) == SSA_NAME
3108 && has_single_use (op1))
3109 register_edge_assert_for_1 (op1, code, e, asserts);
3110 }
3111 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
3112 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
3113 {
3114 /* Recurse, flipping CODE. */
3115 code = invert_tree_comparison (code, false);
3116 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3117 }
3118 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
3119 {
3120 /* Recurse through the copy. */
3121 register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
3122 }
3123 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
3124 {
3125 /* Recurse through the type conversion, unless it is a narrowing
3126 conversion or a conversion from a non-integral type. */
3127 tree rhs = gimple_assign_rhs1 (op_def);
3128 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
3129 && (TYPE_PRECISION (TREE_TYPE (rhs))
3130 <= TYPE_PRECISION (TREE_TYPE (op))))
3131 register_edge_assert_for_1 (rhs, code, e, asserts);
3132 }
3133 }
3134
3135 /* Check if comparison
3136 NAME COND_OP INTEGER_CST
3137 has the form
3138 (X & 11...100..0) COND_OP XX...X00...0
3139 Such a comparison can yield assertions like
3140 X >= XX...X00...0
3141 X <= XX...X11...1
3142 in case of COND_OP being EQ_EXPR or
3143 X < XX...X00...0
3144 X > XX...X11...1
3145 in case of NE_EXPR. */
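/* For instance, (x & 0xfffffff0) == 0x20 yields x >= 0x20 and
x <= 0x2f, since the low four bits are unconstrained. */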
3146
3147 static bool
3148 is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
3149 tree *new_name, tree *low, enum tree_code *low_code,
3150 tree *high, enum tree_code *high_code)
3151 {
3152 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3153
3154 if (!is_gimple_assign (def_stmt)
3155 || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
3156 return false;
3157
3158 tree t = gimple_assign_rhs1 (def_stmt);
3159 tree maskt = gimple_assign_rhs2 (def_stmt);
3160 if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
3161 return false;
3162
3163 wi::tree_to_wide_ref mask = wi::to_wide (maskt);
3164 wide_int inv_mask = ~mask;
3165 /* A degenerate mask (all zeros or all ones) should have been folded away by now, so don't bother optimizing. */
3166 if (mask == 0 || inv_mask == 0)
3167 return false;
3168
3169 /* Assume VALT is INTEGER_CST. */
3170 wi::tree_to_wide_ref val = wi::to_wide (valt);
3171
3172 if ((inv_mask & (inv_mask + 1)) != 0
3173 || (val & mask) != val)
3174 return false;
3175
3176 bool is_range = cond_code == EQ_EXPR;
3177
3178 tree type = TREE_TYPE (t);
3179 wide_int min = wi::min_value (type),
3180 max = wi::max_value (type);
3181
3182 if (is_range)
3183 {
3184 *low_code = val == min ? ERROR_MARK : GE_EXPR;
3185 *high_code = val == max ? ERROR_MARK : LE_EXPR;
3186 }
3187 else
3188 {
3189 /* We can still generate an assertion if one of the
3190 alternatives is known to always be false. */
3191 if (val == min)
3192 {
3193 *low_code = (enum tree_code) 0;
3194 *high_code = GT_EXPR;
3195 }
3196 else if ((val | inv_mask) == max)
3197 {
3198 *low_code = LT_EXPR;
3199 *high_code = (enum tree_code) 0;
3200 }
3201 else
3202 return false;
3203 }
3204
3205 *new_name = t;
3206 *low = wide_int_to_tree (type, val);
3207 *high = wide_int_to_tree (type, val | inv_mask);
3208
3209 return true;
3210 }
3211
3212 /* Try to register an edge assertion for SSA name NAME on edge E for
3213 the condition described by COND_CODE, COND_OP0 and COND_OP1, which
3214 feeds the conditional jump ending E's source block. */
3215
3216 void
3217 register_edge_assert_for (tree name, edge e,
3218 enum tree_code cond_code, tree cond_op0,
3219 tree cond_op1, vec<assert_info> &asserts)
3220 {
3221 tree val;
3222 enum tree_code comp_code;
3223 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
3224
3225 /* Do not attempt to infer anything in names that flow through
3226 abnormal edges. */
3227 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
3228 return;
3229
3230 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
3231 cond_op0, cond_op1,
3232 is_else_edge,
3233 &comp_code, &val))
3234 return;
3235
3236 /* Register ASSERT_EXPRs for name. */
3237 register_edge_assert_for_2 (name, e, cond_code, cond_op0,
3238 cond_op1, is_else_edge, asserts);
3239
3240
3241 /* If COND is effectively an equality test of an SSA_NAME against
3242 the value zero or one, then we may be able to assert values
3243 for SSA_NAMEs which flow into COND. */
3244
3245 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
3246 statement of NAME we can assert both operands of the BIT_AND_EXPR
3247 have nonzero value. */
3248 if (((comp_code == EQ_EXPR && integer_onep (val))
3249 || (comp_code == NE_EXPR && integer_zerop (val))))
3250 {
3251 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3252
3253 if (is_gimple_assign (def_stmt)
3254 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
3255 {
3256 tree op0 = gimple_assign_rhs1 (def_stmt);
3257 tree op1 = gimple_assign_rhs2 (def_stmt);
3258 register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
3259 register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
3260 }
3261 }
3262
3263 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
3264 statement of NAME we can assert both operands of the BIT_IOR_EXPR
3265 have zero value. */
3266 if (((comp_code == EQ_EXPR && integer_zerop (val))
3267 || (comp_code == NE_EXPR && integer_onep (val))))
3268 {
3269 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
3270
3271 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
3272 necessarily zero value, or if type-precision is one. */
3273 if (is_gimple_assign (def_stmt)
3274 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
3275 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
3276 || comp_code == EQ_EXPR)))
3277 {
3278 tree op0 = gimple_assign_rhs1 (def_stmt);
3279 tree op1 = gimple_assign_rhs2 (def_stmt);
3280 register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
3281 register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
3282 }
3283 }
3284
3285 /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
3286 if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
3287 && TREE_CODE (val) == INTEGER_CST)
3288 {
3289 enum tree_code low_code, high_code;
3290 tree low, high;
3291 if (is_masked_range_test (name, val, comp_code, &name, &low,
3292 &low_code, &high, &high_code))
3293 {
3294 if (low_code != ERROR_MARK)
3295 register_edge_assert_for_2 (name, e, low_code, name,
3296 low, /*invert*/false, asserts);
3297 if (high_code != ERROR_MARK)
3298 register_edge_assert_for_2 (name, e, high_code, name,
3299 high, /*invert*/false, asserts);
3300 }
3301 }
3302 }
3303
3304 /* Finish found ASSERTS for E and register them at GSI. */
3305
3306 static void
3307 finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
3308 vec<assert_info> &asserts)
3309 {
3310 for (unsigned i = 0; i < asserts.length (); ++i)
3311 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
3312 reachable from E. */
3313 if (live_on_edge (e, asserts[i].name))
3314 register_new_assert_for (asserts[i].name, asserts[i].expr,
3315 asserts[i].comp_code, asserts[i].val,
3316 NULL, e, gsi);
3317 }
3318
3319
3320
3321 /* Determine whether the outgoing edges of BB should receive an
3322 ASSERT_EXPR for each of the operands of BB's LAST statement.
3323 The last statement of BB must be a COND_EXPR.
3324
3325 If any of the sub-graphs rooted at BB have an interesting use of
3326 the predicate operands, an assert location node is added to the
3327 list of assertions for the corresponding operands. */
3328
3329 static void
3330 find_conditional_asserts (basic_block bb, gcond *last)
3331 {
3332 gimple_stmt_iterator bsi;
3333 tree op;
3334 edge_iterator ei;
3335 edge e;
3336 ssa_op_iter iter;
3337
3338 bsi = gsi_for_stmt (last);
3339
3340 /* Look for uses of the operands in each of the sub-graphs
3341 rooted at BB. We need to check each of the outgoing edges
3342 separately, so that we know what kind of ASSERT_EXPR to
3343 insert. */
3344 FOR_EACH_EDGE (e, ei, bb->succs)
3345 {
3346 if (e->dest == bb)
3347 continue;
3348
3349 /* Register the necessary assertions for each operand in the
3350 conditional predicate. */
3351 auto_vec<assert_info, 8> asserts;
3352 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
3353 register_edge_assert_for (op, e,
3354 gimple_cond_code (last),
3355 gimple_cond_lhs (last),
3356 gimple_cond_rhs (last), asserts);
3357 finish_register_edge_assert_for (e, bsi, asserts);
3358 }
3359 }
3360
3361 struct case_info
3362 {
3363 tree expr;
3364 basic_block bb;
3365 };
3366
3367 /* Compare two case labels, sorting first by the destination bb index
3368 and then by the case value. */
3369
3370 static int
3371 compare_case_labels (const void *p1, const void *p2)
3372 {
3373 const struct case_info *ci1 = (const struct case_info *) p1;
3374 const struct case_info *ci2 = (const struct case_info *) p2;
3375 int idx1 = ci1->bb->index;
3376 int idx2 = ci2->bb->index;
3377
3378 if (idx1 < idx2)
3379 return -1;
3380 else if (idx1 == idx2)
3381 {
3382 /* Make sure the default label is first in a group. */
3383 if (!CASE_LOW (ci1->expr))
3384 return -1;
3385 else if (!CASE_LOW (ci2->expr))
3386 return 1;
3387 else
3388 return tree_int_cst_compare (CASE_LOW (ci1->expr),
3389 CASE_LOW (ci2->expr));
3390 }
3391 else
3392 return 1;
3393 }
3394
3395 /* Determine whether the outgoing edges of BB should receive an
3396 ASSERT_EXPR for each of the operands of BB's LAST statement.
3397 The last statement of BB must be a SWITCH_EXPR.
3398
3399 If any of the sub-graphs rooted at BB have an interesting use of
3400 the predicate operands, an assert location node is added to the
3401 list of assertions for the corresponding operands. */
3402
3403 static void
3404 find_switch_asserts (basic_block bb, gswitch *last)
3405 {
3406 gimple_stmt_iterator bsi;
3407 tree op;
3408 edge e;
3409 struct case_info *ci;
3410 size_t n = gimple_switch_num_labels (last);
3411 #if GCC_VERSION >= 4000
3412 unsigned int idx;
3413 #else
3414 /* Work around GCC 3.4 bug (PR 37086). */
3415 volatile unsigned int idx;
3416 #endif
3417
3418 bsi = gsi_for_stmt (last);
3419 op = gimple_switch_index (last);
3420 if (TREE_CODE (op) != SSA_NAME)
3421 return;
3422
3423 /* Build a vector of case labels sorted by destination label. */
3424 ci = XNEWVEC (struct case_info, n);
3425 for (idx = 0; idx < n; ++idx)
3426 {
3427 ci[idx].expr = gimple_switch_label (last, idx);
3428 ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
3429 }
3430 edge default_edge = find_edge (bb, ci[0].bb);
3431 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
3432
3433 for (idx = 0; idx < n; ++idx)
3434 {
3435 tree min, max;
3436 tree cl = ci[idx].expr;
3437 basic_block cbb = ci[idx].bb;
3438
3439 min = CASE_LOW (cl);
3440 max = CASE_HIGH (cl);
3441
3442 /* If there are multiple case labels with the same destination
3443 we need to combine them to a single value range for the edge. */
3444 if (idx + 1 < n && cbb == ci[idx + 1].bb)
3445 {
3446 /* Skip labels until the last of the group. */
3447 do {
3448 ++idx;
3449 } while (idx < n && cbb == ci[idx].bb);
3450 --idx;
3451
3452 /* Pick up the maximum of the case label range. */
3453 if (CASE_HIGH (ci[idx].expr))
3454 max = CASE_HIGH (ci[idx].expr);
3455 else
3456 max = CASE_LOW (ci[idx].expr);
3457 }
3458
3459 /* Can't extract a useful assertion out of a range that includes the
3460 default label. */
3461 if (min == NULL_TREE)
3462 continue;
3463
3464 /* Find the edge to register the assert expr on. */
3465 e = find_edge (bb, cbb);
3466
3467 /* Register the necessary assertions for the operand in the
3468 SWITCH_EXPR. */
3469 auto_vec<assert_info, 8> asserts;
3470 register_edge_assert_for (op, e,
3471 max ? GE_EXPR : EQ_EXPR,
3472 op, fold_convert (TREE_TYPE (op), min),
3473 asserts);
3474 if (max)
3475 register_edge_assert_for (op, e, LE_EXPR, op,
3476 fold_convert (TREE_TYPE (op), max),
3477 asserts);
3478 finish_register_edge_assert_for (e, bsi, asserts);
3479 }
3480
3481 XDELETEVEC (ci);
3482
3483 if (!live_on_edge (default_edge, op))
3484 return;
3485
3486 /* Now register, along the default edge, assertions that correspond to
3487 the anti-range of each case label. */
3488 int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
3489 if (insertion_limit == 0)
3490 return;
3491
3492 /* We can't do this if the default case shares a label with another case. */
3493 tree default_cl = gimple_switch_default_label (last);
3494 for (idx = 1; idx < n; idx++)
3495 {
3496 tree min, max;
3497 tree cl = gimple_switch_label (last, idx);
3498 if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
3499 continue;
3500
3501 min = CASE_LOW (cl);
3502 max = CASE_HIGH (cl);
3503
3504 /* Combine contiguous case ranges to reduce the number of assertions
3505 to insert. */
3506 for (idx = idx + 1; idx < n; idx++)
3507 {
3508 tree next_min, next_max;
3509 tree next_cl = gimple_switch_label (last, idx);
3510 if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
3511 break;
3512
3513 next_min = CASE_LOW (next_cl);
3514 next_max = CASE_HIGH (next_cl);
3515
3516 wide_int difference = (wi::to_wide (next_min)
3517 - wi::to_wide (max ? max : min));
3518 if (wi::eq_p (difference, 1))
3519 max = next_max ? next_max : next_min;
3520 else
3521 break;
3522 }
3523 idx--;
3524
3525 if (max == NULL_TREE)
3526 {
3527 /* Register the assertion OP != MIN. */
3528 auto_vec<assert_info, 8> asserts;
3529 min = fold_convert (TREE_TYPE (op), min);
3530 register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
3531 asserts);
3532 finish_register_edge_assert_for (default_edge, bsi, asserts);
3533 }
3534 else
3535 {
3536 /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
3537 which will give OP the anti-range ~[MIN,MAX]. */
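/* E.g., for MIN = 3 and MAX = 7, (unsigned)OP - 3 > 4 holds exactly
when OP < 3 or OP > 7, courtesy of unsigned wrap-around. */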
3538 tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
3539 min = fold_convert (TREE_TYPE (uop), min);
3540 max = fold_convert (TREE_TYPE (uop), max);
3541
3542 tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
3543 tree rhs = int_const_binop (MINUS_EXPR, max, min);
3544 register_new_assert_for (op, lhs, GT_EXPR, rhs,
3545 NULL, default_edge, bsi);
3546 }
3547
3548 if (--insertion_limit == 0)
3549 break;
3550 }
3551 }
3552
3553
3554 /* Traverse all the statements in block BB looking for statements that
3555 may generate useful assertions for the SSA names in their operands.
3556 If a statement produces a useful assertion A for name N_i, then the
3557 list of assertions already generated for N_i is scanned to
3558 determine if A is actually needed.
3559
3560 If N_i already had the assertion A at a location dominating the
3561 current location, then nothing needs to be done. Otherwise, the
3562 new location for A is recorded instead.
3563
3564 1- For every statement S in BB, all the variables used by S are
3565 added to bitmap FOUND_IN_SUBGRAPH.
3566
3567 2- If statement S uses an operand N in a way that exposes a known
3568 value range for N, then if N was not already generated by an
3569 ASSERT_EXPR, create a new assert location for N. For instance,
3570 if N is a pointer and the statement dereferences it, we can
3571 assume that N is not NULL.
3572
3573 3- COND_EXPRs are a special case of #2. We can derive range
3574 information from the predicate but need to insert different
3575 ASSERT_EXPRs for each of the sub-graphs rooted at the
3576 conditional block. If the last statement of BB is a conditional
3577 expression of the form 'X op Y', then
3578
3579 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
3580
3581 b) If the conditional is the only entry point to the sub-graph
3582 corresponding to the THEN_CLAUSE, recurse into it. On
3583 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
3584 an ASSERT_EXPR is added for the corresponding variable.
3585
3586 c) Repeat step (b) on the ELSE_CLAUSE.
3587
3588 d) Mark X and Y in FOUND_IN_SUBGRAPH.
3589
3590 For instance,
3591
3592 if (a == 9)
3593 b = a;
3594 else
3595 b = c + 1;
3596
3597 In this case, an assertion on the THEN clause is useful to
3598 determine that 'a' is always 9 on that edge. However, an assertion
3599 on the ELSE clause would be unnecessary.
3600
3601 4- If BB does not end in a conditional expression, then we recurse
3602 into BB's dominator children.
3603
3604 At the end of the recursive traversal, every SSA name will have a
3605 list of locations where ASSERT_EXPRs should be added. When a new
3606 location for name N is found, it is registered by calling
3607 register_new_assert_for. That function keeps track of all the
3608 registered assertions to prevent adding unnecessary assertions.
3609 For instance, if a pointer P_4 is dereferenced more than once in a
3610 dominator tree, only the location dominating all the dereferences of
3611 P_4 will receive an ASSERT_EXPR. */
3612
3613 static void
3614 find_assert_locations_1 (basic_block bb, sbitmap live)
3615 {
3616 gimple *last;
3617
3618 last = last_stmt (bb);
3619
3620 /* If BB's last statement is a conditional statement involving integer
3621 operands, determine if we need to add ASSERT_EXPRs. */
3622 if (last
3623 && gimple_code (last) == GIMPLE_COND
3624 && !fp_predicate (last)
3625 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3626 find_conditional_asserts (bb, as_a <gcond *> (last));
3627
3628 /* If BB's last statement is a switch statement involving integer
3629 operands, determine if we need to add ASSERT_EXPRs. */
3630 if (last
3631 && gimple_code (last) == GIMPLE_SWITCH
3632 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
3633 find_switch_asserts (bb, as_a <gswitch *> (last));
3634
3635 /* Traverse all the statements in BB marking used names and looking
3636 for statements that may infer assertions for their used operands. */
3637 for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
3638 gsi_prev (&si))
3639 {
3640 gimple *stmt;
3641 tree op;
3642 ssa_op_iter i;
3643
3644 stmt = gsi_stmt (si);
3645
3646 if (is_gimple_debug (stmt))
3647 continue;
3648
3649 /* See if we can derive an assertion for any of STMT's operands. */
3650 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3651 {
3652 tree value;
3653 enum tree_code comp_code;
3654
3655 /* If op is not live beyond this stmt, do not bother to insert
3656 asserts for it. */
3657 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
3658 continue;
3659
3660 /* If OP is used in such a way that we can infer a value
3661 range for it, and we don't find a previous assertion for
3662 it, create a new assertion location node for OP. */
3663 if (infer_value_range (stmt, op, &comp_code, &value))
3664 {
3665 /* If we are able to infer a nonzero value range for OP,
3666 then walk backwards through the use-def chain to see if OP
3667 was set via a typecast.
3668
3669 If so, then we can also infer a nonzero value range
3670 for the operand of the NOP_EXPR. */
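/* For instance, given
     q_2 = (int *) p_1;
     ... *q_2 ...
   inferring q_2 != 0 from the dereference lets us also register
   the assertion p_1 != 0 for the conversion operand.  */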
3671 if (comp_code == NE_EXPR && integer_zerop (value))
3672 {
3673 tree t = op;
3674 gimple *def_stmt = SSA_NAME_DEF_STMT (t);
3675
3676 while (is_gimple_assign (def_stmt)
3677 && CONVERT_EXPR_CODE_P
3678 (gimple_assign_rhs_code (def_stmt))
3679 && TREE_CODE
3680 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
3681 && POINTER_TYPE_P
3682 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
3683 {
3684 t = gimple_assign_rhs1 (def_stmt);
3685 def_stmt = SSA_NAME_DEF_STMT (t);
3686
3687 /* Note we want to register the assert for the
3688 operand of the NOP_EXPR after SI, not after the
3689 conversion. */
3690 if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
3691 register_new_assert_for (t, t, comp_code, value,
3692 bb, NULL, si);
3693 }
3694 }
3695
3696 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
3697 }
3698 }
3699
3700 /* Update live. */
3701 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
3702 bitmap_set_bit (live, SSA_NAME_VERSION (op));
3703 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
3704 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
3705 }
3706
3707 /* Traverse all PHI nodes in BB, updating live. */
3708 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
3709 gsi_next (&si))
3710 {
3711 use_operand_p arg_p;
3712 ssa_op_iter i;
3713 gphi *phi = si.phi ();
3714 tree res = gimple_phi_result (phi);
3715
3716 if (virtual_operand_p (res))
3717 continue;
3718
3719 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
3720 {
3721 tree arg = USE_FROM_PTR (arg_p);
3722 if (TREE_CODE (arg) == SSA_NAME)
3723 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
3724 }
3725
3726 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
3727 }
3728 }
3729
3730 /* Do an RPO walk over the function computing SSA name liveness
3731 on-the-fly and deciding on assert expressions to insert. */
3732
3733 static void
3734 find_assert_locations (void)
3735 {
3736 int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3737 int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
3738 int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
3739 int rpo_cnt, i;
3740
3741 live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
3742 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
3743 for (i = 0; i < rpo_cnt; ++i)
3744 bb_rpo[rpo[i]] = i;
3745
3746 /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
3747 the order in which we compute liveness and insert asserts, we
3748 would otherwise fail to insert asserts into the loop latch. */
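/* For instance, for a loop header PHI
     x_1 = PHI <x_0 (preheader), x_2 (latch)>
   the latch block itself may contain no use of x_2, so without this
   pre-seeding x_2 would not be live there and no assert could be
   inserted for it on the back edge.  */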
3749 loop_p loop;
3750 FOR_EACH_LOOP (loop, 0)
3751 {
3752 i = loop->latch->index;
3753 unsigned int j = single_succ_edge (loop->latch)->dest_idx;
3754 for (gphi_iterator gsi = gsi_start_phis (loop->header);
3755 !gsi_end_p (gsi); gsi_next (&gsi))
3756 {
3757 gphi *phi = gsi.phi ();
3758 if (virtual_operand_p (gimple_phi_result (phi)))
3759 continue;
3760 tree arg = gimple_phi_arg_def (phi, j);
3761 if (TREE_CODE (arg) == SSA_NAME)
3762 {
3763 if (live[i] == NULL)
3764 {
3765 live[i] = sbitmap_alloc (num_ssa_names);
3766 bitmap_clear (live[i]);
3767 }
3768 bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
3769 }
3770 }
3771 }
3772
3773 for (i = rpo_cnt - 1; i >= 0; --i)
3774 {
3775 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
3776 edge e;
3777 edge_iterator ei;
3778
3779 if (!live[rpo[i]])
3780 {
3781 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
3782 bitmap_clear (live[rpo[i]]);
3783 }
3784
3785 /* Process BB and update the live information with uses in
3786 this block. */
3787 find_assert_locations_1 (bb, live[rpo[i]]);
3788
3789 /* Merge liveness into the predecessor blocks and free it. */
3790 if (!bitmap_empty_p (live[rpo[i]]))
3791 {
3792 int pred_rpo = i;
3793 FOR_EACH_EDGE (e, ei, bb->preds)
3794 {
3795 int pred = e->src->index;
3796 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
3797 continue;
3798
3799 if (!live[pred])
3800 {
3801 live[pred] = sbitmap_alloc (num_ssa_names);
3802 bitmap_clear (live[pred]);
3803 }
3804 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
3805
3806 if (bb_rpo[pred] < pred_rpo)
3807 pred_rpo = bb_rpo[pred];
3808 }
3809
3810 /* Record the RPO number of the last visited block that needs
3811 live information from this block. */
3812 last_rpo[rpo[i]] = pred_rpo;
3813 }
3814 else
3815 {
3816 sbitmap_free (live[rpo[i]]);
3817 live[rpo[i]] = NULL;
3818 }
3819
3820 /* We can free all successors' live bitmaps if all their
3821 predecessors have been visited already. */
3822 FOR_EACH_EDGE (e, ei, bb->succs)
3823 if (last_rpo[e->dest->index] == i
3824 && live[e->dest->index])
3825 {
3826 sbitmap_free (live[e->dest->index]);
3827 live[e->dest->index] = NULL;
3828 }
3829 }
3830
3831 XDELETEVEC (rpo);
3832 XDELETEVEC (bb_rpo);
3833 XDELETEVEC (last_rpo);
3834 for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
3835 if (live[i])
3836 sbitmap_free (live[i]);
3837 XDELETEVEC (live);
3838 }
3839
3840 /* Create an ASSERT_EXPR for NAME and insert it in the location
3841 indicated by LOC. Return true if we made any edge insertions. */
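/* For instance, for an assertion x_3 > 4 registered on an edge, a
   statement of the form x_4 = ASSERT_EXPR <x_3, x_3 > 4> is queued
   on that edge with gsi_insert_on_edge and committed later by
   gsi_commit_edge_inserts.  */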
3842
3843 static bool
3844 process_assert_insertions_for (tree name, assert_locus *loc)
3845 {
3846 /* Build the comparison expression NAME_i COMP_CODE VAL. */
3847 gimple *stmt;
3848 tree cond;
3849 gimple *assert_stmt;
3850 edge_iterator ei;
3851 edge e;
3852
3853 /* If we have X <=> X do not insert an assert expr for that. */
3854 if (loc->expr == loc->val)
3855 return false;
3856
3857 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
3858 assert_stmt = build_assert_expr_for (cond, name);
3859 if (loc->e)
3860 {
3861 /* We have been asked to insert the assertion on an edge. This
3862 is used only by COND_EXPR and SWITCH_EXPR assertions. */
3863 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
3864 || (gimple_code (gsi_stmt (loc->si))
3865 == GIMPLE_SWITCH));
3866
3867 gsi_insert_on_edge (loc->e, assert_stmt);
3868 return true;
3869 }
3870
3871 /* If the stmt iterator points at the end then this is an insertion
3872 at the beginning of a block. */
3873 if (gsi_end_p (loc->si))
3874 {
3875 gimple_stmt_iterator si = gsi_after_labels (loc->bb);
3876 gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
3877 return false;
3878
3879 }
3880 /* Otherwise, we can insert right after LOC->SI, provided the
3881 statement is not the last statement in the block. */
3882 stmt = gsi_stmt (loc->si);
3883 if (!stmt_ends_bb_p (stmt))
3884 {
3885 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
3886 return false;
3887 }
3888
3889 /* If STMT must be the last statement in BB, we can only insert new
3890 assertions on the non-abnormal edge out of BB. Note that since
3891 STMT is not control flow, there may only be one non-abnormal/eh edge
3892 out of BB. */
3893 FOR_EACH_EDGE (e, ei, loc->bb->succs)
3894 if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
3895 {
3896 gsi_insert_on_edge (e, assert_stmt);
3897 return true;
3898 }
3899
3900 gcc_unreachable ();
3901 }
3902
3903 /* Qsort helper for sorting assert locations. If stable is true, don't
3904 use iterative_hash_expr because it can be unstable for -fcompare-debug,
3905 on the other hand, some pointers might be NULL. */
3906
3907 template <bool stable>
3908 static int
3909 compare_assert_loc (const void *pa, const void *pb)
3910 {
3911 assert_locus * const a = *(assert_locus * const *)pa;
3912 assert_locus * const b = *(assert_locus * const *)pb;
3913
3914 /* If stable, some asserts might be optimized away already, sort
3915 them last. */
3916 if (stable)
3917 {
3918 if (a == NULL)
3919 return b != NULL;
3920 else if (b == NULL)
3921 return -1;
3922 }
3923
3924 if (a->e == NULL && b->e != NULL)
3925 return 1;
3926 else if (a->e != NULL && b->e == NULL)
3927 return -1;
3928
3929 /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
3930 no need to test both a->e and b->e. */
3931
3932 /* Sort by destination index. */
3933 if (a->e == NULL)
3934 ;
3935 else if (a->e->dest->index > b->e->dest->index)
3936 return 1;
3937 else if (a->e->dest->index < b->e->dest->index)
3938 return -1;
3939
3940 /* Sort by comp_code. */
3941 if (a->comp_code > b->comp_code)
3942 return 1;
3943 else if (a->comp_code < b->comp_code)
3944 return -1;
3945
3946 hashval_t ha, hb;
3947
3948 /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
3949 uses DECL_UID of the VAR_DECL, so sorting might differ between
3950 -g and -g0. When doing the removal of redundant assert exprs
3951 and commonization to successors, this does not matter, but
3952 the final sort needs to be stable. */
3953 if (stable)
3954 {
3955 ha = 0;
3956 hb = 0;
3957 }
3958 else
3959 {
3960 ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
3961 hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
3962 }
3963
3964 /* Break the tie using hashing and source/bb index. */
3965 if (ha == hb)
3966 return (a->e != NULL
3967 ? a->e->src->index - b->e->src->index
3968 : a->bb->index - b->bb->index);
3969 return ha > hb ? 1 : -1;
3970 }
3971
3972 /* Process all the insertions registered for every name N_i registered
3973 in NEED_ASSERT_FOR. The list of assertions to be inserted are
3974 found in ASSERTS_FOR[i]. */
3975
3976 static void
3977 process_assert_insertions (void)
3978 {
3979 unsigned i;
3980 bitmap_iterator bi;
3981 bool update_edges_p = false;
3982 int num_asserts = 0;
3983
3984 if (dump_file && (dump_flags & TDF_DETAILS))
3985 dump_all_asserts (dump_file);
3986
3987 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3988 {
3989 assert_locus *loc = asserts_for[i];
3990 gcc_assert (loc);
3991
3992 auto_vec<assert_locus *, 16> asserts;
3993 for (; loc; loc = loc->next)
3994 asserts.safe_push (loc);
3995 asserts.qsort (compare_assert_loc<false>);
3996
3997 /* Push down common asserts to successors and remove redundant ones. */
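/* For instance, if the same assertion x_1 != 0 has been registered
   on every incoming edge of a block, a single ASSERT_EXPR is
   inserted at the start of that block instead of one on each
   edge.  */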
3998 unsigned ecnt = 0;
3999 assert_locus *common = NULL;
4000 unsigned commonj = 0;
4001 for (unsigned j = 0; j < asserts.length (); ++j)
4002 {
4003 loc = asserts[j];
4004 if (! loc->e)
4005 common = NULL;
4006 else if (! common
4007 || loc->e->dest != common->e->dest
4008 || loc->comp_code != common->comp_code
4009 || ! operand_equal_p (loc->val, common->val, 0)
4010 || ! operand_equal_p (loc->expr, common->expr, 0))
4011 {
4012 commonj = j;
4013 common = loc;
4014 ecnt = 1;
4015 }
4016 else if (loc->e == asserts[j-1]->e)
4017 {
4018 /* Remove duplicate asserts. */
4019 if (commonj == j - 1)
4020 {
4021 commonj = j;
4022 common = loc;
4023 }
4024 free (asserts[j-1]);
4025 asserts[j-1] = NULL;
4026 }
4027 else
4028 {
4029 ecnt++;
4030 if (EDGE_COUNT (common->e->dest->preds) == ecnt)
4031 {
4032 /* We have the same assertion on all incoming edges of a BB.
4033 Insert it at the beginning of that block. */
4034 loc->bb = loc->e->dest;
4035 loc->e = NULL;
4036 loc->si = gsi_none ();
4037 common = NULL;
4038 /* Clear asserts commoned. */
4039 for (; commonj != j; ++commonj)
4040 if (asserts[commonj])
4041 {
4042 free (asserts[commonj]);
4043 asserts[commonj] = NULL;
4044 }
4045 }
4046 }
4047 }
4048
4049 /* The asserts vector sorting above might be unstable for
4050 -fcompare-debug, sort again to ensure a stable sort. */
4051 asserts.qsort (compare_assert_loc<true>);
4052 for (unsigned j = 0; j < asserts.length (); ++j)
4053 {
4054 loc = asserts[j];
4055 if (! loc)
4056 break;
4057 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4058 num_asserts++;
4059 free (loc);
4060 }
4061 }
4062
4063 if (update_edges_p)
4064 gsi_commit_edge_inserts ();
4065
4066 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4067 num_asserts);
4068 }
4069
4070
4071 /* Traverse the flowgraph looking for conditional jumps to insert range
4072 expressions. These range expressions are meant to provide information
4073 to optimizations that need to reason in terms of value ranges. They
4074 will not be expanded into RTL. For instance, given:
4075
4076 x = ...
4077 y = ...
4078 if (x < y)
4079 y = x - 2;
4080 else
4081 x = y + 3;
4082
4083 this pass will transform the code into:
4084
4085 x = ...
4086 y = ...
4087 if (x < y)
4088 {
4089 x = ASSERT_EXPR <x, x < y>
4090 y = x - 2
4091 }
4092 else
4093 {
4094 y = ASSERT_EXPR <y, x >= y>
4095 x = y + 3
4096 }
4097
4098 The idea is that once copy and constant propagation have run, other
4099 optimizations will be able to determine what ranges of values can 'x'
4100 take in different paths of the code, simply by checking the reaching
4101 definition of 'x'. */
4102
4103 static void
4104 insert_range_assertions (void)
4105 {
4106 need_assert_for = BITMAP_ALLOC (NULL);
4107 asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
4108
4109 calculate_dominance_info (CDI_DOMINATORS);
4110
4111 find_assert_locations ();
4112 if (!bitmap_empty_p (need_assert_for))
4113 {
4114 process_assert_insertions ();
4115 update_ssa (TODO_update_ssa_no_phi);
4116 }
4117
4118 if (dump_file && (dump_flags & TDF_DETAILS))
4119 {
4120 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
4121 dump_function_to_file (current_function_decl, dump_file, dump_flags);
4122 }
4123
4124 free (asserts_for);
4125 BITMAP_FREE (need_assert_for);
4126 }
4127
4128 class vrp_prop : public ssa_propagation_engine
4129 {
4130 public:
4131 enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
4132 enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
4133
4134 void vrp_initialize (void);
4135 void vrp_finalize (bool);
4136 void check_all_array_refs (void);
4137 void check_array_ref (location_t, tree, bool);
4138 void check_mem_ref (location_t, tree, bool);
4139 void search_for_addr_array (tree, location_t);
4140
4141 class vr_values vr_values;
4142 /* Temporary delegator to minimize code churn. */
4143 value_range *get_value_range (const_tree op)
4144 { return vr_values.get_value_range (op); }
4145 void set_defs_to_varying (gimple *stmt)
4146 { return vr_values.set_defs_to_varying (stmt); }
4147 void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
4148 tree *output_p, value_range *vr)
4149 { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
4150 bool update_value_range (const_tree op, value_range *vr)
4151 { return vr_values.update_value_range (op, vr); }
4152 void extract_range_basic (value_range *vr, gimple *stmt)
4153 { vr_values.extract_range_basic (vr, stmt); }
4154 void extract_range_from_phi_node (gphi *phi, value_range *vr)
4155 { vr_values.extract_range_from_phi_node (phi, vr); }
4156 };
4157 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible
4158 arrays and "struct" hacks. If VRP can determine that the
4159 array subscript is a constant, check if it is outside the valid
4160 range. If the array subscript is a RANGE, warn if it is
4161 non-overlapping with the valid range.
4162 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
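/* For instance, given
     int a[10];
     ... = a[11];
   the constant subscript 11 is above the upper bound 9, so a
   -Warray-bounds warning is issued; &a[10] is accepted because
   IGNORE_OFF_BY_ONE is true for an ARRAY_REF inside an ADDR_EXPR.  */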
4163
4164 void
4165 vrp_prop::check_array_ref (location_t location, tree ref,
4166 bool ignore_off_by_one)
4167 {
4168 const value_range *vr = NULL;
4169 tree low_sub, up_sub;
4170 tree low_bound, up_bound, up_bound_p1;
4171
4172 if (TREE_NO_WARNING (ref))
4173 return;
4174
4175 low_sub = up_sub = TREE_OPERAND (ref, 1);
4176 up_bound = array_ref_up_bound (ref);
4177
4178 if (!up_bound
4179 || TREE_CODE (up_bound) != INTEGER_CST
4180 || (warn_array_bounds < 2
4181 && array_at_struct_end_p (ref)))
4182 {
4183 /* Accesses to trailing arrays via pointers may access storage
4184 beyond the type's array bounds. For such arrays, or for flexible
4185 array members, as well as for other arrays of an unknown size,
4186 replace the upper bound with a more permissive one that assumes
4187 the size of the largest object is PTRDIFF_MAX. */
4188 tree eltsize = array_ref_element_size (ref);
4189
4190 if (TREE_CODE (eltsize) != INTEGER_CST
4191 || integer_zerop (eltsize))
4192 {
4193 up_bound = NULL_TREE;
4194 up_bound_p1 = NULL_TREE;
4195 }
4196 else
4197 {
4198 tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
4199 tree arg = TREE_OPERAND (ref, 0);
4200 poly_int64 off;
4201
4202 if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
4203 maxbound = wide_int_to_tree (sizetype,
4204 wi::sub (wi::to_wide (maxbound),
4205 off));
4206 else
4207 maxbound = fold_convert (sizetype, maxbound);
4208
4209 up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);
4210
4211 up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
4212 build_int_cst (ptrdiff_type_node, 1));
4213 }
4214 }
4215 else
4216 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
4217 build_int_cst (TREE_TYPE (up_bound), 1));
4218
4219 low_bound = array_ref_low_bound (ref);
4220
4221 tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));
4222
4223 bool warned = false;
4224
4225 /* Empty array. */
4226 if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
4227 warned = warning_at (location, OPT_Warray_bounds,
4228 "array subscript %E is above array bounds of %qT",
4229 low_bound, artype);
4230
4231 if (TREE_CODE (low_sub) == SSA_NAME)
4232 {
4233 vr = get_value_range (low_sub);
4234 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4235 {
4236 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
4237 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
4238 }
4239 }
4240
4241 if (vr && vr->type == VR_ANTI_RANGE)
4242 {
4243 if (up_bound
4244 && TREE_CODE (up_sub) == INTEGER_CST
4245 && (ignore_off_by_one
4246 ? tree_int_cst_lt (up_bound, up_sub)
4247 : tree_int_cst_le (up_bound, up_sub))
4248 && TREE_CODE (low_sub) == INTEGER_CST
4249 && tree_int_cst_le (low_sub, low_bound))
4250 warned = warning_at (location, OPT_Warray_bounds,
4251 "array subscript [%E, %E] is outside "
4252 "array bounds of %qT",
4253 low_sub, up_sub, artype);
4254 }
4255 else if (up_bound
4256 && TREE_CODE (up_sub) == INTEGER_CST
4257 && (ignore_off_by_one
4258 ? !tree_int_cst_le (up_sub, up_bound_p1)
4259 : !tree_int_cst_le (up_sub, up_bound)))
4260 {
4261 if (dump_file && (dump_flags & TDF_DETAILS))
4262 {
4263 fprintf (dump_file, "Array bound warning for ");
4264 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4265 fprintf (dump_file, "\n");
4266 }
4267 warned = warning_at (location, OPT_Warray_bounds,
4268 "array subscript %E is above array bounds of %qT",
4269 up_sub, artype);
4270 }
4271 else if (TREE_CODE (low_sub) == INTEGER_CST
4272 && tree_int_cst_lt (low_sub, low_bound))
4273 {
4274 if (dump_file && (dump_flags & TDF_DETAILS))
4275 {
4276 fprintf (dump_file, "Array bound warning for ");
4277 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
4278 fprintf (dump_file, "\n");
4279 }
4280 warned = warning_at (location, OPT_Warray_bounds,
4281 "array subscript %E is below array bounds of %qT",
4282 low_sub, artype);
4283 }
4284
4285 if (warned)
4286 {
4287 ref = TREE_OPERAND (ref, 0);
4288
4289 if (DECL_P (ref))
4290 inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);
4291
4292 TREE_NO_WARNING (ref) = 1;
4293 }
4294 }
4295
4296 /* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
4297 references to string constants. If VRP can determine that the array
4298 subscript is a constant, check if it is outside valid range.
4299 If the array subscript is a RANGE, warn if it is non-overlapping
4300 with valid range.
4301 IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
4302 (used to allow one-past-the-end indices for code that takes
4303 the address of the just-past-the-end element of an array). */
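/* For instance, given
     char a[10];
     ... MEM[(char *)&a + 12] ...
   the byte offset 12 is past the end of 'a' (valid offsets are 0 to
   9), so subscript 12 is diagnosed as outside the bounds of
   char[10].  */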
4304
4305 void
4306 vrp_prop::check_mem_ref (location_t location, tree ref,
4307 bool ignore_off_by_one)
4308 {
4309 if (TREE_NO_WARNING (ref))
4310 return;
4311
4312 tree arg = TREE_OPERAND (ref, 0);
4313 /* The constant and variable offset of the reference. */
4314 tree cstoff = TREE_OPERAND (ref, 1);
4315 tree varoff = NULL_TREE;
4316
4317 const offset_int maxobjsize = tree_to_shwi (max_object_size ());
4318
4319 /* The array or string constant bounds in bytes. Initially set
4320 to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
4321 determined. */
4322 offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };
4323
4324 /* The minimum and maximum intermediate offset. For a reference
4325 to be valid, not only must the final offset/subscript be
4326 in bounds but all intermediate offsets should be as well.
4327 GCC may be able to deal gracefully with such out-of-bounds
4328 offsets so the checking is only enabled at -Warray-bounds=2
4329 where it may help detect bugs in uses of the intermediate
4330 offsets that could not otherwise be detected. */
4331 offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
4332 offset_int extrema[2] = { 0, wi::abs (ioff) };
4333
4334 /* The range of the byte offset into the reference. */
4335 offset_int offrange[2] = { 0, 0 };
4336
4337 const value_range *vr = NULL;
4338
4339 /* Determine the offsets and increment OFFRANGE for the bounds of each.
4340 The loop computes the range of the final offset for expressions
4341 such as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs
4342 in some range. */
4343 while (TREE_CODE (arg) == SSA_NAME)
4344 {
4345 gimple *def = SSA_NAME_DEF_STMT (arg);
4346 if (!is_gimple_assign (def))
4347 break;
4348
4349 tree_code code = gimple_assign_rhs_code (def);
4350 if (code == POINTER_PLUS_EXPR)
4351 {
4352 arg = gimple_assign_rhs1 (def);
4353 varoff = gimple_assign_rhs2 (def);
4354 }
4355 else if (code == ASSERT_EXPR)
4356 {
4357 arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
4358 continue;
4359 }
4360 else
4361 return;
4362
4363 /* VAROFF should always be an SSA_NAME here (and not even
4364 INTEGER_CST) but there's no point in taking chances. */
4365 if (TREE_CODE (varoff) != SSA_NAME)
4366 break;
4367
4368 vr = get_value_range (varoff);
4369 if (!vr || vr->type == VR_UNDEFINED || !vr->min || !vr->max)
4370 break;
4371
4372 if (TREE_CODE (vr->min) != INTEGER_CST
4373 || TREE_CODE (vr->max) != INTEGER_CST)
4374 break;
4375
4376 if (vr->type == VR_RANGE)
4377 {
4378 if (tree_int_cst_lt (vr->min, vr->max))
4379 {
4380 offset_int min
4381 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->min));
4382 offset_int max
4383 = wi::to_offset (fold_convert (ptrdiff_type_node, vr->max));
4384 if (min < max)
4385 {
4386 offrange[0] += min;
4387 offrange[1] += max;
4388 }
4389 else
4390 {
4391 offrange[0] += max;
4392 offrange[1] += min;
4393 }
4394 }
4395 else
4396 {
4397 /* Conservatively add [-MAXOBJSIZE -1, MAXOBJSIZE]
4398 to OFFRANGE. */
4399 offrange[0] += arrbounds[0];
4400 offrange[1] += arrbounds[1];
4401 }
4402 }
4403 else
4404 {
4405 /* For an anti-range, analogously to the above, conservatively
4406 add [-MAXOBJSIZE -1, MAXOBJSIZE] to OFFRANGE. */
4407 offrange[0] += arrbounds[0];
4408 offrange[1] += arrbounds[1];
4409 }
4410
4411 /* Keep track of the minimum and maximum offset. */
4412 if (offrange[1] < 0 && offrange[1] < extrema[0])
4413 extrema[0] = offrange[1];
4414 if (offrange[0] > 0 && offrange[0] > extrema[1])
4415 extrema[1] = offrange[0];
4416
4417 if (offrange[0] < arrbounds[0])
4418 offrange[0] = arrbounds[0];
4419
4420 if (offrange[1] > arrbounds[1])
4421 offrange[1] = arrbounds[1];
4422 }
4423
4424 if (TREE_CODE (arg) == ADDR_EXPR)
4425 {
4426 arg = TREE_OPERAND (arg, 0);
4427 if (TREE_CODE (arg) != STRING_CST
4428 && TREE_CODE (arg) != VAR_DECL)
4429 return;
4430 }
4431 else
4432 return;
4433
4434 /* The type of the object being referred to. It can be an array,
4435 string literal, or a non-array type when the MEM_REF represents
4436 a reference/subscript via a pointer to an object that is not
4437 an element of an array. References to members of structs and
4438 unions are excluded because MEM_REF doesn't make it possible
4439 to identify the member where the reference originated.
4440 Incomplete types are excluded as well because their size is
4441 not known. */
4442 tree reftype = TREE_TYPE (arg);
4443 if (POINTER_TYPE_P (reftype)
4444 || !COMPLETE_TYPE_P (reftype)
4445 || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
4446 || RECORD_OR_UNION_TYPE_P (reftype))
4447 return;
4448
4449 offset_int eltsize;
4450 if (TREE_CODE (reftype) == ARRAY_TYPE)
4451 {
4452 eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));
4453
4454 if (tree dom = TYPE_DOMAIN (reftype))
4455 {
4456 tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
4457 if (array_at_struct_end_p (arg)
4458 || !bnds[0] || !bnds[1])
4459 {
4460 arrbounds[0] = 0;
4461 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4462 }
4463 else
4464 {
4465 arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
4466 arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
4467 }
4468 }
4469 else
4470 {
4471 arrbounds[0] = 0;
4472 arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
4473 }
4474
4475 if (TREE_CODE (ref) == MEM_REF)
4476 {
4477 /* For MEM_REF determine a tighter bound of the non-array
4478 element type. */
4479 tree eltype = TREE_TYPE (reftype);
4480 while (TREE_CODE (eltype) == ARRAY_TYPE)
4481 eltype = TREE_TYPE (eltype);
4482 eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
4483 }
4484 }
4485 else
4486 {
4487 eltsize = 1;
4488 arrbounds[0] = 0;
4489 arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
4490 }
4491
4492 offrange[0] += ioff;
4493 offrange[1] += ioff;
4494
4495 /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
4496 is set (when taking the address of the one-past-last element
4497 of an array) but always use the stricter bound in diagnostics. */
4498 offset_int ubound = arrbounds[1];
4499 if (ignore_off_by_one)
4500 ubound += 1;
4501
4502 if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
4503 {
4504 /* Treat a reference to a non-array object as one to an array
4505 of a single element. */
4506 if (TREE_CODE (reftype) != ARRAY_TYPE)
4507 reftype = build_array_type_nelts (reftype, 1);
4508
4509 if (TREE_CODE (ref) == MEM_REF)
4510 {
4511 /* Extract the element type out of MEM_REF and use its size
4512 to compute the index to print in the diagnostic; arrays
4513 in MEM_REF don't mean anything. */
4514 tree type = TREE_TYPE (ref);
4515 while (TREE_CODE (type) == ARRAY_TYPE)
4516 type = TREE_TYPE (type);
4517 tree size = TYPE_SIZE_UNIT (type);
4518 offrange[0] = offrange[0] / wi::to_offset (size);
4519 offrange[1] = offrange[1] / wi::to_offset (size);
4520 }
4521 else
4522 {
4523 /* For anything other than MEM_REF, compute the index to
4524 print in the diagnostic as the offset over element size. */
4525 offrange[0] = offrange[0] / eltsize;
4526 offrange[1] = offrange[1] / eltsize;
4527 }
4528
4529 bool warned;
4530 if (offrange[0] == offrange[1])
4531 warned = warning_at (location, OPT_Warray_bounds,
4532 "array subscript %wi is outside array bounds "
4533 "of %qT",
4534 offrange[0].to_shwi (), reftype);
4535 else
4536 warned = warning_at (location, OPT_Warray_bounds,
4537 "array subscript [%wi, %wi] is outside "
4538 "array bounds of %qT",
4539 offrange[0].to_shwi (),
4540 offrange[1].to_shwi (), reftype);
4541 if (warned && DECL_P (arg))
4542 inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);
4543
4544 TREE_NO_WARNING (ref) = 1;
4545 return;
4546 }
4547
4548 if (warn_array_bounds < 2)
4549 return;
4550
4551 /* At level 2 check also intermediate offsets. */
4552 int i = 0;
4553 if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
4554 {
4555 HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();
4556
4557 warning_at (location, OPT_Warray_bounds,
4558 "intermediate array offset %wi is outside array bounds "
4559 "of %qT",
4560 tmpidx, reftype);
4561 TREE_NO_WARNING (ref) = 1;
4562 }
4563 }
4564
4565 /* Checks if the expr T, located at LOCATION, computes the address
4566 of an ARRAY_REF or MEM_REF, and calls check_array_ref/check_mem_ref. */
4567
4568 void
4569 vrp_prop::search_for_addr_array (tree t, location_t location)
4570 {
4571 /* Check each ARRAY_REF and MEM_REF in the reference chain. */
4572 do
4573 {
4574 if (TREE_CODE (t) == ARRAY_REF)
4575 check_array_ref (location, t, true /*ignore_off_by_one*/);
4576 else if (TREE_CODE (t) == MEM_REF)
4577 check_mem_ref (location, t, true /*ignore_off_by_one*/);
4578
4579 t = TREE_OPERAND (t, 0);
4580 }
4581 while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);
4582
4583 if (TREE_CODE (t) != MEM_REF
4584 || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
4585 || TREE_NO_WARNING (t))
4586 return;
4587
4588 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
4589 tree low_bound, up_bound, el_sz;
4590 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
4591 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
4592 || !TYPE_DOMAIN (TREE_TYPE (tem)))
4593 return;
4594
4595 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4596 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
4597 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
4598 if (!low_bound
4599 || TREE_CODE (low_bound) != INTEGER_CST
4600 || !up_bound
4601 || TREE_CODE (up_bound) != INTEGER_CST
4602 || !el_sz
4603 || TREE_CODE (el_sz) != INTEGER_CST)
4604 return;
4605
4606 offset_int idx;
4607 if (!mem_ref_offset (t).is_constant (&idx))
4608 return;
4609
4610 bool warned = false;
4611 idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
4612 if (idx < 0)
4613 {
4614 if (dump_file && (dump_flags & TDF_DETAILS))
4615 {
4616 fprintf (dump_file, "Array bound warning for ");
4617 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4618 fprintf (dump_file, "\n");
4619 }
4620 warned = warning_at (location, OPT_Warray_bounds,
4621 "array subscript %wi is below "
4622 "array bounds of %qT",
4623 idx.to_shwi (), TREE_TYPE (tem));
4624 }
4625 else if (idx > (wi::to_offset (up_bound)
4626 - wi::to_offset (low_bound) + 1))
4627 {
4628 if (dump_file && (dump_flags & TDF_DETAILS))
4629 {
4630 fprintf (dump_file, "Array bound warning for ");
4631 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
4632 fprintf (dump_file, "\n");
4633 }
4634 warned = warning_at (location, OPT_Warray_bounds,
4635 "array subscript %wu is above "
4636 "array bounds of %qT",
4637 idx.to_uhwi (), TREE_TYPE (tem));
4638 }
4639
4640 if (warned)
4641 {
4642 if (DECL_P (t))
4643 inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);
4644
4645 TREE_NO_WARNING (t) = 1;
4646 }
4647 }
4648
4649 /* walk_tree() callback that checks if *TP is
4650 an ARRAY_REF inside an ADDR_EXPR (in which an array
4651 subscript one past the valid range is allowed). Call
4652 check_array_ref for each ARRAY_REF found. The location is
4653 passed in DATA. */
4654
4655 static tree
4656 check_array_bounds (tree *tp, int *walk_subtree, void *data)
4657 {
4658 tree t = *tp;
4659 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
4660 location_t location;
4661
4662 if (EXPR_HAS_LOCATION (t))
4663 location = EXPR_LOCATION (t);
4664 else
4665 location = gimple_location (wi->stmt);
4666
4667 *walk_subtree = TRUE;
4668
4669 vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
4670 if (TREE_CODE (t) == ARRAY_REF)
4671 vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
4672 else if (TREE_CODE (t) == MEM_REF)
4673 vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
4674 else if (TREE_CODE (t) == ADDR_EXPR)
4675 {
4676 vrp_prop->search_for_addr_array (t, location);
4677 *walk_subtree = FALSE;
4678 }
4679
4680 return NULL_TREE;
4681 }
4682
4683 /* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
4684 to walk over all statements of all reachable BBs and call
4685 check_array_bounds on them. */
4686
4687 class check_array_bounds_dom_walker : public dom_walker
4688 {
4689 public:
4690 check_array_bounds_dom_walker (vrp_prop *prop)
4691 : dom_walker (CDI_DOMINATORS,
4692 /* Discover non-executable edges, preserving EDGE_EXECUTABLE
4693 flags, so that we can merge in information on
4694 non-executable edges from vrp_folder . */
4695 REACHABLE_BLOCKS_PRESERVING_FLAGS),
4696 m_prop (prop) {}
4697 ~check_array_bounds_dom_walker () {}
4698
4699 edge before_dom_children (basic_block) FINAL OVERRIDE;
4700
4701 private:
4702 vrp_prop *m_prop;
4703 };
4704
4705 /* Implementation of dom_walker::before_dom_children.
4706
4707 Walk over all statements of BB and call check_array_bounds on them,
4708 and determine if there's a unique successor edge. */
4709
4710 edge
4711 check_array_bounds_dom_walker::before_dom_children (basic_block bb)
4712 {
4713 gimple_stmt_iterator si;
4714 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4715 {
4716 gimple *stmt = gsi_stmt (si);
4717 struct walk_stmt_info wi;
4718 if (!gimple_has_location (stmt)
4719 || is_gimple_debug (stmt))
4720 continue;
4721
4722 memset (&wi, 0, sizeof (wi));
4723
4724 wi.info = m_prop;
4725
4726 walk_gimple_op (stmt, check_array_bounds, &wi);
4727 }
4728
4729 /* Determine if there's a unique successor edge, and if so, return
4730 that back to dom_walker, ensuring that we don't visit blocks that
4731 became unreachable during the VRP propagation
4732 (PR tree-optimization/83312). */
4733 return find_taken_edge (bb, NULL_TREE);
4734 }
4735
4736 /* Walk over all statements of all reachable BBs and call check_array_bounds
4737 on them. */
4738
4739 void
4740 vrp_prop::check_all_array_refs ()
4741 {
4742 check_array_bounds_dom_walker w (this);
4743 w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4744 }
4745
4746 /* Return true if all imm uses of VAR are either in STMT, or
4747 feed (optionally through a chain of single imm uses) the GIMPLE_COND
4748 in basic block COND_BB. */
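/* For instance, given
     _1 = x_3 & 31;
     _2 = (_Bool) _1;
     if (_2 != 0) ...
   every use of x_3 other than STMT feeds the GIMPLE_COND through a
   chain of single immediate uses, so true is returned.  */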
4749
4750 static bool
4751 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
4752 {
4753 use_operand_p use_p, use2_p;
4754 imm_use_iterator iter;
4755
4756 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
4757 if (USE_STMT (use_p) != stmt)
4758 {
4759 gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
4760 if (is_gimple_debug (use_stmt))
4761 continue;
4762 while (is_gimple_assign (use_stmt)
4763 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
4764 && single_imm_use (gimple_assign_lhs (use_stmt),
4765 &use2_p, &use_stmt2))
4766 use_stmt = use_stmt2;
4767 if (gimple_code (use_stmt) != GIMPLE_COND
4768 || gimple_bb (use_stmt) != cond_bb)
4769 return false;
4770 }
4771 return true;
4772 }
4773
4774 /* Handle
4775 _4 = x_3 & 31;
4776 if (_4 != 0)
4777 goto <bb 6>;
4778 else
4779 goto <bb 7>;
4780 <bb 6>:
4781 __builtin_unreachable ();
4782 <bb 7>:
4783 x_5 = ASSERT_EXPR <x_3, ...>;
4784 If x_3 has no other immediate uses (checked by the caller) and
4785 VAR is the x_3 var from the ASSERT_EXPR, we can clear the low 5
4786 bits from the non-zero bitmask. */
4787
4788 void
4789 maybe_set_nonzero_bits (edge e, tree var)
4790 {
4791 basic_block cond_bb = e->src;
4792 gimple *stmt = last_stmt (cond_bb);
4793 tree cst;
4794
4795 if (stmt == NULL
4796 || gimple_code (stmt) != GIMPLE_COND
4797 || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
4798 ? EQ_EXPR : NE_EXPR)
4799 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
4800 || !integer_zerop (gimple_cond_rhs (stmt)))
4801 return;
4802
4803 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
4804 if (!is_gimple_assign (stmt)
4805 || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
4806 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
4807 return;
4808 if (gimple_assign_rhs1 (stmt) != var)
4809 {
4810 gimple *stmt2;
4811
4812 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
4813 return;
4814 stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
4815 if (!gimple_assign_cast_p (stmt2)
4816 || gimple_assign_rhs1 (stmt2) != var
4817 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
4818 || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
4819 != TYPE_PRECISION (TREE_TYPE (var))))
4820 return;
4821 }
4822 cst = gimple_assign_rhs2 (stmt);
4823 set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
4824 wi::to_wide (cst)));
4825 }
4826
4827 /* Convert range assertion expressions into the implied copies and
4828 copy propagate away the copies. Doing the trivial copy propagation
4829 here avoids the need to run the full copy propagation pass after
4830 VRP.
4831
4832 FIXME, this will eventually lead to copy propagation removing the
4833 names that had useful range information attached to them. For
4834 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
4835 then N_i will have the range [3, +INF].
4836
4837 However, by converting the assertion into the implied copy
4838 operation N_i = N_j, we will then copy-propagate N_j into the uses
4839 of N_i and lose the range information. We may want to hold on to
4840 ASSERT_EXPRs a little while longer as the ranges could be used in
4841 things like jump threading.
4842
4843 The problem with keeping ASSERT_EXPRs around is that passes after
4844 VRP need to handle them appropriately.
4845
4846 Another approach would be to make the range information a first
4847 class property of the SSA_NAME so that it can be queried from
4848 any pass. This is made somewhat more complex by the need for
4849 multiple ranges to be associated with one SSA_NAME. */
4850
4851 static void
4852 remove_range_assertions (void)
4853 {
4854 basic_block bb;
4855 gimple_stmt_iterator si;
4856 /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
4857 a basic block preceded by GIMPLE_COND branching to it and
4858 __builtin_trap, -1 if not yet checked, 0 otherwise. */
4859 int is_unreachable;
4860
4861 /* Note that the BSI iterator bump happens at the bottom of the
4862 loop and no bump is necessary if we're removing the statement
4863 referenced by the current BSI. */
4864 FOR_EACH_BB_FN (bb, cfun)
4865 for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
4866 {
4867 gimple *stmt = gsi_stmt (si);
4868
4869 if (is_gimple_assign (stmt)
4870 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
4871 {
4872 tree lhs = gimple_assign_lhs (stmt);
4873 tree rhs = gimple_assign_rhs1 (stmt);
4874 tree var;
4875
4876 var = ASSERT_EXPR_VAR (rhs);
4877
4878 if (TREE_CODE (var) == SSA_NAME
4879 && !POINTER_TYPE_P (TREE_TYPE (lhs))
4880 && SSA_NAME_RANGE_INFO (lhs))
4881 {
4882 if (is_unreachable == -1)
4883 {
4884 is_unreachable = 0;
4885 if (single_pred_p (bb)
4886 && assert_unreachable_fallthru_edge_p
4887 (single_pred_edge (bb)))
4888 is_unreachable = 1;
4889 }
4890 /* Handle
4891 if (x_7 >= 10 && x_7 < 20)
4892 __builtin_unreachable ();
4893 x_8 = ASSERT_EXPR <x_7, ...>;
4894 if the only uses of x_7 are in the ASSERT_EXPR and
4895 in the condition. In that case, we can copy the
4896 range info from x_8 computed in this pass also
4897 for x_7. */
4898 if (is_unreachable
4899 && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
4900 single_pred (bb)))
4901 {
4902 set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
4903 SSA_NAME_RANGE_INFO (lhs)->get_min (),
4904 SSA_NAME_RANGE_INFO (lhs)->get_max ());
4905 maybe_set_nonzero_bits (single_pred_edge (bb), var);
4906 }
4907 }
4908
4909 /* Propagate the RHS into every use of the LHS. For SSA names
4910 also propagate abnormals as it merely restores the original
4911 IL in this case (a replace_uses_by would assert). */
4912 if (TREE_CODE (var) == SSA_NAME)
4913 {
4914 imm_use_iterator iter;
4915 use_operand_p use_p;
4916 gimple *use_stmt;
4917 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4918 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4919 SET_USE (use_p, var);
4920 }
4921 else
4922 replace_uses_by (lhs, var);
4923
4924 /* And finally, remove the copy, it is not needed. */
4925 gsi_remove (&si, true);
4926 release_defs (stmt);
4927 }
4928 else
4929 {
4930 if (!is_gimple_debug (gsi_stmt (si)))
4931 is_unreachable = 0;
4932 gsi_next (&si);
4933 }
4934 }
4935 }
4936
4937 /* Return true if STMT is interesting for VRP. */
4938
4939 bool
4940 stmt_interesting_for_vrp (gimple *stmt)
4941 {
4942 if (gimple_code (stmt) == GIMPLE_PHI)
4943 {
4944 tree res = gimple_phi_result (stmt);
4945 return (!virtual_operand_p (res)
4946 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
4947 || POINTER_TYPE_P (TREE_TYPE (res))));
4948 }
4949 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
4950 {
4951 tree lhs = gimple_get_lhs (stmt);
4952
4953 /* In general, assignments with virtual operands are not useful
4954 for deriving ranges, with the obvious exception of calls to
4955 builtin functions. */
4956 if (lhs && TREE_CODE (lhs) == SSA_NAME
4957 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
4958 || POINTER_TYPE_P (TREE_TYPE (lhs)))
4959 && (is_gimple_call (stmt)
4960 || !gimple_vuse (stmt)))
4961 return true;
4962 else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
4963 switch (gimple_call_internal_fn (stmt))
4964 {
4965 case IFN_ADD_OVERFLOW:
4966 case IFN_SUB_OVERFLOW:
4967 case IFN_MUL_OVERFLOW:
4968 case IFN_ATOMIC_COMPARE_EXCHANGE:
4969 /* These internal calls return _Complex integer type,
4970 but are interesting to VRP nevertheless. */
4971 if (lhs && TREE_CODE (lhs) == SSA_NAME)
4972 return true;
4973 break;
4974 default:
4975 break;
4976 }
4977 }
4978 else if (gimple_code (stmt) == GIMPLE_COND
4979 || gimple_code (stmt) == GIMPLE_SWITCH)
4980 return true;
4981
4982 return false;
4983 }
4984
4985 /* Initialization required by ssa_propagate engine. */
4986
4987 void
4988 vrp_prop::vrp_initialize ()
4989 {
4990 basic_block bb;
4991
4992 FOR_EACH_BB_FN (bb, cfun)
4993 {
4994 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
4995 gsi_next (&si))
4996 {
4997 gphi *phi = si.phi ();
4998 if (!stmt_interesting_for_vrp (phi))
4999 {
5000 tree lhs = PHI_RESULT (phi);
5001 set_value_range_to_varying (get_value_range (lhs));
5002 prop_set_simulate_again (phi, false);
5003 }
5004 else
5005 prop_set_simulate_again (phi, true);
5006 }
5007
5008 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
5009 gsi_next (&si))
5010 {
5011 gimple *stmt = gsi_stmt (si);
5012
5013 /* If the statement is a control insn, then we must
5014 simulate it at least once. Failure to do so means
5015 that its outgoing edges will never get added. */
5016 if (stmt_ends_bb_p (stmt))
5017 prop_set_simulate_again (stmt, true);
5018 else if (!stmt_interesting_for_vrp (stmt))
5019 {
5020 set_defs_to_varying (stmt);
5021 prop_set_simulate_again (stmt, false);
5022 }
5023 else
5024 prop_set_simulate_again (stmt, true);
5025 }
5026 }
5027 }
5028
5029 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5030 that includes the value VAL. The search is restricted to the range
5031 [START_IDX, n - 1] where n is the size of VEC.
5032
5033 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5034 returned.
5035
5036 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5037 it is placed in IDX and false is returned.
5038
5039 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5040 returned. */
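/* For instance, with case labels 1, 5 ... 8 and 10 at indexes 1, 2
   and 3 (index 0 holds the default label), searching for VAL 6
   stores 2 in *IDX and returns true, while searching for VAL 9
   stores 3 in *IDX and returns false.  */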
5041
5042 bool
5043 find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
5044 {
5045 size_t n = gimple_switch_num_labels (stmt);
5046 size_t low, high;
5047
5048 /* Find the case label for the minimum of the value range or the next one.
5049 At each iteration we are searching in [low, high - 1]. */
5050
5051 for (low = start_idx, high = n; high != low; )
5052 {
5053 tree t;
5054 int cmp;
5055 /* Note that i != high, so we never ask for n. */
5056 size_t i = (high + low) / 2;
5057 t = gimple_switch_label (stmt, i);
5058
5059 /* Cache the result of comparing CASE_LOW and val. */
5060 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5061
5062 if (cmp == 0)
5063 {
5064 /* Ranges cannot be empty. */
5065 *idx = i;
5066 return true;
5067 }
5068 else if (cmp > 0)
5069 high = i;
5070 else
5071 {
5072 low = i + 1;
5073 if (CASE_HIGH (t) != NULL
5074 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5075 {
5076 *idx = i;
5077 return true;
5078 }
5079 }
5080 }
5081
5082 *idx = high;
5083 return false;
5084 }
5085
5086 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5087 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5088 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5089 then MAX_IDX < MIN_IDX.
5090 Returns true if the default label is not needed. */
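/* For instance, for MIN 3 and MAX 5 over the contiguous case labels
   3, 4 and 5 at indexes 1 to 3, *MIN_IDX becomes 1, *MAX_IDX becomes
   3, and true is returned because [3, 5] is covered without gaps and
   the default label cannot be reached.  */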
5091
5092 bool
5093 find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
5094 size_t *max_idx)
5095 {
5096 size_t i, j;
5097 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5098 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5099
5100 if (i == j
5101 && min_take_default
5102 && max_take_default)
5103 {
5104 /* Only the default case label can be reached.
5105 Return an empty range. */
5106 *min_idx = 1;
5107 *max_idx = 0;
5108 return false;
5109 }
5110 else
5111 {
5112 bool take_default = min_take_default || max_take_default;
5113 tree low, high;
5114 size_t k;
5115
5116 if (max_take_default)
5117 j--;
5118
5119 /* If the case label range is continuous, we do not need
5120 the default case label. Verify that. */
5121 high = CASE_LOW (gimple_switch_label (stmt, i));
5122 if (CASE_HIGH (gimple_switch_label (stmt, i)))
5123 high = CASE_HIGH (gimple_switch_label (stmt, i));
5124 for (k = i + 1; k <= j; ++k)
5125 {
5126 low = CASE_LOW (gimple_switch_label (stmt, k));
5127 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
5128 {
5129 take_default = true;
5130 break;
5131 }
5132 high = low;
5133 if (CASE_HIGH (gimple_switch_label (stmt, k)))
5134 high = CASE_HIGH (gimple_switch_label (stmt, k));
5135 }
5136
5137 *min_idx = i;
5138 *max_idx = j;
5139 return !take_default;
5140 }
5141 }
5142
5143 /* Evaluate statement STMT. If the statement produces a useful range,
5144 return SSA_PROP_INTERESTING and record the SSA name with the
5145 interesting range into *OUTPUT_P.
5146
5147 If STMT is a conditional branch and we can determine its truth
5148 value, the taken edge is recorded in *TAKEN_EDGE_P.
5149
5150 If STMT produces a varying value, return SSA_PROP_VARYING. */
5151
5152 enum ssa_prop_result
5153 vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
5154 {
5155 value_range vr = VR_INITIALIZER;
5156 tree lhs = gimple_get_lhs (stmt);
5157 extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
5158
5159 if (*output_p)
5160 {
5161 if (update_value_range (*output_p, &vr))
5162 {
5163 if (dump_file && (dump_flags & TDF_DETAILS))
5164 {
5165 fprintf (dump_file, "Found new range for ");
5166 print_generic_expr (dump_file, *output_p);
5167 fprintf (dump_file, ": ");
5168 dump_value_range (dump_file, &vr);
5169 fprintf (dump_file, "\n");
5170 }
5171
5172 if (vr.type == VR_VARYING)
5173 return SSA_PROP_VARYING;
5174
5175 return SSA_PROP_INTERESTING;
5176 }
5177 return SSA_PROP_NOT_INTERESTING;
5178 }
5179
5180 if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
5181 switch (gimple_call_internal_fn (stmt))
5182 {
5183 case IFN_ADD_OVERFLOW:
5184 case IFN_SUB_OVERFLOW:
5185 case IFN_MUL_OVERFLOW:
5186 case IFN_ATOMIC_COMPARE_EXCHANGE:
5187 /* These internal calls return _Complex integer type,
5188 which VRP does not track, but the immediate uses
5189 thereof might be interesting. */
5190 if (lhs && TREE_CODE (lhs) == SSA_NAME)
5191 {
5192 imm_use_iterator iter;
5193 use_operand_p use_p;
5194 enum ssa_prop_result res = SSA_PROP_VARYING;
5195
5196 set_value_range_to_varying (get_value_range (lhs));
5197
5198 FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
5199 {
5200 gimple *use_stmt = USE_STMT (use_p);
5201 if (!is_gimple_assign (use_stmt))
5202 continue;
5203 enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
5204 if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
5205 continue;
5206 tree rhs1 = gimple_assign_rhs1 (use_stmt);
5207 tree use_lhs = gimple_assign_lhs (use_stmt);
5208 if (TREE_CODE (rhs1) != rhs_code
5209 || TREE_OPERAND (rhs1, 0) != lhs
5210 || TREE_CODE (use_lhs) != SSA_NAME
5211 || !stmt_interesting_for_vrp (use_stmt)
5212 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
5213 || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
5214 || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
5215 continue;
5216
5217 /* If there is a change in the value range for any of the
5218 REALPART_EXPR/IMAGPART_EXPR immediate uses, return
5219 SSA_PROP_INTERESTING. If there are any REALPART_EXPR
5220 or IMAGPART_EXPR immediate uses, but none of them have
5221 a change in their value ranges, return
5222 SSA_PROP_NOT_INTERESTING. If there are no
5223 {REAL,IMAG}PART_EXPR uses at all,
5224 return SSA_PROP_VARYING. */
5225 value_range new_vr = VR_INITIALIZER;
5226 extract_range_basic (&new_vr, use_stmt);
5227 const value_range *old_vr = get_value_range (use_lhs);
5228 if (old_vr->type != new_vr.type
5229 || !vrp_operand_equal_p (old_vr->min, new_vr.min)
5230 || !vrp_operand_equal_p (old_vr->max, new_vr.max)
5231 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
5232 res = SSA_PROP_INTERESTING;
5233 else
5234 res = SSA_PROP_NOT_INTERESTING;
5235 BITMAP_FREE (new_vr.equiv);
5236 if (res == SSA_PROP_INTERESTING)
5237 {
5238 *output_p = lhs;
5239 return res;
5240 }
5241 }
5242
5243 return res;
5244 }
5245 break;
5246 default:
5247 break;
5248 }
5249
5250 /* All other statements produce nothing of interest for VRP, so mark
5251 their outputs varying and prevent further simulation. */
5252 set_defs_to_varying (stmt);
5253
5254 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5255 }
5256
5257 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5258 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5259 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5260 possible such range. The resulting range is not canonicalized. */
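/* For instance, unioning the ranges [0, 3] and [7, 10] yields the
   convex hull [0, 10], while unioning [-INF, 3] and [7, +INF]
   yields the anti-range ~[4, 6].  */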
5261
5262 static void
5263 union_ranges (enum value_range_type *vr0type,
5264 tree *vr0min, tree *vr0max,
5265 enum value_range_type vr1type,
5266 tree vr1min, tree vr1max)
5267 {
5268 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5269 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5270
5271 /* [] is vr0, () is vr1 in the following classification comments. */
5272 if (mineq && maxeq)
5273 {
5274 /* [( )] */
5275 if (*vr0type == vr1type)
5276 /* Nothing to do for equal ranges. */
5277 ;
5278 else if ((*vr0type == VR_RANGE
5279 && vr1type == VR_ANTI_RANGE)
5280 || (*vr0type == VR_ANTI_RANGE
5281 && vr1type == VR_RANGE))
5282 {
5283 /* For anti-range with range union the result is varying. */
5284 goto give_up;
5285 }
5286 else
5287 gcc_unreachable ();
5288 }
5289 else if (operand_less_p (*vr0max, vr1min) == 1
5290 || operand_less_p (vr1max, *vr0min) == 1)
5291 {
5292 /* [ ] ( ) or ( ) [ ]
5293 If the ranges have an empty intersection, the result of the union
5294 operation is the anti-range, or, if both are anti-ranges,
5295 it covers everything. */
5296 if (*vr0type == VR_ANTI_RANGE
5297 && vr1type == VR_ANTI_RANGE)
5298 goto give_up;
5299 else if (*vr0type == VR_ANTI_RANGE
5300 && vr1type == VR_RANGE)
5301 ;
5302 else if (*vr0type == VR_RANGE
5303 && vr1type == VR_ANTI_RANGE)
5304 {
5305 *vr0type = vr1type;
5306 *vr0min = vr1min;
5307 *vr0max = vr1max;
5308 }
5309 else if (*vr0type == VR_RANGE
5310 && vr1type == VR_RANGE)
5311 {
5312 /* The result is the convex hull of both ranges. */
5313 if (operand_less_p (*vr0max, vr1min) == 1)
5314 {
5315 /* If the result can be an anti-range, create one. */
5316 if (TREE_CODE (*vr0max) == INTEGER_CST
5317 && TREE_CODE (vr1min) == INTEGER_CST
5318 && vrp_val_is_min (*vr0min)
5319 && vrp_val_is_max (vr1max))
5320 {
5321 tree min = int_const_binop (PLUS_EXPR,
5322 *vr0max,
5323 build_int_cst (TREE_TYPE (*vr0max), 1));
5324 tree max = int_const_binop (MINUS_EXPR,
5325 vr1min,
5326 build_int_cst (TREE_TYPE (vr1min), 1));
5327 if (!operand_less_p (max, min))
5328 {
5329 *vr0type = VR_ANTI_RANGE;
5330 *vr0min = min;
5331 *vr0max = max;
5332 }
5333 else
5334 *vr0max = vr1max;
5335 }
5336 else
5337 *vr0max = vr1max;
5338 }
5339 else
5340 {
5341 /* If the result can be an anti-range, create one. */
5342 if (TREE_CODE (vr1max) == INTEGER_CST
5343 && TREE_CODE (*vr0min) == INTEGER_CST
5344 && vrp_val_is_min (vr1min)
5345 && vrp_val_is_max (*vr0max))
5346 {
5347 tree min = int_const_binop (PLUS_EXPR,
5348 vr1max,
5349 build_int_cst (TREE_TYPE (vr1max), 1));
5350 tree max = int_const_binop (MINUS_EXPR,
5351 *vr0min,
5352 build_int_cst (TREE_TYPE (*vr0min), 1));
5353 if (!operand_less_p (max, min))
5354 {
5355 *vr0type = VR_ANTI_RANGE;
5356 *vr0min = min;
5357 *vr0max = max;
5358 }
5359 else
5360 *vr0min = vr1min;
5361 }
5362 else
5363 *vr0min = vr1min;
5364 }
5365 }
5366 else
5367 gcc_unreachable ();
5368 }
5369 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5370 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5371 {
5372 /* [ ( ) ] or [( ) ] or [ ( )] */
5373 if (*vr0type == VR_RANGE
5374 && vr1type == VR_RANGE)
5375 ;
5376 else if (*vr0type == VR_ANTI_RANGE
5377 && vr1type == VR_ANTI_RANGE)
5378 {
5379 *vr0type = vr1type;
5380 *vr0min = vr1min;
5381 *vr0max = vr1max;
5382 }
5383 else if (*vr0type == VR_ANTI_RANGE
5384 && vr1type == VR_RANGE)
5385 {
5386 /* Arbitrarily choose the right or left gap. */
5387 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
5388 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5389 build_int_cst (TREE_TYPE (vr1min), 1));
5390 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
5391 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5392 build_int_cst (TREE_TYPE (vr1max), 1));
5393 else
5394 goto give_up;
5395 }
5396 else if (*vr0type == VR_RANGE
5397 && vr1type == VR_ANTI_RANGE)
5398 /* The result covers everything. */
5399 goto give_up;
5400 else
5401 gcc_unreachable ();
5402 }
5403 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5404 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5405 {
5406 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5407 if (*vr0type == VR_RANGE
5408 && vr1type == VR_RANGE)
5409 {
5410 *vr0type = vr1type;
5411 *vr0min = vr1min;
5412 *vr0max = vr1max;
5413 }
5414 else if (*vr0type == VR_ANTI_RANGE
5415 && vr1type == VR_ANTI_RANGE)
5416 ;
5417 else if (*vr0type == VR_RANGE
5418 && vr1type == VR_ANTI_RANGE)
5419 {
5420 *vr0type = VR_ANTI_RANGE;
5421 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
5422 {
5423 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5424 build_int_cst (TREE_TYPE (*vr0min), 1));
5425 *vr0min = vr1min;
5426 }
5427 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
5428 {
5429 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5430 build_int_cst (TREE_TYPE (*vr0max), 1));
5431 *vr0max = vr1max;
5432 }
5433 else
5434 goto give_up;
5435 }
5436 else if (*vr0type == VR_ANTI_RANGE
5437 && vr1type == VR_RANGE)
5438 /* The result covers everything. */
5439 goto give_up;
5440 else
5441 gcc_unreachable ();
5442 }
5443 else if ((operand_less_p (vr1min, *vr0max) == 1
5444 || operand_equal_p (vr1min, *vr0max, 0))
5445 && operand_less_p (*vr0min, vr1min) == 1
5446 && operand_less_p (*vr0max, vr1max) == 1)
5447 {
5448 /* [ ( ] ) or [ ]( ) */
5449 if (*vr0type == VR_RANGE
5450 && vr1type == VR_RANGE)
5451 *vr0max = vr1max;
5452 else if (*vr0type == VR_ANTI_RANGE
5453 && vr1type == VR_ANTI_RANGE)
5454 *vr0min = vr1min;
5455 else if (*vr0type == VR_ANTI_RANGE
5456 && vr1type == VR_RANGE)
5457 {
5458 if (TREE_CODE (vr1min) == INTEGER_CST)
5459 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5460 build_int_cst (TREE_TYPE (vr1min), 1));
5461 else
5462 goto give_up;
5463 }
5464 else if (*vr0type == VR_RANGE
5465 && vr1type == VR_ANTI_RANGE)
5466 {
5467 if (TREE_CODE (*vr0max) == INTEGER_CST)
5468 {
5469 *vr0type = vr1type;
5470 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5471 build_int_cst (TREE_TYPE (*vr0max), 1));
5472 *vr0max = vr1max;
5473 }
5474 else
5475 goto give_up;
5476 }
5477 else
5478 gcc_unreachable ();
5479 }
5480 else if ((operand_less_p (*vr0min, vr1max) == 1
5481 || operand_equal_p (*vr0min, vr1max, 0))
5482 && operand_less_p (vr1min, *vr0min) == 1
5483 && operand_less_p (vr1max, *vr0max) == 1)
5484 {
5485 /* ( [ ) ] or ( )[ ] */
5486 if (*vr0type == VR_RANGE
5487 && vr1type == VR_RANGE)
5488 *vr0min = vr1min;
5489 else if (*vr0type == VR_ANTI_RANGE
5490 && vr1type == VR_ANTI_RANGE)
5491 *vr0max = vr1max;
5492 else if (*vr0type == VR_ANTI_RANGE
5493 && vr1type == VR_RANGE)
5494 {
5495 if (TREE_CODE (vr1max) == INTEGER_CST)
5496 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5497 build_int_cst (TREE_TYPE (vr1max), 1));
5498 else
5499 goto give_up;
5500 }
5501 else if (*vr0type == VR_RANGE
5502 && vr1type == VR_ANTI_RANGE)
5503 {
5504 if (TREE_CODE (*vr0min) == INTEGER_CST)
5505 {
5506 *vr0type = vr1type;
5507 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5508 build_int_cst (TREE_TYPE (*vr0min), 1));
5509 *vr0min = vr1min;
5510 }
5511 else
5512 goto give_up;
5513 }
5514 else
5515 gcc_unreachable ();
5516 }
5517 else
5518 goto give_up;
5519
5520 return;
5521
5522 give_up:
5523 *vr0type = VR_VARYING;
5524 *vr0min = NULL_TREE;
5525 *vr0max = NULL_TREE;
5526 }
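/* The tail above turns the union of two disjoint ranges into the
   anti-range covering the gap between them, a conservative superset of
   the exact union.  Below is an illustration-only sketch of that idea
   using hypothetical toy types, not GCC's value_range:  */
#if 0
#include <stdio.h>

enum toy_kind { TOY_RANGE, TOY_ANTI_RANGE };
struct toy_range { enum toy_kind kind; long min, max; };

/* Union of A and B assuming A.max < B.min (disjoint, A below B).  */
static struct toy_range
toy_union_disjoint (struct toy_range a, struct toy_range b)
{
  struct toy_range r;
  long gap_min = a.max + 1;	/* first value after A  */
  long gap_max = b.min - 1;	/* last value before B  */
  if (gap_min <= gap_max)
    {
      /* Non-empty gap: the union becomes ~[gap_min, gap_max], i.e.
	 "anything but the gap".  */
      r.kind = TOY_ANTI_RANGE;
      r.min = gap_min;
      r.max = gap_max;
    }
  else
    {
      /* Adjacent ranges merge into one plain range.  */
      r.kind = TOY_RANGE;
      r.min = a.min;
      r.max = b.max;
    }
  return r;
}

int
main (void)
{
  struct toy_range a = { TOY_RANGE, 0, 3 }, b = { TOY_RANGE, 10, 20 };
  struct toy_range u = toy_union_disjoint (a, b);
  printf ("%s[%ld, %ld]\n", u.kind == TOY_ANTI_RANGE ? "~" : "",
	  u.min, u.max);	/* prints ~[4, 9] */
  return 0;
}
#endif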
5527
5528 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
5529 { VR1TYPE, VR1MIN, VR1MAX } and store the result
5530 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
5531 possible such range. The resulting range is not canonicalized. */
5532
5533 static void
5534 intersect_ranges (enum value_range_type *vr0type,
5535 tree *vr0min, tree *vr0max,
5536 enum value_range_type vr1type,
5537 tree vr1min, tree vr1max)
5538 {
5539 bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
5540 bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
5541
5542 /* [] is vr0, () is vr1 in the following classification comments. */
5543 if (mineq && maxeq)
5544 {
5545 /* [( )] */
5546 if (*vr0type == vr1type)
5547 /* Nothing to do for equal ranges. */
5548 ;
5549 else if ((*vr0type == VR_RANGE
5550 && vr1type == VR_ANTI_RANGE)
5551 || (*vr0type == VR_ANTI_RANGE
5552 && vr1type == VR_RANGE))
5553 {
5554 /* Intersecting a range with its matching anti-range is empty. */
5555 *vr0type = VR_UNDEFINED;
5556 *vr0min = NULL_TREE;
5557 *vr0max = NULL_TREE;
5558 }
5559 else
5560 gcc_unreachable ();
5561 }
5562 else if (operand_less_p (*vr0max, vr1min) == 1
5563 || operand_less_p (vr1max, *vr0min) == 1)
5564 {
5565 /* [ ] ( ) or ( ) [ ]
5566 If the ranges are disjoint, intersecting an anti-range with a
5567 range yields the range, and intersecting two ranges yields the
5568 empty set. */
5569 if (*vr0type == VR_RANGE
5570 && vr1type == VR_ANTI_RANGE)
5571 ;
5572 else if (*vr0type == VR_ANTI_RANGE
5573 && vr1type == VR_RANGE)
5574 {
5575 *vr0type = vr1type;
5576 *vr0min = vr1min;
5577 *vr0max = vr1max;
5578 }
5579 else if (*vr0type == VR_RANGE
5580 && vr1type == VR_RANGE)
5581 {
5582 *vr0type = VR_UNDEFINED;
5583 *vr0min = NULL_TREE;
5584 *vr0max = NULL_TREE;
5585 }
5586 else if (*vr0type == VR_ANTI_RANGE
5587 && vr1type == VR_ANTI_RANGE)
5588 {
5589 /* If the anti-ranges are adjacent to each other merge them. */
5590 if (TREE_CODE (*vr0max) == INTEGER_CST
5591 && TREE_CODE (vr1min) == INTEGER_CST
5592 && operand_less_p (*vr0max, vr1min) == 1
5593 && integer_onep (int_const_binop (MINUS_EXPR,
5594 vr1min, *vr0max)))
5595 *vr0max = vr1max;
5596 else if (TREE_CODE (vr1max) == INTEGER_CST
5597 && TREE_CODE (*vr0min) == INTEGER_CST
5598 && operand_less_p (vr1max, *vr0min) == 1
5599 && integer_onep (int_const_binop (MINUS_EXPR,
5600 *vr0min, vr1max)))
5601 *vr0min = vr1min;
5602 /* Else arbitrarily take VR0. */
5603 }
5604 }
5605 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
5606 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
5607 {
5608 /* [ ( ) ] or [( ) ] or [ ( )] */
5609 if (*vr0type == VR_RANGE
5610 && vr1type == VR_RANGE)
5611 {
5612 /* If both are ranges the result is the inner one. */
5613 *vr0type = vr1type;
5614 *vr0min = vr1min;
5615 *vr0max = vr1max;
5616 }
5617 else if (*vr0type == VR_RANGE
5618 && vr1type == VR_ANTI_RANGE)
5619 {
5620 /* Choose the right gap if the left one is empty. */
5621 if (mineq)
5622 {
5623 if (TREE_CODE (vr1max) != INTEGER_CST)
5624 *vr0min = vr1max;
5625 else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
5626 && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
5627 *vr0min
5628 = int_const_binop (MINUS_EXPR, vr1max,
5629 build_int_cst (TREE_TYPE (vr1max), -1));
5630 else
5631 *vr0min
5632 = int_const_binop (PLUS_EXPR, vr1max,
5633 build_int_cst (TREE_TYPE (vr1max), 1));
5634 }
5635 /* Choose the left gap if the right one is empty. */
5636 else if (maxeq)
5637 {
5638 if (TREE_CODE (vr1min) != INTEGER_CST)
5639 *vr0max = vr1min;
5640 else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
5641 && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
5642 *vr0max
5643 = int_const_binop (PLUS_EXPR, vr1min,
5644 build_int_cst (TREE_TYPE (vr1min), -1));
5645 else
5646 *vr0max
5647 = int_const_binop (MINUS_EXPR, vr1min,
5648 build_int_cst (TREE_TYPE (vr1min), 1));
5649 }
5650 /* Choose the anti-range if the range is effectively varying. */
5651 else if (vrp_val_is_min (*vr0min)
5652 && vrp_val_is_max (*vr0max))
5653 {
5654 *vr0type = vr1type;
5655 *vr0min = vr1min;
5656 *vr0max = vr1max;
5657 }
5658 /* Else choose the range. */
5659 }
5660 else if (*vr0type == VR_ANTI_RANGE
5661 && vr1type == VR_ANTI_RANGE)
5662 /* If both are anti-ranges the result is the outer one. */
5663 ;
5664 else if (*vr0type == VR_ANTI_RANGE
5665 && vr1type == VR_RANGE)
5666 {
5667 /* The intersection is empty. */
5668 *vr0type = VR_UNDEFINED;
5669 *vr0min = NULL_TREE;
5670 *vr0max = NULL_TREE;
5671 }
5672 else
5673 gcc_unreachable ();
5674 }
5675 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
5676 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
5677 {
5678 /* ( [ ] ) or ([ ] ) or ( [ ]) */
5679 if (*vr0type == VR_RANGE
5680 && vr1type == VR_RANGE)
5681 /* Choose the inner range. */
5682 ;
5683 else if (*vr0type == VR_ANTI_RANGE
5684 && vr1type == VR_RANGE)
5685 {
5686 /* Choose the right gap if the left is empty. */
5687 if (mineq)
5688 {
5689 *vr0type = VR_RANGE;
5690 if (TREE_CODE (*vr0max) != INTEGER_CST)
5691 *vr0min = *vr0max;
5692 else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
5693 && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
5694 *vr0min
5695 = int_const_binop (MINUS_EXPR, *vr0max,
5696 build_int_cst (TREE_TYPE (*vr0max), -1));
5697 else
5698 *vr0min
5699 = int_const_binop (PLUS_EXPR, *vr0max,
5700 build_int_cst (TREE_TYPE (*vr0max), 1));
5701 *vr0max = vr1max;
5702 }
5703 /* Choose the left gap if the right is empty. */
5704 else if (maxeq)
5705 {
5706 *vr0type = VR_RANGE;
5707 if (TREE_CODE (*vr0min) != INTEGER_CST)
5708 *vr0max = *vr0min;
5709 else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
5710 && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
5711 *vr0max
5712 = int_const_binop (PLUS_EXPR, *vr0min,
5713 build_int_cst (TREE_TYPE (*vr0min), -1));
5714 else
5715 *vr0max
5716 = int_const_binop (MINUS_EXPR, *vr0min,
5717 build_int_cst (TREE_TYPE (*vr0min), 1));
5718 *vr0min = vr1min;
5719 }
5720 /* Choose the anti-range if the range is effectively varying. */
5721 else if (vrp_val_is_min (vr1min)
5722 && vrp_val_is_max (vr1max))
5723 ;
5724 /* Choose the anti-range if it is ~[0,0]; that range is special
5725 enough to warrant special-casing when vr1's range is relatively
5726 wide - at least for types no narrower than int, which covers
5727 pointers and arguments to functions like ctz. */
5728 else if (*vr0min == *vr0max
5729 && integer_zerop (*vr0min)
5730 && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
5731 >= TYPE_PRECISION (integer_type_node))
5732 || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
5733 && TREE_CODE (vr1max) == INTEGER_CST
5734 && TREE_CODE (vr1min) == INTEGER_CST
5735 && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
5736 < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
5737 ;
5738 /* Else choose the range. */
5739 else
5740 {
5741 *vr0type = vr1type;
5742 *vr0min = vr1min;
5743 *vr0max = vr1max;
5744 }
5745 }
5746 else if (*vr0type == VR_ANTI_RANGE
5747 && vr1type == VR_ANTI_RANGE)
5748 {
5749 /* If both are anti-ranges the result is the outer one. */
5750 *vr0type = vr1type;
5751 *vr0min = vr1min;
5752 *vr0max = vr1max;
5753 }
5754 else if (vr1type == VR_ANTI_RANGE
5755 && *vr0type == VR_RANGE)
5756 {
5757 /* The intersection is empty. */
5758 *vr0type = VR_UNDEFINED;
5759 *vr0min = NULL_TREE;
5760 *vr0max = NULL_TREE;
5761 }
5762 else
5763 gcc_unreachable ();
5764 }
5765 else if ((operand_less_p (vr1min, *vr0max) == 1
5766 || operand_equal_p (vr1min, *vr0max, 0))
5767 && operand_less_p (*vr0min, vr1min) == 1)
5768 {
5769 /* [ ( ] ) or [ ]( ) */
5770 if (*vr0type == VR_ANTI_RANGE
5771 && vr1type == VR_ANTI_RANGE)
5772 *vr0max = vr1max;
5773 else if (*vr0type == VR_RANGE
5774 && vr1type == VR_RANGE)
5775 *vr0min = vr1min;
5776 else if (*vr0type == VR_RANGE
5777 && vr1type == VR_ANTI_RANGE)
5778 {
5779 if (TREE_CODE (vr1min) == INTEGER_CST)
5780 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
5781 build_int_cst (TREE_TYPE (vr1min), 1));
5782 else
5783 *vr0max = vr1min;
5784 }
5785 else if (*vr0type == VR_ANTI_RANGE
5786 && vr1type == VR_RANGE)
5787 {
5788 *vr0type = VR_RANGE;
5789 if (TREE_CODE (*vr0max) == INTEGER_CST)
5790 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
5791 build_int_cst (TREE_TYPE (*vr0max), 1));
5792 else
5793 *vr0min = *vr0max;
5794 *vr0max = vr1max;
5795 }
5796 else
5797 gcc_unreachable ();
5798 }
5799 else if ((operand_less_p (*vr0min, vr1max) == 1
5800 || operand_equal_p (*vr0min, vr1max, 0))
5801 && operand_less_p (vr1min, *vr0min) == 1)
5802 {
5803 /* ( [ ) ] or ( )[ ] */
5804 if (*vr0type == VR_ANTI_RANGE
5805 && vr1type == VR_ANTI_RANGE)
5806 *vr0min = vr1min;
5807 else if (*vr0type == VR_RANGE
5808 && vr1type == VR_RANGE)
5809 *vr0max = vr1max;
5810 else if (*vr0type == VR_RANGE
5811 && vr1type == VR_ANTI_RANGE)
5812 {
5813 if (TREE_CODE (vr1max) == INTEGER_CST)
5814 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
5815 build_int_cst (TREE_TYPE (vr1max), 1));
5816 else
5817 *vr0min = vr1max;
5818 }
5819 else if (*vr0type == VR_ANTI_RANGE
5820 && vr1type == VR_RANGE)
5821 {
5822 *vr0type = VR_RANGE;
5823 if (TREE_CODE (*vr0min) == INTEGER_CST)
5824 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
5825 build_int_cst (TREE_TYPE (*vr0min), 1));
5826 else
5827 *vr0max = *vr0min;
5828 *vr0min = vr1min;
5829 }
5830 else
5831 gcc_unreachable ();
5832 }
5833
5834 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
5835 the result of the intersection. That is always a conservatively
5836 correct estimate, unless VR1 is a constant singleton range,
5837 in which case we choose that instead. */
5838 if (vr1type == VR_RANGE
5839 && is_gimple_min_invariant (vr1min)
5840 && vrp_operand_equal_p (vr1min, vr1max))
5841 {
5842 *vr0type = vr1type;
5843 *vr0min = vr1min;
5844 *vr0max = vr1max;
5845 }
5846
5847 return;
5848 }
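/* For two plain VR_RANGEs the case analysis above collapses to the
   familiar interval intersection: take the larger minimum and the
   smaller maximum, and the result is empty (VR_UNDEFINED) when they
   cross.  Illustration-only sketch with hypothetical toy types:  */
#if 0
struct toy_range { int empty; long min, max; };

static struct toy_range
toy_intersect (struct toy_range a, struct toy_range b)
{
  struct toy_range r;
  r.min = a.min > b.min ? a.min : b.min;	/* larger minimum  */
  r.max = a.max < b.max ? a.max : b.max;	/* smaller maximum */
  r.empty = r.min > r.max;			/* disjoint inputs */
  return r;
}
/* toy_intersect ([0, 10], [6, 99]) yields [6, 10];
   toy_intersect ([0, 3], [6, 99]) yields the empty range.  */
#endif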
5849
5850
5851 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
5852 in *VR0. This may not be the smallest possible such range. */
5853
5854 static void
5855 vrp_intersect_ranges_1 (value_range *vr0, const value_range *vr1)
5856 {
5857 value_range saved;
5858
5859 /* If either range is VR_VARYING the other one wins. */
5860 if (vr1->type == VR_VARYING)
5861 return;
5862 if (vr0->type == VR_VARYING)
5863 {
5864 copy_value_range (vr0, vr1);
5865 return;
5866 }
5867
5868 /* When either range is VR_UNDEFINED the resulting range is
5869 VR_UNDEFINED, too. */
5870 if (vr0->type == VR_UNDEFINED)
5871 return;
5872 if (vr1->type == VR_UNDEFINED)
5873 {
5874 set_value_range_to_undefined (vr0);
5875 return;
5876 }
5877
5878 /* Save the original vr0 so we can return it as the conservative
5879 intersection result when our worker turns things to varying. */
5880 saved = *vr0;
5881 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
5882 vr1->type, vr1->min, vr1->max);
5883 /* Make sure to canonicalize the result though as the inversion of a
5884 VR_RANGE can still be a VR_RANGE. */
5885 set_and_canonicalize_value_range (vr0, vr0->type,
5886 vr0->min, vr0->max, vr0->equiv);
5887 /* If that failed, use the saved original VR0. */
5888 if (vr0->type == VR_VARYING)
5889 {
5890 *vr0 = saved;
5891 return;
5892 }
5893 /* If the result is VR_UNDEFINED there is no need to mess with
5894 the equivalencies. */
5895 if (vr0->type == VR_UNDEFINED)
5896 return;
5897
5898 /* The resulting set of equivalences for range intersection is the union of
5899 the two sets. */
5900 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
5901 bitmap_ior_into (vr0->equiv, vr1->equiv);
5902 else if (vr1->equiv && !vr0->equiv)
5903 {
5904 /* All equivalence bitmaps are allocated from the same obstack. So
5905 we can use the obstack associated with VR1 to allocate vr0->equiv. */
5906 vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
5907 bitmap_copy (vr0->equiv, vr1->equiv);
5908 }
5909 }
5910
5911 void
5912 vrp_intersect_ranges (value_range *vr0, const value_range *vr1)
5913 {
5914 if (dump_file && (dump_flags & TDF_DETAILS))
5915 {
5916 fprintf (dump_file, "Intersecting\n ");
5917 dump_value_range (dump_file, vr0);
5918 fprintf (dump_file, "\nand\n ");
5919 dump_value_range (dump_file, vr1);
5920 fprintf (dump_file, "\n");
5921 }
5922 vrp_intersect_ranges_1 (vr0, vr1);
5923 if (dump_file && (dump_flags & TDF_DETAILS))
5924 {
5925 fprintf (dump_file, "to\n ");
5926 dump_value_range (dump_file, vr0);
5927 fprintf (dump_file, "\n");
5928 }
5929 }
5930
5931 /* Meet operation for value ranges. Given two value ranges VR0 and
5932 VR1, store in VR0 a range that contains both VR0 and VR1. This
5933 may not be the smallest possible such range. */
5934
5935 static void
5936 vrp_meet_1 (value_range *vr0, const value_range *vr1)
5937 {
5938 value_range saved;
5939
5940 if (vr0->type == VR_UNDEFINED)
5941 {
5942 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
5943 return;
5944 }
5945
5946 if (vr1->type == VR_UNDEFINED)
5947 {
5948 /* VR0 already has the resulting range. */
5949 return;
5950 }
5951
5952 if (vr0->type == VR_VARYING)
5953 {
5954 /* Nothing to do. VR0 already has the resulting range. */
5955 return;
5956 }
5957
5958 if (vr1->type == VR_VARYING)
5959 {
5960 set_value_range_to_varying (vr0);
5961 return;
5962 }
5963
5964 saved = *vr0;
5965 union_ranges (&vr0->type, &vr0->min, &vr0->max,
5966 vr1->type, vr1->min, vr1->max);
5967 if (vr0->type == VR_VARYING)
5968 {
5969 /* Failed to find an efficient meet. Before giving up and setting
5970 the result to VARYING, see if we can at least derive a useful
5971 anti-range. */
5972 if (range_includes_zero_p (&saved) == 0
5973 && range_includes_zero_p (vr1) == 0)
5974 {
5975 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
5976
5977 /* Since this meet operation did not result from the meeting of
5978 two equivalent names, VR0 cannot have any equivalences. */
5979 if (vr0->equiv)
5980 bitmap_clear (vr0->equiv);
5981 return;
5982 }
5983
5984 set_value_range_to_varying (vr0);
5985 return;
5986 }
5987 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
5988 vr0->equiv);
5989 if (vr0->type == VR_VARYING)
5990 return;
5991
5992 /* The resulting set of equivalences is always the intersection of
5993 the two sets. */
5994 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
5995 bitmap_and_into (vr0->equiv, vr1->equiv);
5996 else if (vr0->equiv && !vr1->equiv)
5997 bitmap_clear (vr0->equiv);
5998 }
5999
6000 void
6001 vrp_meet (value_range *vr0, const value_range *vr1)
6002 {
6003 if (dump_file && (dump_flags & TDF_DETAILS))
6004 {
6005 fprintf (dump_file, "Meeting\n ");
6006 dump_value_range (dump_file, vr0);
6007 fprintf (dump_file, "\nand\n ");
6008 dump_value_range (dump_file, vr1);
6009 fprintf (dump_file, "\n");
6010 }
6011 vrp_meet_1 (vr0, vr1);
6012 if (dump_file && (dump_flags & TDF_DETAILS))
6013 {
6014 fprintf (dump_file, "to\n ");
6015 dump_value_range (dump_file, vr0);
6016 fprintf (dump_file, "\n");
6017 }
6018 }
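/* At the level of range kinds vrp_meet_1 behaves like a textbook
   lattice meet: VR_UNDEFINED is the identity, VR_VARYING is absorbing,
   and only when both operands carry real ranges is union_ranges
   consulted.  Illustration-only sketch with hypothetical toy names:  */
#if 0
enum toy_kind { TOY_UNDEFINED, TOY_RANGE, TOY_VARYING };

static enum toy_kind
toy_meet_kind (enum toy_kind a, enum toy_kind b)
{
  if (a == TOY_UNDEFINED)
    return b;			/* UNDEFINED is the identity  */
  if (b == TOY_UNDEFINED)
    return a;
  if (a == TOY_VARYING || b == TOY_VARYING)
    return TOY_VARYING;		/* VARYING absorbs everything */
  return TOY_RANGE;		/* otherwise union the ranges */
}
#endif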
6019
6020
6021 /* Visit all arguments for PHI node PHI that flow through executable
6022 edges. If a valid value range can be derived from all the incoming
6023 value ranges, set a new range for the LHS of PHI. */
6024
6025 enum ssa_prop_result
6026 vrp_prop::visit_phi (gphi *phi)
6027 {
6028 tree lhs = PHI_RESULT (phi);
6029 value_range vr_result = VR_INITIALIZER;
6030 extract_range_from_phi_node (phi, &vr_result);
6031 if (update_value_range (lhs, &vr_result))
6032 {
6033 if (dump_file && (dump_flags & TDF_DETAILS))
6034 {
6035 fprintf (dump_file, "Found new range for ");
6036 print_generic_expr (dump_file, lhs);
6037 fprintf (dump_file, ": ");
6038 dump_value_range (dump_file, &vr_result);
6039 fprintf (dump_file, "\n");
6040 }
6041
6042 if (vr_result.type == VR_VARYING)
6043 return SSA_PROP_VARYING;
6044
6045 return SSA_PROP_INTERESTING;
6046 }
6047
6048 /* Nothing changed, don't add outgoing edges. */
6049 return SSA_PROP_NOT_INTERESTING;
6050 }
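/* An illustration-only example of the PHI handling above; the SSA
   names in the comments are hypothetical:  */
#if 0
extern int c;

int
phi_example (void)
{
  int x;
  if (c)
    x = 1;	/* x_1 : [1, 1] on this edge      */
  else
    x = 5;	/* x_2 : [5, 5] on the other edge */
  /* x_3 = PHI <x_1, x_2>; meeting [1, 1] with [5, 5] gives x_3 the
     conservative range [1, 5], which update_value_range records.  */
  return x;
}
#endif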
6051
6052 class vrp_folder : public substitute_and_fold_engine
6053 {
6054 public:
6055 tree get_value (tree) FINAL OVERRIDE;
6056 bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
6057 bool fold_predicate_in (gimple_stmt_iterator *);
6058
6059 class vr_values *vr_values;
6060
6061 /* Delegators. */
6062 tree vrp_evaluate_conditional (tree_code code, tree op0,
6063 tree op1, gimple *stmt)
6064 { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
6065 bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
6066 { return vr_values->simplify_stmt_using_ranges (gsi); }
6067 tree op_with_constant_singleton_value_range (tree op)
6068 { return vr_values->op_with_constant_singleton_value_range (op); }
6069 };
6070
6071 /* If the statement pointed to by SI has a predicate whose value can be
6072 computed using the value range information computed by VRP, compute
6073 its value and return true. Otherwise, return false. */
6074
6075 bool
6076 vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
6077 {
6078 bool assignment_p = false;
6079 tree val;
6080 gimple *stmt = gsi_stmt (*si);
6081
6082 if (is_gimple_assign (stmt)
6083 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
6084 {
6085 assignment_p = true;
6086 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
6087 gimple_assign_rhs1 (stmt),
6088 gimple_assign_rhs2 (stmt),
6089 stmt);
6090 }
6091 else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6092 val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6093 gimple_cond_lhs (cond_stmt),
6094 gimple_cond_rhs (cond_stmt),
6095 stmt);
6096 else
6097 return false;
6098
6099 if (val)
6100 {
6101 if (assignment_p)
6102 val = fold_convert (gimple_expr_type (stmt), val);
6103
6104 if (dump_file)
6105 {
6106 fprintf (dump_file, "Folding predicate ");
6107 print_gimple_expr (dump_file, stmt, 0);
6108 fprintf (dump_file, " to ");
6109 print_generic_expr (dump_file, val);
6110 fprintf (dump_file, "\n");
6111 }
6112
6113 if (is_gimple_assign (stmt))
6114 gimple_assign_set_rhs_from_tree (si, val);
6115 else
6116 {
6117 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
6118 gcond *cond_stmt = as_a <gcond *> (stmt);
6119 if (integer_zerop (val))
6120 gimple_cond_make_false (cond_stmt);
6121 else if (integer_onep (val))
6122 gimple_cond_make_true (cond_stmt);
6123 else
6124 gcc_unreachable ();
6125 }
6126
6127 return true;
6128 }
6129
6130 return false;
6131 }
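/* A source-level illustration (not from the testsuite) of a predicate
   fold_predicate_in can decide; the SSA names are hypothetical:  */
#if 0
int
fold_example (int x)
{
  if (x > 10)
    {
      /* Here x_1 has range [11, INT_MAX], so x_1 > 5 evaluates to 1;
	 the dump reports something along the lines of
	 "Folding predicate x_1 > 5 to 1" and the branch is folded.  */
      if (x > 5)
	return 1;
    }
  return 0;
}
#endif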
6132
6133 /* Callback for substitute_and_fold folding the stmt at *SI. */
6134
6135 bool
6136 vrp_folder::fold_stmt (gimple_stmt_iterator *si)
6137 {
6138 if (fold_predicate_in (si))
6139 return true;
6140
6141 return simplify_stmt_using_ranges (si);
6142 }
6143
6144 /* If OP has a value range with a single constant value return that,
6145 otherwise return NULL_TREE. This returns OP itself if OP is a
6146 constant.
6147
6148 Implemented as a pure wrapper right now, but this will change. */
6149
6150 tree
6151 vrp_folder::get_value (tree op)
6152 {
6153 return op_with_constant_singleton_value_range (op);
6154 }
6155
6156 /* Return the LHS of any ASSERT_EXPR whose first argument is OP and
6157 whose defining statement dominates BB. If no such ASSERT_EXPR is
6158 found, return OP. */
6159
6160 static tree
6161 lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
6162 {
6163 imm_use_iterator imm_iter;
6164 gimple *use_stmt;
6165 use_operand_p use_p;
6166
6167 if (TREE_CODE (op) == SSA_NAME)
6168 {
6169 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
6170 {
6171 use_stmt = USE_STMT (use_p);
6172 if (use_stmt != stmt
6173 && gimple_assign_single_p (use_stmt)
6174 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
6175 && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
6176 && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
6177 return gimple_assign_lhs (use_stmt);
6178 }
6179 }
6180 return op;
6181 }
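/* Illustration only: the IL shape this helper handles, with
   hypothetical SSA names.  After insert_range_assertions the IL
   contains statements like

     x_4 = ASSERT_EXPR <x_1, x_1 > 10>;

   and a later use of x_1 in a block dominated by that assertion is
   mapped to x_4, the name that actually carries the narrowed range
   [11, +INF].  */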
6182
6183 /* A hack. */
6184 static class vr_values *x_vr_values;
6185
6186 /* A trivial wrapper so that we can present the generic jump threading
6187 code with a simple API for simplifying statements. STMT is the
6188 statement we want to simplify, WITHIN_STMT provides the location
6189 for any overflow warnings. */
6190
6191 static tree
6192 simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
6193 class avail_exprs_stack *avail_exprs_stack,
6194 basic_block bb)
6195 {
6196 /* First see if the conditional is in the hash table. */
6197 tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
6198 if (cached_lhs && is_gimple_min_invariant (cached_lhs))
6199 return cached_lhs;
6200
6201 vr_values *vr_values = x_vr_values;
6202 if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
6203 {
6204 tree op0 = gimple_cond_lhs (cond_stmt);
6205 op0 = lhs_of_dominating_assert (op0, bb, stmt);
6206
6207 tree op1 = gimple_cond_rhs (cond_stmt);
6208 op1 = lhs_of_dominating_assert (op1, bb, stmt);
6209
6210 return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
6211 op0, op1, within_stmt);
6212 }
6213
6214 /* We simplify a switch statement by trying to determine which case label
6215 will be taken. If we are successful then we return the corresponding
6216 CASE_LABEL_EXPR. */
6217 if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
6218 {
6219 tree op = gimple_switch_index (switch_stmt);
6220 if (TREE_CODE (op) != SSA_NAME)
6221 return NULL_TREE;
6222
6223 op = lhs_of_dominating_assert (op, bb, stmt);
6224
6225 const value_range *vr = vr_values->get_value_range (op);
6226 if ((vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
6227 || symbolic_range_p (vr))
6228 return NULL_TREE;
6229
6230 if (vr->type == VR_RANGE)
6231 {
6232 size_t i, j;
6233 /* Get the range of labels that contain a part of the operand's
6234 value range. */
6235 find_case_label_range (switch_stmt, vr->min, vr->max, &i, &j);
6236
6237 /* Is there only one such label? */
6238 if (i == j)
6239 {
6240 tree label = gimple_switch_label (switch_stmt, i);
6241
6242 /* The i'th label will be taken only if the value range of the
6243 operand is entirely within the bounds of this label. */
6244 if (CASE_HIGH (label) != NULL_TREE
6245 ? (tree_int_cst_compare (CASE_LOW (label), vr->min) <= 0
6246 && tree_int_cst_compare (CASE_HIGH (label), vr->max) >= 0)
6247 : (tree_int_cst_equal (CASE_LOW (label), vr->min)
6248 && tree_int_cst_equal (vr->min, vr->max)))
6249 return label;
6250 }
6251
6252 /* If there are no such labels then the default label will be
6253 taken. */
6254 if (i > j)
6255 return gimple_switch_label (switch_stmt, 0);
6256 }
6257
6258 if (vr->type == VR_ANTI_RANGE)
6259 {
6260 unsigned n = gimple_switch_num_labels (switch_stmt);
6261 tree min_label = gimple_switch_label (switch_stmt, 1);
6262 tree max_label = gimple_switch_label (switch_stmt, n - 1);
6263
6264 /* The default label will be taken only if the anti-range of the
6265 operand is entirely outside the bounds of all the (non-default)
6266 case labels. */
6267 if (tree_int_cst_compare (vr->min, CASE_LOW (min_label)) <= 0
6268 && (CASE_HIGH (max_label) != NULL_TREE
6269 ? tree_int_cst_compare (vr->max, CASE_HIGH (max_label)) >= 0
6270 : tree_int_cst_compare (vr->max, CASE_LOW (max_label)) >= 0))
6271 return gimple_switch_label (switch_stmt, 0);
6272 }
6273
6274 return NULL_TREE;
6275 }
6276
6277 if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
6278 {
6279 tree lhs = gimple_assign_lhs (assign_stmt);
6280 if (TREE_CODE (lhs) == SSA_NAME
6281 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6282 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6283 && stmt_interesting_for_vrp (stmt))
6284 {
6285 edge dummy_e;
6286 tree dummy_tree;
6287 value_range new_vr = VR_INITIALIZER;
6288 vr_values->extract_range_from_stmt (stmt, &dummy_e,
6289 &dummy_tree, &new_vr);
6290 if (range_int_cst_singleton_p (&new_vr))
6291 return new_vr.min;
6292 }
6293 }
6294
6295 return NULL_TREE;
6296 }
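/* Illustration only: a switch whose index has a singleton range, so
   the code above finds exactly one matching label and returns its
   CASE_LABEL_EXPR.  The callee name is hypothetical.  */
#if 0
extern void do_something (void);

void
switch_example (unsigned x)
{
  if (x == 1)
    switch (x)		/* x has the singleton range [1, 1] here */
      {
      case 1:		/* the only label containing [1, 1]      */
	do_something ();
	break;
      default:		/* never selected for x in [1, 1]        */
	break;
      }
}
#endif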
6297
6298 class vrp_dom_walker : public dom_walker
6299 {
6300 public:
6301 vrp_dom_walker (cdi_direction direction,
6302 class const_and_copies *const_and_copies,
6303 class avail_exprs_stack *avail_exprs_stack)
6304 : dom_walker (direction, REACHABLE_BLOCKS),
6305 m_const_and_copies (const_and_copies),
6306 m_avail_exprs_stack (avail_exprs_stack),
6307 m_dummy_cond (NULL) {}
6308
6309 virtual edge before_dom_children (basic_block);
6310 virtual void after_dom_children (basic_block);
6311
6312 class vr_values *vr_values;
6313
6314 private:
6315 class const_and_copies *m_const_and_copies;
6316 class avail_exprs_stack *m_avail_exprs_stack;
6317
6318 gcond *m_dummy_cond;
6319
6320 };
6321
6322 /* Called before processing dominator children of BB. We want to look
6323 at ASSERT_EXPRs and record information from them in the appropriate
6324 tables.
6325
6326 We could look at other statements here, but that is unlikely to
6327 significantly increase the number of jump threads we discover. */
6328
6329 edge
6330 vrp_dom_walker::before_dom_children (basic_block bb)
6331 {
6332 gimple_stmt_iterator gsi;
6333
6334 m_avail_exprs_stack->push_marker ();
6335 m_const_and_copies->push_marker ();
6336 for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
6337 {
6338 gimple *stmt = gsi_stmt (gsi);
6339 if (gimple_assign_single_p (stmt)
6340 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
6341 {
6342 tree rhs1 = gimple_assign_rhs1 (stmt);
6343 tree cond = TREE_OPERAND (rhs1, 1);
6344 tree inverted = invert_truthvalue (cond);
6345 vec<cond_equivalence> p;
6346 p.create (3);
6347 record_conditions (&p, cond, inverted);
6348 for (unsigned int i = 0; i < p.length (); i++)
6349 m_avail_exprs_stack->record_cond (&p[i]);
6350
6351 tree lhs = gimple_assign_lhs (stmt);
6352 m_const_and_copies->record_const_or_copy (lhs,
6353 TREE_OPERAND (rhs1, 0));
6354 p.release ();
6355 continue;
6356 }
6357 break;
6358 }
6359 return NULL;
6360 }
6361
6362 /* Called after processing dominator children of BB. This is where we
6363 actually call into the threader. */
6364 void
6365 vrp_dom_walker::after_dom_children (basic_block bb)
6366 {
6367 if (!m_dummy_cond)
6368 m_dummy_cond = gimple_build_cond (NE_EXPR,
6369 integer_zero_node, integer_zero_node,
6370 NULL, NULL);
6371
6372 x_vr_values = vr_values;
6373 thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
6374 m_avail_exprs_stack, NULL,
6375 simplify_stmt_for_jump_threading);
6376 x_vr_values = NULL;
6377
6378 m_avail_exprs_stack->pop_to_marker ();
6379 m_const_and_copies->pop_to_marker ();
6380 }
6381
6382 /* Blocks which have more than one predecessor and more than
6383 one successor present jump threading opportunities, i.e.,
6384 when the block is reached from a specific predecessor, we
6385 may be able to determine which of the outgoing edges will
6386 be traversed. When this optimization applies, we are able
6387 to avoid conditionals at runtime and we may expose secondary
6388 optimization opportunities.
6389
6390 This routine is effectively a driver for the generic jump
6391 threading code. It basically just presents the generic code
6392 with edges that may be suitable for jump threading.
6393
6394 Unlike DOM, we do not iterate VRP if jump threading was successful.
6395 While iterating may expose new opportunities for VRP, it is expected
6396 those opportunities would be very limited and the compile time cost
6397 to expose those opportunities would be significant.
6398
6399 As jump threading opportunities are discovered, they are registered
6400 for later realization. */
6401
6402 static void
6403 identify_jump_threads (class vr_values *vr_values)
6404 {
6405 int i;
6406 edge e;
6407
6408 /* Ugh. When substituting values earlier in this pass we can
6409 wipe the dominance information. So rebuild the dominator
6410 information as we need it within the jump threading code. */
6411 calculate_dominance_info (CDI_DOMINATORS);
6412
6413 /* We do not allow VRP information to be used for jump threading
6414 across a back edge in the CFG. Otherwise it becomes too
6415 difficult to avoid eliminating loop exit tests. Of course
6416 EDGE_DFS_BACK is not accurate at this time so we have to
6417 recompute it. */
6418 mark_dfs_back_edges ();
6419
6420 /* Do not thread across edges we are about to remove. Just marking
6421 them as EDGE_IGNORE will do. */
6422 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6423 e->flags |= EDGE_IGNORE;
6424
6425 /* Allocate our unwinder stack to unwind any temporary equivalences
6426 that might be recorded. */
6427 const_and_copies *equiv_stack = new const_and_copies ();
6428
6429 hash_table<expr_elt_hasher> *avail_exprs
6430 = new hash_table<expr_elt_hasher> (1024);
6431 avail_exprs_stack *avail_exprs_stack
6432 = new class avail_exprs_stack (avail_exprs);
6433
6434 vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
6435 walker.vr_values = vr_values;
6436 walker.walk (cfun->cfg->x_entry_block_ptr);
6437
6438 /* Clear EDGE_IGNORE. */
6439 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6440 e->flags &= ~EDGE_IGNORE;
6441
6442 /* We do not actually update the CFG or SSA graphs at this point as
6443 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
6444 handle ASSERT_EXPRs gracefully. */
6445 delete equiv_stack;
6446 delete avail_exprs;
6447 delete avail_exprs_stack;
6448 }
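/* Illustration only: the classic CFG shape this driver threads.  The
   join block's exit test is fully determined by which predecessor was
   taken, so each incoming path can be rewired straight to the correct
   successor.  The callee names are hypothetical.  */
#if 0
extern void f1 (void), f2 (void);

void
thread_example (int a)
{
  int x;
  if (a > 0)
    x = 5;	/* on this path x : [5, 5]   */
  else
    x = 10;	/* on this path x : [10, 10] */
  if (x == 5)	/* decided by the incoming edge alone */
    f1 ();
  else
    f2 ();
}
#endif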
6449
6450 /* Traverse all the blocks folding conditionals with known ranges. */
6451
6452 void
6453 vrp_prop::vrp_finalize (bool warn_array_bounds_p)
6454 {
6455 size_t i;
6456
6457 /* We have completed propagating through the lattice. */
6458 vr_values.set_lattice_propagation_complete ();
6459
6460 if (dump_file)
6461 {
6462 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
6463 vr_values.dump_all_value_ranges (dump_file);
6464 fprintf (dump_file, "\n");
6465 }
6466
6467 /* Export ranges: non-null for pointers, range info otherwise. */
6468 for (i = 0; i < num_ssa_names; i++)
6469 {
6470 tree name = ssa_name (i);
6471 if (!name)
6472 continue;
6473
6474 const value_range *vr = get_value_range (name);
6475 /* NAME is non-null here; skip names without a usable range. */
6476 if (vr->type == VR_VARYING
6477 || vr->type == VR_UNDEFINED
6478 || TREE_CODE (vr->min) != INTEGER_CST
6479 || TREE_CODE (vr->max) != INTEGER_CST)
6480 continue;
6481
6482 if (POINTER_TYPE_P (TREE_TYPE (name))
6483 && range_includes_zero_p (vr) == 0)
6484 set_ptr_nonnull (name);
6485 else if (!POINTER_TYPE_P (TREE_TYPE (name)))
6486 set_range_info (name, vr->type,
6487 wi::to_wide (vr->min),
6488 wi::to_wide (vr->max));
6489 }
6490
6491 /* If we're checking array refs, we want to merge information on
6492 the executability of each edge between vrp_folder and the
6493 check_array_bounds_dom_walker: each can clear the
6494 EDGE_EXECUTABLE flag on edges, in different ways.
6495
6496 Hence, if we're going to call check_all_array_refs, set
6497 the flag on every edge now, rather than in
6498 check_array_bounds_dom_walker's ctor; vrp_folder may clear
6499 it from some edges. */
6500 if (warn_array_bounds && warn_array_bounds_p)
6501 set_all_edges_as_executable (cfun);
6502
6503 class vrp_folder vrp_folder;
6504 vrp_folder.vr_values = &vr_values;
6505 vrp_folder.substitute_and_fold ();
6506
6507 if (warn_array_bounds && warn_array_bounds_p)
6508 check_all_array_refs ();
6509 }
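/* Illustration only: the kind of range information the export loop
   above records.  For code like the following, the final range of Y's
   SSA name, [0, 7], would be attached with set_range_info, and a
   pointer proven non-null would be marked with set_ptr_nonnull:  */
#if 0
unsigned
export_example (unsigned x)
{
  unsigned y = x % 8;	/* y : [0, 7], recorded on y's SSA name */
  return y;
}
#endif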
6510
6511 /* Main entry point to VRP (Value Range Propagation). This pass is
6512 loosely based on J. R. C. Patterson, ``Accurate Static Branch
6513 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
6514 Programming Language Design and Implementation, pp. 67-78, 1995.
6515 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
6516
6517 This is essentially an SSA-CCP pass modified to deal with ranges
6518 instead of constants.
6519
6520 While propagating ranges, we may find that two or more SSA names
6521 have equivalent, though distinct, ranges. For instance,
6522
6523 1 x_9 = p_3->a;
6524 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
6525 3 if (p_4 == q_2)
6526 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
6527 5 endif
6528 6 if (q_2)
6529
6530 In the code above, pointer p_5 has range [q_2, q_2], but from the
6531 code we can also determine that p_5 cannot be NULL and, if q_2 had
6532 a non-varying range, p_5's range should also be compatible with it.
6533
6534 These equivalences are created by two expressions: ASSERT_EXPR and
6535 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
6536 result of another assertion, then we can use the fact that p_5 and
6537 p_4 are equivalent when evaluating p_5's range.
6538
6539 Together with value ranges, we also propagate these equivalences
6540 between names so that we can take advantage of information from
6541 multiple ranges when doing final replacement. Note that this
6542 equivalency relation is transitive but not symmetric.
6543
6544 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
6545 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
6546 in contexts where that assertion does not hold (e.g., in line 6).
6547
6548 TODO, the main difference between this pass and Patterson's is that
6549 we do not propagate edge probabilities. We only compute whether
6550 edges can be taken or not. That is, instead of having a spectrum
6551 of jump probabilities between 0 and 1, we only deal with 0, 1 and
6552 DON'T KNOW. In the future, it may be worthwhile to propagate
6553 probabilities to aid branch prediction. */
6554
6555 static unsigned int
6556 execute_vrp (bool warn_array_bounds_p)
6557 {
6558 int i;
6559 edge e;
6560 switch_update *su;
6561
6562 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
6563 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
6564 scev_initialize ();
6565
6566 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
6567 Inserting assertions may split edges which will invalidate
6568 EDGE_DFS_BACK. */
6569 insert_range_assertions ();
6570
6571 to_remove_edges.create (10);
6572 to_update_switch_stmts.create (5);
6573 threadedge_initialize_values ();
6574
6575 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
6576 mark_dfs_back_edges ();
6577
6578 class vrp_prop vrp_prop;
6579 vrp_prop.vrp_initialize ();
6580 vrp_prop.ssa_propagate ();
6581 vrp_prop.vrp_finalize (warn_array_bounds_p);
6582
6583 /* We must identify jump threading opportunities before we release
6584 the datastructures built by VRP. */
6585 identify_jump_threads (&vrp_prop.vr_values);
6586
6587 /* A comparison of an SSA_NAME against a constant where the SSA_NAME
6588 was set by a type conversion can often be rewritten to use the
6589 RHS of the type conversion.
6590
6591 However, doing so inhibits jump threading through the comparison.
6592 So that transformation is not performed until after jump threading
6593 is complete. */
6594 basic_block bb;
6595 FOR_EACH_BB_FN (bb, cfun)
6596 {
6597 gimple *last = last_stmt (bb);
6598 if (last && gimple_code (last) == GIMPLE_COND)
6599 vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
6600 }
6601
6602 free_numbers_of_iterations_estimates (cfun);
6603
6604 /* ASSERT_EXPRs must be removed before finalizing jump threads
6605 as finalizing jump threads calls the CFG cleanup code which
6606 does not properly handle ASSERT_EXPRs. */
6607 remove_range_assertions ();
6608
6609 /* If we exposed any new variables, go ahead and put them into
6610 SSA form now, before we handle jump threading. This simplifies
6611 interactions between rewriting of _DECL nodes into SSA form
6612 and rewriting SSA_NAME nodes into SSA form after block
6613 duplication and CFG manipulation. */
6614 update_ssa (TODO_update_ssa);
6615
6616 /* We identified all the jump threading opportunities earlier, but could
6617 not transform the CFG at that time. This routine transforms the
6618 CFG and arranges for the dominator tree to be rebuilt if necessary.
6619
6620 Note the SSA graph update will occur during the normal TODO
6621 processing by the pass manager. */
6622 thread_through_all_blocks (false);
6623
6624 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
6625 CFG in a broken state and requires a cfg_cleanup run. */
6626 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
6627 remove_edge (e);
6628 /* Update SWITCH_EXPR case label vector. */
6629 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
6630 {
6631 size_t j;
6632 size_t n = TREE_VEC_LENGTH (su->vec);
6633 tree label;
6634 gimple_switch_set_num_labels (su->stmt, n);
6635 for (j = 0; j < n; j++)
6636 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
6637 /* As we may have replaced the default label with a regular one,
6638 turn it back into a real default label again; this ensures
6639 optimal expansion. */
6640 label = gimple_switch_label (su->stmt, 0);
6641 CASE_LOW (label) = NULL_TREE;
6642 CASE_HIGH (label) = NULL_TREE;
6643 }
6644
6645 if (to_remove_edges.length () > 0)
6646 {
6647 free_dominance_info (CDI_DOMINATORS);
6648 loops_state_set (LOOPS_NEED_FIXUP);
6649 }
6650
6651 to_remove_edges.release ();
6652 to_update_switch_stmts.release ();
6653 threadedge_finalize_values ();
6654
6655 scev_finalize ();
6656 loop_optimizer_finalize ();
6657 return 0;
6658 }
6659
6660 namespace {
6661
6662 const pass_data pass_data_vrp =
6663 {
6664 GIMPLE_PASS, /* type */
6665 "vrp", /* name */
6666 OPTGROUP_NONE, /* optinfo_flags */
6667 TV_TREE_VRP, /* tv_id */
6668 PROP_ssa, /* properties_required */
6669 0, /* properties_provided */
6670 0, /* properties_destroyed */
6671 0, /* todo_flags_start */
6672 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
6673 };
6674
6675 class pass_vrp : public gimple_opt_pass
6676 {
6677 public:
6678 pass_vrp (gcc::context *ctxt)
6679 : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
6680 {}
6681
6682 /* opt_pass methods: */
6683 opt_pass * clone () { return new pass_vrp (m_ctxt); }
6684 void set_pass_param (unsigned int n, bool param)
6685 {
6686 gcc_assert (n == 0);
6687 warn_array_bounds_p = param;
6688 }
6689 virtual bool gate (function *) { return flag_tree_vrp != 0; }
6690 virtual unsigned int execute (function *)
6691 { return execute_vrp (warn_array_bounds_p); }
6692
6693 private:
6694 bool warn_array_bounds_p;
6695 }; // class pass_vrp
6696
6697 } // anon namespace
6698
6699 gimple_opt_pass *
6700 make_pass_vrp (gcc::context *ctxt)
6701 {
6702 return new pass_vrp (ctxt);
6703 }
6704
6705
6706 /* Worker for determine_value_range. */
6707
6708 static void
6709 determine_value_range_1 (value_range *vr, tree expr)
6710 {
6711 if (BINARY_CLASS_P (expr))
6712 {
6713 value_range vr0 = VR_INITIALIZER, vr1 = VR_INITIALIZER;
6714 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6715 determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
6716 extract_range_from_binary_expr_1 (vr, TREE_CODE (expr), TREE_TYPE (expr),
6717 &vr0, &vr1);
6718 }
6719 else if (UNARY_CLASS_P (expr))
6720 {
6721 value_range vr0 = VR_INITIALIZER;
6722 determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
6723 extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
6724 &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
6725 }
6726 else if (TREE_CODE (expr) == INTEGER_CST)
6727 set_value_range_to_value (vr, expr, NULL);
6728 else
6729 {
6730 value_range_type kind;
6731 wide_int min, max;
6732 /* For SSA names try to extract range info computed by VRP. Otherwise
6733 fall back to varying. */
6734 if (TREE_CODE (expr) == SSA_NAME
6735 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
6736 && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
6737 set_value_range (vr, kind, wide_int_to_tree (TREE_TYPE (expr), min),
6738 wide_int_to_tree (TREE_TYPE (expr), max), NULL);
6739 else
6740 set_value_range_to_varying (vr);
6741 }
6742 }
6743
6744 /* Compute a value-range for EXPR and set it in *MIN and *MAX. Return
6745 the determined range type. */
6746
6747 value_range_type
6748 determine_value_range (tree expr, wide_int *min, wide_int *max)
6749 {
6750 value_range vr = VR_INITIALIZER;
6751 determine_value_range_1 (&vr, expr);
6752 if ((vr.type == VR_RANGE
6753 || vr.type == VR_ANTI_RANGE)
6754 && !symbolic_range_p (&vr))
6755 {
6756 *min = wi::to_wide (vr.min);
6757 *max = wi::to_wide (vr.max);
6758 return vr.type;
6759 }
6760
6761 return VR_VARYING;
6762 }
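/* Illustration only: the recursion in determine_value_range_1,
   modelled as a toy interval evaluator over a hypothetical expression
   tree (not GCC's tree), with no overflow handling:  */
#if 0
enum toy_code { TOY_CST, TOY_PLUS, TOY_NEG };

struct toy_expr
{
  enum toy_code code;
  long cst;			/* TOY_CST  */
  struct toy_expr *op0, *op1;	/* operands */
};

struct toy_ivl { long min, max; };

static struct toy_ivl
toy_range (const struct toy_expr *e)
{
  struct toy_ivl r = { 0, 0 };
  switch (e->code)
    {
    case TOY_CST:	/* like the INTEGER_CST arm: a singleton range */
      r.min = r.max = e->cst;
      break;
    case TOY_PLUS:	/* like the BINARY_CLASS_P arm: recurse into the
			   operands, then combine the sub-results      */
      {
	struct toy_ivl a = toy_range (e->op0), b = toy_range (e->op1);
	r.min = a.min + b.min;
	r.max = a.max + b.max;
      }
      break;
    case TOY_NEG:	/* like the UNARY_CLASS_P arm */
      {
	struct toy_ivl a = toy_range (e->op0);
	r.min = -a.max;
	r.max = -a.min;
      }
      break;
    }
  return r;
}
#endif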