gcc/tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
43 #include "expr.h"
44 #include "optabs.h"
45
46
47 /* Type of value ranges. See value_range_d for a description of these
48 types. */
49 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
50
51 /* Range of values that can be associated with an SSA_NAME after VRP
52 has executed. */
53 struct value_range_d
54 {
55 /* Lattice value represented by this range. */
56 enum value_range_type type;
57
58 /* Minimum and maximum values represented by this range. These
59 values should be interpreted as follows:
60
61 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
62 be NULL.
63
64 - If TYPE == VR_RANGE then MIN holds the minimum value and
65 MAX holds the maximum value of the range [MIN, MAX].
66
67 - If TYPE == ANTI_RANGE the variable is known to NOT
68 take any values in the range [MIN, MAX]. */
69 tree min;
70 tree max;
71
72 /* Set of SSA names whose value ranges are equivalent to this one.
73 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
74 bitmap equiv;
75 };
76
77 typedef struct value_range_d value_range_t;
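
/* Illustrative example (not used by the code): for a signed int SSA
   name known to lie in 1 .. 10 the lattice value would be

     { VR_RANGE, [constant 1], [constant 10], equiv }

   while "this name is never zero" is encoded as the anti-range
   ~[0, 0], i.e.

     { VR_ANTI_RANGE, [constant 0], [constant 0], equiv }.

   VR_UNDEFINED and VR_VARYING carry no bounds at all.  */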
78
79 /* Set of SSA names found live during the RPO traversal of the function
80 for still active basic-blocks. */
81 static sbitmap *live;
82
83 /* Return true if the SSA name NAME is live on the edge E. */
84
85 static bool
86 live_on_edge (edge e, tree name)
87 {
88 return (live[e->dest->index]
89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
90 }
91
92 /* Local functions. */
93 static int compare_values (tree val1, tree val2);
94 static int compare_values_warnv (tree val1, tree val2, bool *);
95 static void vrp_meet (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 tree, tree, bool, bool *,
98 bool *);
99
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
104 SSA name. */
105 struct assert_locus_d
106 {
107 /* Basic block where the assertion would be inserted. */
108 basic_block bb;
109
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 edge e;
113
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si;
116
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code;
119
120 /* Value being compared against. */
121 tree val;
122
123 /* Expression to compare. */
124 tree expr;
125
126 /* Next node in the linked list. */
127 struct assert_locus_d *next;
128 };
129
130 typedef struct assert_locus_d *assert_locus_t;
131
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for;
135
136 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t *asserts_for;
140
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146
 147 /* For a PHI node which sets SSA name N_I, VR_PHI_EDGE_COUNTS[I]
 148    holds the number of executable edges we saw the last time we
 149    visited the node.  */
150 static int *vr_phi_edge_counts;
151
152 typedef struct {
153 gimple stmt;
154 tree vec;
155 } switch_update;
156
157 static VEC (edge, heap) *to_remove_edges;
158 DEF_VEC_O(switch_update);
159 DEF_VEC_ALLOC_O(switch_update, heap);
160 static VEC (switch_update, heap) *to_update_switch_stmts;
161
162
163 /* Return the maximum value for TYPE. */
164
165 static inline tree
166 vrp_val_max (const_tree type)
167 {
168 if (!INTEGRAL_TYPE_P (type))
169 return NULL_TREE;
170
171 return TYPE_MAX_VALUE (type);
172 }
173
174 /* Return the minimum value for TYPE. */
175
176 static inline tree
177 vrp_val_min (const_tree type)
178 {
179 if (!INTEGRAL_TYPE_P (type))
180 return NULL_TREE;
181
182 return TYPE_MIN_VALUE (type);
183 }
184
185 /* Return whether VAL is equal to the maximum value of its type. This
186 will be true for a positive overflow infinity. We can't do a
187 simple equality comparison with TYPE_MAX_VALUE because C typedefs
188 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
189 to the integer constant with the same value in the type. */
190
191 static inline bool
192 vrp_val_is_max (const_tree val)
193 {
194 tree type_max = vrp_val_max (TREE_TYPE (val));
195 return (val == type_max
196 || (type_max != NULL_TREE
197 && operand_equal_p (val, type_max, 0)));
198 }
199
200 /* Return whether VAL is equal to the minimum value of its type. This
201 will be true for a negative overflow infinity. */
202
203 static inline bool
204 vrp_val_is_min (const_tree val)
205 {
206 tree type_min = vrp_val_min (TREE_TYPE (val));
207 return (val == type_min
208 || (type_min != NULL_TREE
209 && operand_equal_p (val, type_min, 0)));
210 }
211
212
213 /* Return whether TYPE should use an overflow infinity distinct from
214 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
215 represent a signed overflow during VRP computations. An infinity
216 is distinct from a half-range, which will go from some number to
217 TYPE_{MIN,MAX}_VALUE. */
218
219 static inline bool
220 needs_overflow_infinity (const_tree type)
221 {
222 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
223 }
224
225 /* Return whether TYPE can support our overflow infinity
226 representation: we use the TREE_OVERFLOW flag, which only exists
227 for constants. If TYPE doesn't support this, we don't optimize
228 cases which would require signed overflow--we drop them to
229 VARYING. */
230
231 static inline bool
232 supports_overflow_infinity (const_tree type)
233 {
234 tree min = vrp_val_min (type), max = vrp_val_max (type);
235 #ifdef ENABLE_CHECKING
236 gcc_assert (needs_overflow_infinity (type));
237 #endif
238 return (min != NULL_TREE
239 && CONSTANT_CLASS_P (min)
240 && max != NULL_TREE
241 && CONSTANT_CLASS_P (max));
242 }
243
244 /* VAL is the maximum or minimum value of a type. Return a
245 corresponding overflow infinity. */
246
247 static inline tree
248 make_overflow_infinity (tree val)
249 {
250 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
251 val = copy_node (val);
252 TREE_OVERFLOW (val) = 1;
253 return val;
254 }
255
256 /* Return a negative overflow infinity for TYPE. */
257
258 static inline tree
259 negative_overflow_infinity (tree type)
260 {
261 gcc_checking_assert (supports_overflow_infinity (type));
262 return make_overflow_infinity (vrp_val_min (type));
263 }
264
265 /* Return a positive overflow infinity for TYPE. */
266
267 static inline tree
268 positive_overflow_infinity (tree type)
269 {
270 gcc_checking_assert (supports_overflow_infinity (type));
271 return make_overflow_infinity (vrp_val_max (type));
272 }
273
274 /* Return whether VAL is a negative overflow infinity. */
275
276 static inline bool
277 is_negative_overflow_infinity (const_tree val)
278 {
279 return (needs_overflow_infinity (TREE_TYPE (val))
280 && CONSTANT_CLASS_P (val)
281 && TREE_OVERFLOW (val)
282 && vrp_val_is_min (val));
283 }
284
285 /* Return whether VAL is a positive overflow infinity. */
286
287 static inline bool
288 is_positive_overflow_infinity (const_tree val)
289 {
290 return (needs_overflow_infinity (TREE_TYPE (val))
291 && CONSTANT_CLASS_P (val)
292 && TREE_OVERFLOW (val)
293 && vrp_val_is_max (val));
294 }
295
296 /* Return whether VAL is a positive or negative overflow infinity. */
297
298 static inline bool
299 is_overflow_infinity (const_tree val)
300 {
301 return (needs_overflow_infinity (TREE_TYPE (val))
302 && CONSTANT_CLASS_P (val)
303 && TREE_OVERFLOW (val)
304 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
305 }
306
307 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
308
309 static inline bool
310 stmt_overflow_infinity (gimple stmt)
311 {
312 if (is_gimple_assign (stmt)
313 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
314 GIMPLE_SINGLE_RHS)
315 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
316 return false;
317 }
318
319 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
320 the same value with TREE_OVERFLOW clear. This can be used to avoid
321 confusing a regular value with an overflow value. */
322
323 static inline tree
324 avoid_overflow_infinity (tree val)
325 {
326 if (!is_overflow_infinity (val))
327 return val;
328
329 if (vrp_val_is_max (val))
330 return vrp_val_max (TREE_TYPE (val));
331 else
332 {
333 gcc_checking_assert (vrp_val_is_min (val));
334 return vrp_val_min (TREE_TYPE (val));
335 }
336 }
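
/* A minimal sketch of how the overflow-infinity helpers above compose.
   The function is purely illustrative (hence guarded out) and is not
   part of VRP; its name is made up for the example.  */
#if 0
static void
overflow_infinity_example (void)
{
  tree type = integer_type_node;

  if (needs_overflow_infinity (type)
      && supports_overflow_infinity (type))
    {
      /* PINF is a copy of the type's maximum with TREE_OVERFLOW set,
         so it compares equal to the real maximum but can still be
         distinguished from it.  */
      tree pinf = positive_overflow_infinity (type);
      gcc_assert (is_positive_overflow_infinity (pinf));
      gcc_assert (!is_overflow_infinity (vrp_val_max (type)));

      /* Dropping the overflow bit gives back the plain maximum.  */
      gcc_assert (vrp_val_is_max (avoid_overflow_infinity (pinf)));
    }
}
#endif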
337
338
339 /* Return true if ARG is marked with the nonnull attribute in the
340 current function signature. */
341
342 static bool
343 nonnull_arg_p (const_tree arg)
344 {
345 tree t, attrs, fntype;
346 unsigned HOST_WIDE_INT arg_num;
347
348 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
349
 350   /* The static chain decl is always non-null.  */
351 if (arg == cfun->static_chain_decl)
352 return true;
353
354 fntype = TREE_TYPE (current_function_decl);
355 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
356
357 /* If "nonnull" wasn't specified, we know nothing about the argument. */
358 if (attrs == NULL_TREE)
359 return false;
360
361 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
362 if (TREE_VALUE (attrs) == NULL_TREE)
363 return true;
364
365 /* Get the position number for ARG in the function signature. */
366 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
367 t;
368 t = DECL_CHAIN (t), arg_num++)
369 {
370 if (t == arg)
371 break;
372 }
373
374 gcc_assert (t == arg);
375
376 /* Now see if ARG_NUM is mentioned in the nonnull list. */
377 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
378 {
379 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
380 return true;
381 }
382
383 return false;
384 }
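
/* For example, for a function compiled as

     void f (char *a, char *b) __attribute__ ((nonnull (2)));

   nonnull_arg_p returns false for A and true for B, while a bare
   "nonnull" attribute (no argument list) makes it return true for
   every pointer parameter.  The declaration above is only an
   illustration, not something taken from the sources.  */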
385
386
387 /* Set value range VR to VR_VARYING. */
388
389 static inline void
390 set_value_range_to_varying (value_range_t *vr)
391 {
392 vr->type = VR_VARYING;
393 vr->min = vr->max = NULL_TREE;
394 if (vr->equiv)
395 bitmap_clear (vr->equiv);
396 }
397
398
399 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
400
401 static void
402 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
403 tree max, bitmap equiv)
404 {
405 #if defined ENABLE_CHECKING
406 /* Check the validity of the range. */
407 if (t == VR_RANGE || t == VR_ANTI_RANGE)
408 {
409 int cmp;
410
411 gcc_assert (min && max);
412
413 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
414 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
415
416 cmp = compare_values (min, max);
417 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
418
419 if (needs_overflow_infinity (TREE_TYPE (min)))
420 gcc_assert (!is_overflow_infinity (min)
421 || !is_overflow_infinity (max));
422 }
423
424 if (t == VR_UNDEFINED || t == VR_VARYING)
425 gcc_assert (min == NULL_TREE && max == NULL_TREE);
426
427 if (t == VR_UNDEFINED || t == VR_VARYING)
428 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
429 #endif
430
431 vr->type = t;
432 vr->min = min;
433 vr->max = max;
434
435 /* Since updating the equivalence set involves deep copying the
436 bitmaps, only do it if absolutely necessary. */
437 if (vr->equiv == NULL
438 && equiv != NULL)
439 vr->equiv = BITMAP_ALLOC (NULL);
440
441 if (equiv != vr->equiv)
442 {
443 if (equiv && !bitmap_empty_p (equiv))
444 bitmap_copy (vr->equiv, equiv);
445 else
446 bitmap_clear (vr->equiv);
447 }
448 }
449
450
451 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
452 This means adjusting T, MIN and MAX representing the case of a
453 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
 454    as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
455 In corner cases where MAX+1 or MIN-1 wraps this will fall back
456 to varying.
457 This routine exists to ease canonicalization in the case where we
458 extract ranges from var + CST op limit. */
459
460 static void
461 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
462 tree min, tree max, bitmap equiv)
463 {
464 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
465 if ((t != VR_RANGE
466 && t != VR_ANTI_RANGE)
467 || TREE_CODE (min) != INTEGER_CST
468 || TREE_CODE (max) != INTEGER_CST)
469 {
470 set_value_range (vr, t, min, max, equiv);
471 return;
472 }
473
 474   /* MIN and MAX are in the wrong order; swap them and invert the
 475      range type, adjusting the bounds by one on each side.  */
476 if (tree_int_cst_lt (max, min))
477 {
478 tree one = build_int_cst (TREE_TYPE (min), 1);
479 tree tmp = int_const_binop (PLUS_EXPR, max, one);
480 max = int_const_binop (MINUS_EXPR, min, one);
481 min = tmp;
482
 483       /* There is one corner case: if the input was [C+1, C] we now
 484          have [C+1, C] again.  That represents an empty value range,
 485          so drop to varying in this case.  */
486 if (tree_int_cst_lt (max, min))
487 {
488 set_value_range_to_varying (vr);
489 return;
490 }
491
492 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
493 }
494
495 /* Anti-ranges that can be represented as ranges should be so. */
496 if (t == VR_ANTI_RANGE)
497 {
498 bool is_min = vrp_val_is_min (min);
499 bool is_max = vrp_val_is_max (max);
500
501 if (is_min && is_max)
502 {
503 /* We cannot deal with empty ranges, drop to varying. */
504 set_value_range_to_varying (vr);
505 return;
506 }
507 else if (is_min
508 /* As a special exception preserve non-null ranges. */
509 && !(TYPE_UNSIGNED (TREE_TYPE (min))
510 && integer_zerop (max)))
511 {
512 tree one = build_int_cst (TREE_TYPE (max), 1);
513 min = int_const_binop (PLUS_EXPR, max, one);
514 max = vrp_val_max (TREE_TYPE (max));
515 t = VR_RANGE;
516 }
517 else if (is_max)
518 {
519 tree one = build_int_cst (TREE_TYPE (min), 1);
520 max = int_const_binop (MINUS_EXPR, min, one);
521 min = vrp_val_min (TREE_TYPE (min));
522 t = VR_RANGE;
523 }
524 }
525
526 set_value_range (vr, t, min, max, equiv);
527 }
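
/* Two illustrative canonicalizations performed above, assuming an
   8-bit unsigned type:

     [250, 5]   (wrapping range)          becomes   ~[6, 249]
     ~[0, 5]    (anti-range at type min)  becomes   [6, 255]

   whereas the non-null anti-range ~[0, 0] of an unsigned type is
   deliberately preserved as an anti-range.  */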
528
529 /* Copy value range FROM into value range TO. */
530
531 static inline void
532 copy_value_range (value_range_t *to, value_range_t *from)
533 {
534 set_value_range (to, from->type, from->min, from->max, from->equiv);
535 }
536
537 /* Set value range VR to a single value. This function is only called
538 with values we get from statements, and exists to clear the
539 TREE_OVERFLOW flag so that we don't think we have an overflow
540 infinity when we shouldn't. */
541
542 static inline void
543 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
544 {
545 gcc_assert (is_gimple_min_invariant (val));
546 val = avoid_overflow_infinity (val);
547 set_value_range (vr, VR_RANGE, val, val, equiv);
548 }
549
550 /* Set value range VR to a non-negative range of type TYPE.
551 OVERFLOW_INFINITY indicates whether to use an overflow infinity
552 rather than TYPE_MAX_VALUE; this should be true if we determine
553 that the range is nonnegative based on the assumption that signed
554 overflow does not occur. */
555
556 static inline void
557 set_value_range_to_nonnegative (value_range_t *vr, tree type,
558 bool overflow_infinity)
559 {
560 tree zero;
561
562 if (overflow_infinity && !supports_overflow_infinity (type))
563 {
564 set_value_range_to_varying (vr);
565 return;
566 }
567
568 zero = build_int_cst (type, 0);
569 set_value_range (vr, VR_RANGE, zero,
570 (overflow_infinity
571 ? positive_overflow_infinity (type)
572 : TYPE_MAX_VALUE (type)),
573 vr->equiv);
574 }
575
576 /* Set value range VR to a non-NULL range of type TYPE. */
577
578 static inline void
579 set_value_range_to_nonnull (value_range_t *vr, tree type)
580 {
581 tree zero = build_int_cst (type, 0);
582 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
583 }
584
585
586 /* Set value range VR to a NULL range of type TYPE. */
587
588 static inline void
589 set_value_range_to_null (value_range_t *vr, tree type)
590 {
591 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
592 }
593
594
595 /* Set value range VR to a range of a truthvalue of type TYPE. */
596
597 static inline void
598 set_value_range_to_truthvalue (value_range_t *vr, tree type)
599 {
600 if (TYPE_PRECISION (type) == 1)
601 set_value_range_to_varying (vr);
602 else
603 set_value_range (vr, VR_RANGE,
604 build_int_cst (type, 0), build_int_cst (type, 1),
605 vr->equiv);
606 }
607
608
609 /* Set value range VR to VR_UNDEFINED. */
610
611 static inline void
612 set_value_range_to_undefined (value_range_t *vr)
613 {
614 vr->type = VR_UNDEFINED;
615 vr->min = vr->max = NULL_TREE;
616 if (vr->equiv)
617 bitmap_clear (vr->equiv);
618 }
619
620
621 /* If abs (min) < abs (max), set VR to [-max, max], if
622 abs (min) >= abs (max), set VR to [-min, min]. */
623
624 static void
625 abs_extent_range (value_range_t *vr, tree min, tree max)
626 {
627 int cmp;
628
629 gcc_assert (TREE_CODE (min) == INTEGER_CST);
630 gcc_assert (TREE_CODE (max) == INTEGER_CST);
631 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
632 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
633 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
634 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
635 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
636 {
637 set_value_range_to_varying (vr);
638 return;
639 }
640 cmp = compare_values (min, max);
641 if (cmp == -1)
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
643 else if (cmp == 0 || cmp == 1)
644 {
645 max = min;
646 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
647 }
648 else
649 {
650 set_value_range_to_varying (vr);
651 return;
652 }
653 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
654 }
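
/* For instance, the signed bounds MIN = -3, MAX = 7 give [-7, 7], and
   MIN = -7, MAX = 5 give [-7, 7] as well; if taking the absolute value
   of either bound overflows (e.g. MIN = INT_MIN) the result drops to
   VARYING.  */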
655
656
657 /* Return value range information for VAR.
658
 659    If we have no value ranges recorded (i.e., VRP is not running), then
660 return NULL. Otherwise create an empty range if none existed for VAR. */
661
662 static value_range_t *
663 get_value_range (const_tree var)
664 {
665 static const struct value_range_d vr_const_varying
666 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
667 value_range_t *vr;
668 tree sym;
669 unsigned ver = SSA_NAME_VERSION (var);
670
671 /* If we have no recorded ranges, then return NULL. */
672 if (! vr_value)
673 return NULL;
674
675 /* If we query the range for a new SSA name return an unmodifiable VARYING.
676 We should get here at most from the substitute-and-fold stage which
677 will never try to change values. */
678 if (ver >= num_vr_values)
679 return CONST_CAST (value_range_t *, &vr_const_varying);
680
681 vr = vr_value[ver];
682 if (vr)
683 return vr;
684
685 /* After propagation finished do not allocate new value-ranges. */
686 if (values_propagated)
687 return CONST_CAST (value_range_t *, &vr_const_varying);
688
689 /* Create a default value range. */
690 vr_value[ver] = vr = XCNEW (value_range_t);
691
692 /* Defer allocating the equivalence set. */
693 vr->equiv = NULL;
694
695 /* If VAR is a default definition of a parameter, the variable can
696 take any value in VAR's type. */
697 sym = SSA_NAME_VAR (var);
698 if (SSA_NAME_IS_DEFAULT_DEF (var)
699 && TREE_CODE (sym) == PARM_DECL)
700 {
701 /* Try to use the "nonnull" attribute to create ~[0, 0]
702 anti-ranges for pointers. Note that this is only valid with
703 default definitions of PARM_DECLs. */
704 if (POINTER_TYPE_P (TREE_TYPE (sym))
705 && nonnull_arg_p (sym))
706 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
707 else
708 set_value_range_to_varying (vr);
709 }
710
711 return vr;
712 }
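
/* A minimal usage sketch for the accessor above; the function is
   purely illustrative (hence guarded out), and its name is invented
   for the example.  */
#if 0
static bool
name_known_nonnull_example (tree name)
{
  value_range_t *vr = get_value_range (name);

  /* A NULL result means VRP is not running at all.  */
  if (vr == NULL)
    return false;

  /* ~[0, 0] is how "this pointer is never null" is represented.  */
  return (vr->type == VR_ANTI_RANGE
          && integer_zerop (vr->min)
          && integer_zerop (vr->max));
}
#endif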
713
 714 /* Return true if VAL1 and VAL2 are equal values for VRP purposes.  */
715
716 static inline bool
717 vrp_operand_equal_p (const_tree val1, const_tree val2)
718 {
719 if (val1 == val2)
720 return true;
721 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
722 return false;
723 if (is_overflow_infinity (val1))
724 return is_overflow_infinity (val2);
725 return true;
726 }
727
 728 /* Return true if the bitmaps B1 and B2 are equal.  */
729
730 static inline bool
731 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
732 {
733 return (b1 == b2
734 || ((!b1 || bitmap_empty_p (b1))
735 && (!b2 || bitmap_empty_p (b2)))
736 || (b1 && b2
737 && bitmap_equal_p (b1, b2)));
738 }
739
740 /* Update the value range and equivalence set for variable VAR to
741 NEW_VR. Return true if NEW_VR is different from VAR's previous
742 value.
743
744 NOTE: This function assumes that NEW_VR is a temporary value range
745 object created for the sole purpose of updating VAR's range. The
746 storage used by the equivalence set from NEW_VR will be freed by
747 this function. Do not call update_value_range when NEW_VR
748 is the range object associated with another SSA name. */
749
750 static inline bool
751 update_value_range (const_tree var, value_range_t *new_vr)
752 {
753 value_range_t *old_vr;
754 bool is_new;
755
756 /* Update the value range, if necessary. */
757 old_vr = get_value_range (var);
758 is_new = old_vr->type != new_vr->type
759 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
760 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
761 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
762
763 if (is_new)
764 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
765 new_vr->equiv);
766
767 BITMAP_FREE (new_vr->equiv);
768
769 return is_new;
770 }
771
772
773 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
774 point where equivalence processing can be turned on/off. */
775
776 static void
777 add_equivalence (bitmap *equiv, const_tree var)
778 {
779 unsigned ver = SSA_NAME_VERSION (var);
780 value_range_t *vr = vr_value[ver];
781
782 if (*equiv == NULL)
783 *equiv = BITMAP_ALLOC (NULL);
784 bitmap_set_bit (*equiv, ver);
785 if (vr && vr->equiv)
786 bitmap_ior_into (*equiv, vr->equiv);
787 }
788
789
790 /* Return true if VR is ~[0, 0]. */
791
792 static inline bool
793 range_is_nonnull (value_range_t *vr)
794 {
795 return vr->type == VR_ANTI_RANGE
796 && integer_zerop (vr->min)
797 && integer_zerop (vr->max);
798 }
799
800
801 /* Return true if VR is [0, 0]. */
802
803 static inline bool
804 range_is_null (value_range_t *vr)
805 {
806 return vr->type == VR_RANGE
807 && integer_zerop (vr->min)
808 && integer_zerop (vr->max);
809 }
810
 811 /* Return true if the max and min of VR are INTEGER_CST.  It is not
 812    necessarily a singleton.  */
813
814 static inline bool
815 range_int_cst_p (value_range_t *vr)
816 {
817 return (vr->type == VR_RANGE
818 && TREE_CODE (vr->max) == INTEGER_CST
819 && TREE_CODE (vr->min) == INTEGER_CST
820 && !TREE_OVERFLOW (vr->max)
821 && !TREE_OVERFLOW (vr->min));
822 }
823
 824 /* Return true if VR is an INTEGER_CST singleton.  */
825
826 static inline bool
827 range_int_cst_singleton_p (value_range_t *vr)
828 {
829 return (range_int_cst_p (vr)
830 && tree_int_cst_equal (vr->min, vr->max));
831 }
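
/* Examples for the predicates above: ~[0, 0] satisfies
   range_is_nonnull, [0, 0] satisfies range_is_null, [3, 7] satisfies
   range_int_cst_p, and [3, 3] additionally satisfies
   range_int_cst_singleton_p.  */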
832
833 /* Return true if value range VR involves at least one symbol. */
834
835 static inline bool
836 symbolic_range_p (value_range_t *vr)
837 {
838 return (!is_gimple_min_invariant (vr->min)
839 || !is_gimple_min_invariant (vr->max));
840 }
841
842 /* Return true if value range VR uses an overflow infinity. */
843
844 static inline bool
845 overflow_infinity_range_p (value_range_t *vr)
846 {
847 return (vr->type == VR_RANGE
848 && (is_overflow_infinity (vr->min)
849 || is_overflow_infinity (vr->max)));
850 }
851
 852 /* Return false if we cannot make a valid comparison based on VR;
853 this will be the case if it uses an overflow infinity and overflow
854 is not undefined (i.e., -fno-strict-overflow is in effect).
855 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
856 uses an overflow infinity. */
857
858 static bool
859 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
860 {
861 gcc_assert (vr->type == VR_RANGE);
862 if (is_overflow_infinity (vr->min))
863 {
864 *strict_overflow_p = true;
865 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
866 return false;
867 }
868 if (is_overflow_infinity (vr->max))
869 {
870 *strict_overflow_p = true;
871 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
872 return false;
873 }
874 return true;
875 }
876
877
 878 /* Return true if the result of assignment STMT is known to be non-negative.
 879    If the return value is based on the assumption that signed overflow is
 880    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
 881    *STRICT_OVERFLOW_P.  */
882
883 static bool
884 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
885 {
886 enum tree_code code = gimple_assign_rhs_code (stmt);
887 switch (get_gimple_rhs_class (code))
888 {
889 case GIMPLE_UNARY_RHS:
890 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
891 gimple_expr_type (stmt),
892 gimple_assign_rhs1 (stmt),
893 strict_overflow_p);
894 case GIMPLE_BINARY_RHS:
895 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
896 gimple_expr_type (stmt),
897 gimple_assign_rhs1 (stmt),
898 gimple_assign_rhs2 (stmt),
899 strict_overflow_p);
900 case GIMPLE_TERNARY_RHS:
901 return false;
902 case GIMPLE_SINGLE_RHS:
903 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
904 strict_overflow_p);
905 case GIMPLE_INVALID_RHS:
906 gcc_unreachable ();
907 default:
908 gcc_unreachable ();
909 }
910 }
911
 912 /* Return true if the return value of call STMT is known to be non-negative.
 913    If the return value is based on the assumption that signed overflow is
 914    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
 915    *STRICT_OVERFLOW_P.  */
916
917 static bool
918 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
919 {
920 tree arg0 = gimple_call_num_args (stmt) > 0 ?
921 gimple_call_arg (stmt, 0) : NULL_TREE;
922 tree arg1 = gimple_call_num_args (stmt) > 1 ?
923 gimple_call_arg (stmt, 1) : NULL_TREE;
924
925 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
926 gimple_call_fndecl (stmt),
927 arg0,
928 arg1,
929 strict_overflow_p);
930 }
931
 932 /* Return true if STMT is known to compute a non-negative value.
 933    If the return value is based on the assumption that signed overflow is
 934    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
 935    *STRICT_OVERFLOW_P.  */
936
937 static bool
938 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
939 {
940 switch (gimple_code (stmt))
941 {
942 case GIMPLE_ASSIGN:
943 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
944 case GIMPLE_CALL:
945 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
946 default:
947 gcc_unreachable ();
948 }
949 }
950
 951 /* Return true if the result of assignment STMT is known to be non-zero.
 952    If the return value is based on the assumption that signed overflow is
 953    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
 954    *STRICT_OVERFLOW_P.  */
955
956 static bool
957 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
958 {
959 enum tree_code code = gimple_assign_rhs_code (stmt);
960 switch (get_gimple_rhs_class (code))
961 {
962 case GIMPLE_UNARY_RHS:
963 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
964 gimple_expr_type (stmt),
965 gimple_assign_rhs1 (stmt),
966 strict_overflow_p);
967 case GIMPLE_BINARY_RHS:
968 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
969 gimple_expr_type (stmt),
970 gimple_assign_rhs1 (stmt),
971 gimple_assign_rhs2 (stmt),
972 strict_overflow_p);
973 case GIMPLE_TERNARY_RHS:
974 return false;
975 case GIMPLE_SINGLE_RHS:
976 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
977 strict_overflow_p);
978 case GIMPLE_INVALID_RHS:
979 gcc_unreachable ();
980 default:
981 gcc_unreachable ();
982 }
983 }
984
 985 /* Return true if STMT is known to compute a non-zero value.
 986    If the return value is based on the assumption that signed overflow is
 987    undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
 988    *STRICT_OVERFLOW_P.  */
989
990 static bool
991 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
992 {
993 switch (gimple_code (stmt))
994 {
995 case GIMPLE_ASSIGN:
996 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
997 case GIMPLE_CALL:
998 return gimple_alloca_call_p (stmt);
999 default:
1000 gcc_unreachable ();
1001 }
1002 }
1003
1004 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1005 obtained so far. */
1006
1007 static bool
1008 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1009 {
1010 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1011 return true;
1012
1013 /* If we have an expression of the form &X->a, then the expression
1014 is nonnull if X is nonnull. */
1015 if (is_gimple_assign (stmt)
1016 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1017 {
1018 tree expr = gimple_assign_rhs1 (stmt);
1019 tree base = get_base_address (TREE_OPERAND (expr, 0));
1020
1021 if (base != NULL_TREE
1022 && TREE_CODE (base) == MEM_REF
1023 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1024 {
1025 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1026 if (range_is_nonnull (vr))
1027 return true;
1028 }
1029 }
1030
1031 return false;
1032 }
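
/* For example, given the assignment

     p_2 = &x_1->fld;

   where the pointer x_1 has the recorded range ~[0, 0], the address
   stored in p_2 is known to be nonzero as well: the base of the
   ADDR_EXPR is a MEM_REF whose pointer operand is x_1.  The names
   here are made up for the illustration.  */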
1033
1034 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1035 a gimple invariant, or SSA_NAME +- CST. */
1036
1037 static bool
1038 valid_value_p (tree expr)
1039 {
1040 if (TREE_CODE (expr) == SSA_NAME)
1041 return true;
1042
1043 if (TREE_CODE (expr) == PLUS_EXPR
1044 || TREE_CODE (expr) == MINUS_EXPR)
1045 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1046 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1047
1048 return is_gimple_min_invariant (expr);
1049 }
1050
1051 /* Return
1052 1 if VAL < VAL2
1053 0 if !(VAL < VAL2)
1054 -2 if those are incomparable. */
1055 static inline int
1056 operand_less_p (tree val, tree val2)
1057 {
1058 /* LT is folded faster than GE and others. Inline the common case. */
1059 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1060 {
1061 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1062 return INT_CST_LT_UNSIGNED (val, val2);
1063 else
1064 {
1065 if (INT_CST_LT (val, val2))
1066 return 1;
1067 }
1068 }
1069 else
1070 {
1071 tree tcmp;
1072
1073 fold_defer_overflow_warnings ();
1074
1075 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1076
1077 fold_undefer_and_ignore_overflow_warnings ();
1078
1079 if (!tcmp
1080 || TREE_CODE (tcmp) != INTEGER_CST)
1081 return -2;
1082
1083 if (!integer_zerop (tcmp))
1084 return 1;
1085 }
1086
1087 /* val >= val2, not considering overflow infinity. */
1088 if (is_negative_overflow_infinity (val))
1089 return is_negative_overflow_infinity (val2) ? 0 : 1;
1090 else if (is_positive_overflow_infinity (val2))
1091 return is_positive_overflow_infinity (val) ? 0 : 1;
1092
1093 return 0;
1094 }
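
/* Illustration of the convention above: on the integer constants 3
   and 5 the function returns 1, on 5 and 3 (or 5 and 5) it returns 0,
   and on operands whose comparison cannot be folded to a constant,
   such as two unrelated SSA names, it returns -2.  */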
1095
1096 /* Compare two values VAL1 and VAL2. Return
1097
1098 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1099 -1 if VAL1 < VAL2,
1100 0 if VAL1 == VAL2,
1101 +1 if VAL1 > VAL2, and
1102 +2 if VAL1 != VAL2
1103
1104 This is similar to tree_int_cst_compare but supports pointer values
1105 and values that cannot be compared at compile time.
1106
1107 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1108 true if the return value is only valid if we assume that signed
1109 overflow is undefined. */
1110
1111 static int
1112 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1113 {
1114 if (val1 == val2)
1115 return 0;
1116
1117 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1118 both integers. */
1119 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1120 == POINTER_TYPE_P (TREE_TYPE (val2)));
1121 /* Convert the two values into the same type. This is needed because
1122 sizetype causes sign extension even for unsigned types. */
1123 val2 = fold_convert (TREE_TYPE (val1), val2);
1124 STRIP_USELESS_TYPE_CONVERSION (val2);
1125
1126 if ((TREE_CODE (val1) == SSA_NAME
1127 || TREE_CODE (val1) == PLUS_EXPR
1128 || TREE_CODE (val1) == MINUS_EXPR)
1129 && (TREE_CODE (val2) == SSA_NAME
1130 || TREE_CODE (val2) == PLUS_EXPR
1131 || TREE_CODE (val2) == MINUS_EXPR))
1132 {
1133 tree n1, c1, n2, c2;
1134 enum tree_code code1, code2;
1135
1136 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1137 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1138 same name, return -2. */
1139 if (TREE_CODE (val1) == SSA_NAME)
1140 {
1141 code1 = SSA_NAME;
1142 n1 = val1;
1143 c1 = NULL_TREE;
1144 }
1145 else
1146 {
1147 code1 = TREE_CODE (val1);
1148 n1 = TREE_OPERAND (val1, 0);
1149 c1 = TREE_OPERAND (val1, 1);
1150 if (tree_int_cst_sgn (c1) == -1)
1151 {
1152 if (is_negative_overflow_infinity (c1))
1153 return -2;
1154 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1155 if (!c1)
1156 return -2;
1157 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1158 }
1159 }
1160
1161 if (TREE_CODE (val2) == SSA_NAME)
1162 {
1163 code2 = SSA_NAME;
1164 n2 = val2;
1165 c2 = NULL_TREE;
1166 }
1167 else
1168 {
1169 code2 = TREE_CODE (val2);
1170 n2 = TREE_OPERAND (val2, 0);
1171 c2 = TREE_OPERAND (val2, 1);
1172 if (tree_int_cst_sgn (c2) == -1)
1173 {
1174 if (is_negative_overflow_infinity (c2))
1175 return -2;
1176 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1177 if (!c2)
1178 return -2;
1179 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1180 }
1181 }
1182
1183 /* Both values must use the same name. */
1184 if (n1 != n2)
1185 return -2;
1186
1187 if (code1 == SSA_NAME
1188 && code2 == SSA_NAME)
1189 /* NAME == NAME */
1190 return 0;
1191
1192 /* If overflow is defined we cannot simplify more. */
1193 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1194 return -2;
1195
1196 if (strict_overflow_p != NULL
1197 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1198 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1199 *strict_overflow_p = true;
1200
1201 if (code1 == SSA_NAME)
1202 {
1203 if (code2 == PLUS_EXPR)
1204 /* NAME < NAME + CST */
1205 return -1;
1206 else if (code2 == MINUS_EXPR)
1207 /* NAME > NAME - CST */
1208 return 1;
1209 }
1210 else if (code1 == PLUS_EXPR)
1211 {
1212 if (code2 == SSA_NAME)
1213 /* NAME + CST > NAME */
1214 return 1;
1215 else if (code2 == PLUS_EXPR)
1216 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1217 return compare_values_warnv (c1, c2, strict_overflow_p);
1218 else if (code2 == MINUS_EXPR)
1219 /* NAME + CST1 > NAME - CST2 */
1220 return 1;
1221 }
1222 else if (code1 == MINUS_EXPR)
1223 {
1224 if (code2 == SSA_NAME)
1225 /* NAME - CST < NAME */
1226 return -1;
1227 else if (code2 == PLUS_EXPR)
1228 /* NAME - CST1 < NAME + CST2 */
1229 return -1;
1230 else if (code2 == MINUS_EXPR)
1231 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1232 C1 and C2 are swapped in the call to compare_values. */
1233 return compare_values_warnv (c2, c1, strict_overflow_p);
1234 }
1235
1236 gcc_unreachable ();
1237 }
1238
1239 /* We cannot compare non-constants. */
1240 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1241 return -2;
1242
1243 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1244 {
1245 /* We cannot compare overflowed values, except for overflow
1246 infinities. */
1247 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1248 {
1249 if (strict_overflow_p != NULL)
1250 *strict_overflow_p = true;
1251 if (is_negative_overflow_infinity (val1))
1252 return is_negative_overflow_infinity (val2) ? 0 : -1;
1253 else if (is_negative_overflow_infinity (val2))
1254 return 1;
1255 else if (is_positive_overflow_infinity (val1))
1256 return is_positive_overflow_infinity (val2) ? 0 : 1;
1257 else if (is_positive_overflow_infinity (val2))
1258 return -1;
1259 return -2;
1260 }
1261
1262 return tree_int_cst_compare (val1, val2);
1263 }
1264 else
1265 {
1266 tree t;
1267
1268 /* First see if VAL1 and VAL2 are not the same. */
1269 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1270 return 0;
1271
1272 /* If VAL1 is a lower address than VAL2, return -1. */
1273 if (operand_less_p (val1, val2) == 1)
1274 return -1;
1275
1276 /* If VAL1 is a higher address than VAL2, return +1. */
1277 if (operand_less_p (val2, val1) == 1)
1278 return 1;
1279
1280 /* If VAL1 is different than VAL2, return +2.
1281 For integer constants we either have already returned -1 or 1
1282 or they are equivalent. We still might succeed in proving
1283 something about non-trivial operands. */
1284 if (TREE_CODE (val1) != INTEGER_CST
1285 || TREE_CODE (val2) != INTEGER_CST)
1286 {
1287 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1288 if (t && integer_onep (t))
1289 return 2;
1290 }
1291
1292 return -2;
1293 }
1294 }
1295
1296 /* Compare values like compare_values_warnv, but treat comparisons of
1297 nonconstants which rely on undefined overflow as incomparable. */
1298
1299 static int
1300 compare_values (tree val1, tree val2)
1301 {
1302 bool sop;
1303 int ret;
1304
1305 sop = false;
1306 ret = compare_values_warnv (val1, val2, &sop);
1307 if (sop
1308 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1309 ret = -2;
1310 return ret;
1311 }
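
/* For example, when signed overflow is treated as undefined,
   compare_values_warnv (i_1 + 4, i_1, &sop) returns +1 (NAME + CST >
   NAME) and sets SOP, since the conclusion only holds if i_1 + 4 does
   not wrap.  The compare_values wrapper instead returns -2 for the
   same operands, because the result relied on undefined overflow and
   neither operand is a gimple invariant.  (i_1 is an invented SSA
   name.)  */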
1312
1313
1314 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1315 0 if VAL is not inside VR,
1316 -2 if we cannot tell either way.
1317
 1318    FIXME, the current semantics of this function are a bit quirky
1319 when taken in the context of VRP. In here we do not care
1320 about VR's type. If VR is the anti-range ~[3, 5] the call
1321 value_inside_range (4, VR) will return 1.
1322
1323 This is counter-intuitive in a strict sense, but the callers
1324 currently expect this. They are calling the function
1325 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1326 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1327 themselves.
1328
1329 This also applies to value_ranges_intersect_p and
1330 range_includes_zero_p. The semantics of VR_RANGE and
1331 VR_ANTI_RANGE should be encoded here, but that also means
1332 adapting the users of these functions to the new semantics.
1333
1334 Benchmark compile/20001226-1.c compilation time after changing this
1335 function. */
1336
1337 static inline int
1338 value_inside_range (tree val, value_range_t * vr)
1339 {
1340 int cmp1, cmp2;
1341
1342 cmp1 = operand_less_p (val, vr->min);
1343 if (cmp1 == -2)
1344 return -2;
1345 if (cmp1 == 1)
1346 return 0;
1347
1348 cmp2 = operand_less_p (vr->max, val);
1349 if (cmp2 == -2)
1350 return -2;
1351
1352 return !cmp2;
1353 }
1354
1355
1356 /* Return true if value ranges VR0 and VR1 have a non-empty
1357 intersection.
1358
1359 Benchmark compile/20001226-1.c compilation time after changing this
1360 function.
1361 */
1362
1363 static inline bool
1364 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1365 {
1366 /* The value ranges do not intersect if the maximum of the first range is
1367 less than the minimum of the second range or vice versa.
1368 When those relations are unknown, we can't do any better. */
1369 if (operand_less_p (vr0->max, vr1->min) != 0)
1370 return false;
1371 if (operand_less_p (vr1->max, vr0->min) != 0)
1372 return false;
1373 return true;
1374 }
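
/* For instance, [1, 5] and [5, 9] intersect (they share the value 5)
   while [1, 4] and [5, 9] do not; when a comparison against a
   symbolic bound cannot be decided, the function returns false as
   well, as noted above.  */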
1375
1376
1377 /* Return true if VR includes the value zero, false otherwise. FIXME,
1378 currently this will return false for an anti-range like ~[-4, 3].
1379 This will be wrong when the semantics of value_inside_range are
1380 modified (currently the users of this function expect these
1381 semantics). */
1382
1383 static inline bool
1384 range_includes_zero_p (value_range_t *vr)
1385 {
1386 tree zero;
1387
1388 gcc_assert (vr->type != VR_UNDEFINED
1389 && vr->type != VR_VARYING
1390 && !symbolic_range_p (vr));
1391
1392 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1393 return (value_inside_range (zero, vr) == 1);
1394 }
1395
 1396 /* Return true if *VR is known to contain only nonnegative values.  */
1397
1398 static inline bool
1399 value_range_nonnegative_p (value_range_t *vr)
1400 {
1401 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1402 which would return a useful value should be encoded as a
1403 VR_RANGE. */
1404 if (vr->type == VR_RANGE)
1405 {
1406 int result = compare_values (vr->min, integer_zero_node);
1407 return (result == 0 || result == 1);
1408 }
1409
1410 return false;
1411 }
1412
1413 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1414 false otherwise or if no value range information is available. */
1415
1416 bool
1417 ssa_name_nonnegative_p (const_tree t)
1418 {
1419 value_range_t *vr = get_value_range (t);
1420
 1421   if (INTEGRAL_TYPE_P (TREE_TYPE (t))
 1422       && TYPE_UNSIGNED (TREE_TYPE (t)))
1423 return true;
1424
1425 if (!vr)
1426 return false;
1427
1428 return value_range_nonnegative_p (vr);
1429 }
1430
 1431 /* If *VR has a value range that is a single constant value, return that,
1432 otherwise return NULL_TREE. */
1433
1434 static tree
1435 value_range_constant_singleton (value_range_t *vr)
1436 {
1437 if (vr->type == VR_RANGE
1438 && operand_equal_p (vr->min, vr->max, 0)
1439 && is_gimple_min_invariant (vr->min))
1440 return vr->min;
1441
1442 return NULL_TREE;
1443 }
1444
1445 /* If OP has a value range with a single constant value return that,
1446 otherwise return NULL_TREE. This returns OP itself if OP is a
1447 constant. */
1448
1449 static tree
1450 op_with_constant_singleton_value_range (tree op)
1451 {
1452 if (is_gimple_min_invariant (op))
1453 return op;
1454
1455 if (TREE_CODE (op) != SSA_NAME)
1456 return NULL_TREE;
1457
1458 return value_range_constant_singleton (get_value_range (op));
1459 }
1460
1461 /* Return true if op is in a boolean [0, 1] value-range. */
1462
1463 static bool
1464 op_with_boolean_value_range_p (tree op)
1465 {
1466 value_range_t *vr;
1467
1468 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1469 return true;
1470
1471 if (integer_zerop (op)
1472 || integer_onep (op))
1473 return true;
1474
1475 if (TREE_CODE (op) != SSA_NAME)
1476 return false;
1477
1478 vr = get_value_range (op);
1479 return (vr->type == VR_RANGE
1480 && integer_zerop (vr->min)
1481 && integer_onep (vr->max));
1482 }
1483
1484 /* Extract value range information from an ASSERT_EXPR EXPR and store
1485 it in *VR_P. */
1486
1487 static void
1488 extract_range_from_assert (value_range_t *vr_p, tree expr)
1489 {
1490 tree var, cond, limit, min, max, type;
1491 value_range_t *var_vr, *limit_vr;
1492 enum tree_code cond_code;
1493
1494 var = ASSERT_EXPR_VAR (expr);
1495 cond = ASSERT_EXPR_COND (expr);
1496
1497 gcc_assert (COMPARISON_CLASS_P (cond));
1498
1499 /* Find VAR in the ASSERT_EXPR conditional. */
1500 if (var == TREE_OPERAND (cond, 0)
1501 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1502 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1503 {
1504 /* If the predicate is of the form VAR COMP LIMIT, then we just
1505 take LIMIT from the RHS and use the same comparison code. */
1506 cond_code = TREE_CODE (cond);
1507 limit = TREE_OPERAND (cond, 1);
1508 cond = TREE_OPERAND (cond, 0);
1509 }
1510 else
1511 {
1512 /* If the predicate is of the form LIMIT COMP VAR, then we need
1513 to flip around the comparison code to create the proper range
1514 for VAR. */
1515 cond_code = swap_tree_comparison (TREE_CODE (cond));
1516 limit = TREE_OPERAND (cond, 0);
1517 cond = TREE_OPERAND (cond, 1);
1518 }
1519
1520 limit = avoid_overflow_infinity (limit);
1521
1522 type = TREE_TYPE (var);
1523 gcc_assert (limit != var);
1524
1525 /* For pointer arithmetic, we only keep track of pointer equality
1526 and inequality. */
1527 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1528 {
1529 set_value_range_to_varying (vr_p);
1530 return;
1531 }
1532
1533 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1534 try to use LIMIT's range to avoid creating symbolic ranges
1535 unnecessarily. */
1536 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1537
1538 /* LIMIT's range is only interesting if it has any useful information. */
1539 if (limit_vr
1540 && (limit_vr->type == VR_UNDEFINED
1541 || limit_vr->type == VR_VARYING
1542 || symbolic_range_p (limit_vr)))
1543 limit_vr = NULL;
1544
1545 /* Initially, the new range has the same set of equivalences of
1546 VAR's range. This will be revised before returning the final
1547 value. Since assertions may be chained via mutually exclusive
1548 predicates, we will need to trim the set of equivalences before
1549 we are done. */
1550 gcc_assert (vr_p->equiv == NULL);
1551 add_equivalence (&vr_p->equiv, var);
1552
1553 /* Extract a new range based on the asserted comparison for VAR and
1554 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1555 will only use it for equality comparisons (EQ_EXPR). For any
1556 other kind of assertion, we cannot derive a range from LIMIT's
1557 anti-range that can be used to describe the new range. For
1558 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1559 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1560 no single range for x_2 that could describe LE_EXPR, so we might
1561 as well build the range [b_4, +INF] for it.
1562 One special case we handle is extracting a range from a
1563 range test encoded as (unsigned)var + CST <= limit. */
1564 if (TREE_CODE (cond) == NOP_EXPR
1565 || TREE_CODE (cond) == PLUS_EXPR)
1566 {
1567 if (TREE_CODE (cond) == PLUS_EXPR)
1568 {
1569 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1570 TREE_OPERAND (cond, 1));
1571 max = int_const_binop (PLUS_EXPR, limit, min);
1572 cond = TREE_OPERAND (cond, 0);
1573 }
1574 else
1575 {
1576 min = build_int_cst (TREE_TYPE (var), 0);
1577 max = limit;
1578 }
1579
 1580       /* Make sure not to set TREE_OVERFLOW on the final type
 1581          conversion.  We are willingly interpreting large positive
 1582          unsigned values as negative signed values here.  */
1583 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1584 0, false);
1585 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1586 0, false);
1587
1588 /* We can transform a max, min range to an anti-range or
1589 vice-versa. Use set_and_canonicalize_value_range which does
1590 this for us. */
1591 if (cond_code == LE_EXPR)
1592 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1593 min, max, vr_p->equiv);
1594 else if (cond_code == GT_EXPR)
1595 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1596 min, max, vr_p->equiv);
1597 else
1598 gcc_unreachable ();
1599 }
1600 else if (cond_code == EQ_EXPR)
1601 {
1602 enum value_range_type range_type;
1603
1604 if (limit_vr)
1605 {
1606 range_type = limit_vr->type;
1607 min = limit_vr->min;
1608 max = limit_vr->max;
1609 }
1610 else
1611 {
1612 range_type = VR_RANGE;
1613 min = limit;
1614 max = limit;
1615 }
1616
1617 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1618
1619 /* When asserting the equality VAR == LIMIT and LIMIT is another
1620 SSA name, the new range will also inherit the equivalence set
1621 from LIMIT. */
1622 if (TREE_CODE (limit) == SSA_NAME)
1623 add_equivalence (&vr_p->equiv, limit);
1624 }
1625 else if (cond_code == NE_EXPR)
1626 {
1627 /* As described above, when LIMIT's range is an anti-range and
1628 this assertion is an inequality (NE_EXPR), then we cannot
1629 derive anything from the anti-range. For instance, if
1630 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1631 not imply that VAR's range is [0, 0]. So, in the case of
1632 anti-ranges, we just assert the inequality using LIMIT and
1633 not its anti-range.
1634
1635 If LIMIT_VR is a range, we can only use it to build a new
1636 anti-range if LIMIT_VR is a single-valued range. For
1637 instance, if LIMIT_VR is [0, 1], the predicate
1638 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1639 Rather, it means that for value 0 VAR should be ~[0, 0]
1640 and for value 1, VAR should be ~[1, 1]. We cannot
1641 represent these ranges.
1642
1643 The only situation in which we can build a valid
1644 anti-range is when LIMIT_VR is a single-valued range
1645 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1646 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1647 if (limit_vr
1648 && limit_vr->type == VR_RANGE
1649 && compare_values (limit_vr->min, limit_vr->max) == 0)
1650 {
1651 min = limit_vr->min;
1652 max = limit_vr->max;
1653 }
1654 else
1655 {
1656 /* In any other case, we cannot use LIMIT's range to build a
1657 valid anti-range. */
1658 min = max = limit;
1659 }
1660
1661 /* If MIN and MAX cover the whole range for their type, then
1662 just use the original LIMIT. */
1663 if (INTEGRAL_TYPE_P (type)
1664 && vrp_val_is_min (min)
1665 && vrp_val_is_max (max))
1666 min = max = limit;
1667
1668 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1669 }
1670 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1671 {
1672 min = TYPE_MIN_VALUE (type);
1673
1674 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1675 max = limit;
1676 else
1677 {
1678 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1679 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1680 LT_EXPR. */
1681 max = limit_vr->max;
1682 }
1683
1684 /* If the maximum value forces us to be out of bounds, simply punt.
1685 It would be pointless to try and do anything more since this
1686 all should be optimized away above us. */
1687 if ((cond_code == LT_EXPR
1688 && compare_values (max, min) == 0)
1689 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1690 set_value_range_to_varying (vr_p);
1691 else
1692 {
1693 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1694 if (cond_code == LT_EXPR)
1695 {
1696 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1697 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1698 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1699 build_int_cst (TREE_TYPE (max), -1));
1700 else
1701 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1702 build_int_cst (TREE_TYPE (max), 1));
1703 if (EXPR_P (max))
1704 TREE_NO_WARNING (max) = 1;
1705 }
1706
1707 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1708 }
1709 }
1710 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1711 {
1712 max = TYPE_MAX_VALUE (type);
1713
1714 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1715 min = limit;
1716 else
1717 {
1718 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1719 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1720 GT_EXPR. */
1721 min = limit_vr->min;
1722 }
1723
1724 /* If the minimum value forces us to be out of bounds, simply punt.
1725 It would be pointless to try and do anything more since this
1726 all should be optimized away above us. */
1727 if ((cond_code == GT_EXPR
1728 && compare_values (min, max) == 0)
1729 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1730 set_value_range_to_varying (vr_p);
1731 else
1732 {
1733 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1734 if (cond_code == GT_EXPR)
1735 {
1736 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1737 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1738 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1739 build_int_cst (TREE_TYPE (min), -1));
1740 else
1741 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1742 build_int_cst (TREE_TYPE (min), 1));
1743 if (EXPR_P (min))
1744 TREE_NO_WARNING (min) = 1;
1745 }
1746
1747 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1748 }
1749 }
1750 else
1751 gcc_unreachable ();
1752
1753 /* If VAR already had a known range, it may happen that the new
1754 range we have computed and VAR's range are not compatible. For
1755 instance,
1756
1757 if (p_5 == NULL)
1758 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1759 x_7 = p_6->fld;
1760 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1761
1762 While the above comes from a faulty program, it will cause an ICE
1763 later because p_8 and p_6 will have incompatible ranges and at
1764 the same time will be considered equivalent. A similar situation
1765 would arise from
1766
1767 if (i_5 > 10)
1768 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1769 if (i_5 < 5)
1770 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1771
1772 Again i_6 and i_7 will have incompatible ranges. It would be
1773 pointless to try and do anything with i_7's range because
1774 anything dominated by 'if (i_5 < 5)' will be optimized away.
 1775      Note, due to the way in which simulation proceeds, the statement
 1776      i_7 = ASSERT_EXPR <...> would never be visited because the
1777 conditional 'if (i_5 < 5)' always evaluates to false. However,
1778 this extra check does not hurt and may protect against future
1779 changes to VRP that may get into a situation similar to the
1780 NULL pointer dereference example.
1781
1782 Note that these compatibility tests are only needed when dealing
1783 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1784 are both anti-ranges, they will always be compatible, because two
1785 anti-ranges will always have a non-empty intersection. */
1786
1787 var_vr = get_value_range (var);
1788
1789 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1790 ranges or anti-ranges. */
1791 if (vr_p->type == VR_VARYING
1792 || vr_p->type == VR_UNDEFINED
1793 || var_vr->type == VR_VARYING
1794 || var_vr->type == VR_UNDEFINED
1795 || symbolic_range_p (vr_p)
1796 || symbolic_range_p (var_vr))
1797 return;
1798
1799 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1800 {
1801 /* If the two ranges have a non-empty intersection, we can
1802 refine the resulting range. Since the assert expression
1803 creates an equivalency and at the same time it asserts a
1804 predicate, we can take the intersection of the two ranges to
1805 get better precision. */
1806 if (value_ranges_intersect_p (var_vr, vr_p))
1807 {
1808 /* Use the larger of the two minimums. */
1809 if (compare_values (vr_p->min, var_vr->min) == -1)
1810 min = var_vr->min;
1811 else
1812 min = vr_p->min;
1813
1814 /* Use the smaller of the two maximums. */
1815 if (compare_values (vr_p->max, var_vr->max) == 1)
1816 max = var_vr->max;
1817 else
1818 max = vr_p->max;
1819
1820 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1821 }
1822 else
1823 {
1824 /* The two ranges do not intersect, set the new range to
1825 VARYING, because we will not be able to do anything
1826 meaningful with it. */
1827 set_value_range_to_varying (vr_p);
1828 }
1829 }
1830 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1831 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1832 {
1833 /* A range and an anti-range will cancel each other only if
1834 their ends are the same. For instance, in the example above,
1835 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1836 so VR_P should be set to VR_VARYING. */
1837 if (compare_values (var_vr->min, vr_p->min) == 0
1838 && compare_values (var_vr->max, vr_p->max) == 0)
1839 set_value_range_to_varying (vr_p);
1840 else
1841 {
1842 tree min, max, anti_min, anti_max, real_min, real_max;
1843 int cmp;
1844
1845 /* We want to compute the logical AND of the two ranges;
1846 there are three cases to consider.
1847
1848
1849 1. The VR_ANTI_RANGE range is completely within the
1850 VR_RANGE and the endpoints of the ranges are
1851 different. In that case the resulting range
1852 should be whichever range is more precise.
1853 Typically that will be the VR_RANGE.
1854
1855 2. The VR_ANTI_RANGE is completely disjoint from
1856 the VR_RANGE. In this case the resulting range
1857 should be the VR_RANGE.
1858
1859 3. There is some overlap between the VR_ANTI_RANGE
1860 and the VR_RANGE.
1861
1862 3a. If the high limit of the VR_ANTI_RANGE resides
1863 within the VR_RANGE, then the result is a new
1864 VR_RANGE starting at the high limit of the
1865 VR_ANTI_RANGE + 1 and extending to the
1866 high limit of the original VR_RANGE.
1867
1868 3b. If the low limit of the VR_ANTI_RANGE resides
1869 within the VR_RANGE, then the result is a new
1870 VR_RANGE starting at the low limit of the original
1871 VR_RANGE and extending to the low limit of the
1872 VR_ANTI_RANGE - 1. */
1873 if (vr_p->type == VR_ANTI_RANGE)
1874 {
1875 anti_min = vr_p->min;
1876 anti_max = vr_p->max;
1877 real_min = var_vr->min;
1878 real_max = var_vr->max;
1879 }
1880 else
1881 {
1882 anti_min = var_vr->min;
1883 anti_max = var_vr->max;
1884 real_min = vr_p->min;
1885 real_max = vr_p->max;
1886 }
1887
1888
1889 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1890 not including any endpoints. */
1891 if (compare_values (anti_max, real_max) == -1
1892 && compare_values (anti_min, real_min) == 1)
1893 {
1894 /* Prefer the VR_RANGE unless it already covers the whole
1895 valid range of the type; then keep the anti-range. */
1896 if (!vrp_val_is_min (real_min)
1897 || !vrp_val_is_max (real_max))
1898 set_value_range (vr_p, VR_RANGE, real_min,
1899 real_max, vr_p->equiv);
1900 }
1901 /* Case 2, VR_ANTI_RANGE completely disjoint from
1902 VR_RANGE. */
1903 else if (compare_values (anti_min, real_max) == 1
1904 || compare_values (anti_max, real_min) == -1)
1905 {
1906 set_value_range (vr_p, VR_RANGE, real_min,
1907 real_max, vr_p->equiv);
1908 }
1909 /* Case 3a, the anti-range extends into the low
1910 part of the real range. Thus creating a new
1911 low for the real range. */
1912 else if (((cmp = compare_values (anti_max, real_min)) == 1
1913 || cmp == 0)
1914 && compare_values (anti_max, real_max) == -1)
1915 {
1916 gcc_assert (!is_positive_overflow_infinity (anti_max));
1917 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1918 && vrp_val_is_max (anti_max))
1919 {
1920 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1921 {
1922 set_value_range_to_varying (vr_p);
1923 return;
1924 }
1925 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1926 }
1927 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1928 {
1929 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1930 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1931 min = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1932 anti_max,
1933 build_int_cst (TREE_TYPE (var_vr->min),
1934 -1));
1935 else
1936 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1937 anti_max,
1938 build_int_cst (TREE_TYPE (var_vr->min),
1939 1));
1940 }
1941 else
1942 min = fold_build_pointer_plus_hwi (anti_max, 1);
1943 max = real_max;
1944 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1945 }
1946 /* Case 3b, the anti-range extends into the high
1947 part of the real range. Thus creating a new
1948 higher for the real range. */
1949 else if (compare_values (anti_min, real_min) == 1
1950 && ((cmp = compare_values (anti_min, real_max)) == -1
1951 || cmp == 0))
1952 {
1953 gcc_assert (!is_negative_overflow_infinity (anti_min));
1954 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1955 && vrp_val_is_min (anti_min))
1956 {
1957 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1958 {
1959 set_value_range_to_varying (vr_p);
1960 return;
1961 }
1962 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1963 }
1964 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1965 {
1966 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1967 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1968 max = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1969 anti_min,
1970 build_int_cst (TREE_TYPE (var_vr->min),
1971 -1));
1972 else
1973 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1974 anti_min,
1975 build_int_cst (TREE_TYPE (var_vr->min),
1976 1));
1977 }
1978 else
1979 max = fold_build_pointer_plus_hwi (anti_min, -1);
1980 min = real_min;
1981 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1982 }
1983 }
1984 }
1985 }
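
/* Illustrative sketch only, not used by VRP itself: a standalone
   version of the VR_RANGE vs. VR_ANTI_RANGE cases described above,
   restricted to plain ints and ignoring overflow infinities.  The
   example_* name is made up for this sketch.  Returns 1 and sets
   *MIN/*MAX when a single refined range results, 0 when nothing
   better than the inputs is known.  */

static int
example_intersect_range_anti_range (int rmin, int rmax, int amin, int amax,
                                    int *min, int *max)
{
  /* [RMIN, RMAX] intersected with ~[RMIN, RMAX] is contradictory;
     the caller would drop to VARYING here.  */
  if (amin == rmin && amax == rmax)
    return 0;

  /* Case 1: the anti-range lies strictly inside the range.  Prefer
     the range; the real code keeps the anti-range only when the
     range already covers every value of the type.  */
  if (amin > rmin && amax < rmax)
    {
      *min = rmin, *max = rmax;
      return 1;
    }

  /* Case 2: the two are completely disjoint, the range survives.  */
  if (amin > rmax || amax < rmin)
    {
      *min = rmin, *max = rmax;
      return 1;
    }

  /* Case 3a: the anti-range clips the low end of the range.  */
  if (amax >= rmin && amax < rmax)
    {
      *min = amax + 1, *max = rmax;
      return 1;
    }

  /* Case 3b: the anti-range clips the high end of the range.  */
  if (amin > rmin && amin <= rmax)
    {
      *min = rmin, *max = amin - 1;
      return 1;
    }

  /* The anti-range swallows the whole range: the intersection is
     empty, which the caller must treat as useless.  */
  return 0;
}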
1986
1987
1988 /* Extract range information from SSA name VAR and store it in VR. If
1989 VAR has an interesting range, use it. Otherwise, create the
1990 range [VAR, VAR] and return it. This is useful in situations where
1991 we may have conditionals testing values of VARYING names. For
1992 instance,
1993
1994 x_3 = y_5;
1995 if (x_3 > y_5)
1996 ...
1997
1998 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1999 always false. */
2000
2001 static void
2002 extract_range_from_ssa_name (value_range_t *vr, tree var)
2003 {
2004 value_range_t *var_vr = get_value_range (var);
2005
2006 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
2007 copy_value_range (vr, var_vr);
2008 else
2009 set_value_range (vr, VR_RANGE, var, var, NULL);
2010
2011 add_equivalence (&vr->equiv, var);
2012 }
2013
2014
2015 /* Wrapper around int_const_binop. If the operation overflows and we
2016 are not using wrapping arithmetic, then adjust the result to be
2017 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
2018 NULL_TREE if we need to use an overflow infinity representation but
2019 the type does not support it. */
2020
2021 static tree
2022 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
2023 {
2024 tree res;
2025
2026 res = int_const_binop (code, val1, val2);
2027
2028 /* If we are using unsigned arithmetic, operate symbolically
2029 on -INF and +INF as int_const_binop only handles signed overflow. */
2030 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
2031 {
2032 int checkz = compare_values (res, val1);
2033 bool overflow = false;
2034
2035 /* Ensure that res = val1 [+*] val2 >= val1
2036 or that res = val1 - val2 <= val1. */
2037 if ((code == PLUS_EXPR
2038 && !(checkz == 1 || checkz == 0))
2039 || (code == MINUS_EXPR
2040 && !(checkz == 0 || checkz == -1)))
2041 {
2042 overflow = true;
2043 }
2044 /* Checking for multiplication overflow is done by dividing the
2045 output of the multiplication by the first input of the
2046 multiplication. If the result of that division operation is
2047 not equal to the second input of the multiplication, then the
2048 multiplication overflowed. */
2049 else if (code == MULT_EXPR && !integer_zerop (val1))
2050 {
2051 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
2052 res,
2053 val1);
2054 int check = compare_values (tmp, val2);
2055
2056 if (check != 0)
2057 overflow = true;
2058 }
2059
2060 if (overflow)
2061 {
2062 res = copy_node (res);
2063 TREE_OVERFLOW (res) = 1;
2064 }
2065
2066 }
2067 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
2068 /* If the signed operation wraps then int_const_binop has done
2069 everything we want. */
2070 ;
2071 else if ((TREE_OVERFLOW (res)
2072 && !TREE_OVERFLOW (val1)
2073 && !TREE_OVERFLOW (val2))
2074 || is_overflow_infinity (val1)
2075 || is_overflow_infinity (val2))
2076 {
2077 /* If the operation overflowed but neither VAL1 nor VAL2 are
2078 overflown, return -INF or +INF depending on the operation
2079 and the combination of signs of the operands. */
2080 int sgn1 = tree_int_cst_sgn (val1);
2081 int sgn2 = tree_int_cst_sgn (val2);
2082
2083 if (needs_overflow_infinity (TREE_TYPE (res))
2084 && !supports_overflow_infinity (TREE_TYPE (res)))
2085 return NULL_TREE;
2086
2087 /* We have to punt on adding infinities of different signs,
2088 since we can't tell what the sign of the result should be.
2089 Likewise for subtracting infinities of the same sign. */
2090 if (((code == PLUS_EXPR && sgn1 != sgn2)
2091 || (code == MINUS_EXPR && sgn1 == sgn2))
2092 && is_overflow_infinity (val1)
2093 && is_overflow_infinity (val2))
2094 return NULL_TREE;
2095
2096 /* Don't try to handle division or shifting of infinities. */
2097 if ((code == TRUNC_DIV_EXPR
2098 || code == FLOOR_DIV_EXPR
2099 || code == CEIL_DIV_EXPR
2100 || code == EXACT_DIV_EXPR
2101 || code == ROUND_DIV_EXPR
2102 || code == RSHIFT_EXPR)
2103 && (is_overflow_infinity (val1)
2104 || is_overflow_infinity (val2)))
2105 return NULL_TREE;
2106
2107 /* Notice that we only need to handle the restricted set of
2108 operations handled by extract_range_from_binary_expr.
2109 Among them, only multiplication, addition and subtraction
2110 can yield overflow without overflown operands because we
2111 are working with integral types only... except in the
2112 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2113 for division too. */
2114
2115 /* For multiplication, the sign of the overflow is given
2116 by the comparison of the signs of the operands. */
2117 if ((code == MULT_EXPR && sgn1 == sgn2)
2118 /* For addition, the operands must be of the same sign
2119 to yield an overflow. Its sign is therefore that
2120 of one of the operands, for example the first. For
2121 infinite operands X + -INF is negative, not positive. */
2122 || (code == PLUS_EXPR
2123 && (sgn1 >= 0
2124 ? !is_negative_overflow_infinity (val2)
2125 : is_positive_overflow_infinity (val2)))
2126 /* For subtraction, non-infinite operands must be of
2127 different signs to yield an overflow. Its sign is
2128 therefore that of the first operand or the opposite of
2129 that of the second operand. A first operand of 0 counts
2130 as positive here, for the corner case 0 - (-INF), which
2131 overflows, but must yield +INF. For infinite operands 0
2132 - INF is negative, not positive. */
2133 || (code == MINUS_EXPR
2134 && (sgn1 >= 0
2135 ? !is_positive_overflow_infinity (val2)
2136 : is_negative_overflow_infinity (val2)))
2137 /* We only get in here with positive shift count, so the
2138 overflow direction is the same as the sign of val1.
2139 Actually rshift does not overflow at all, but we only
2140 handle the case of shifting overflowed -INF and +INF. */
2141 || (code == RSHIFT_EXPR
2142 && sgn1 >= 0)
2143 /* For division, the only case is -INF / -1 = +INF. */
2144 || code == TRUNC_DIV_EXPR
2145 || code == FLOOR_DIV_EXPR
2146 || code == CEIL_DIV_EXPR
2147 || code == EXACT_DIV_EXPR
2148 || code == ROUND_DIV_EXPR)
2149 return (needs_overflow_infinity (TREE_TYPE (res))
2150 ? positive_overflow_infinity (TREE_TYPE (res))
2151 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2152 else
2153 return (needs_overflow_infinity (TREE_TYPE (res))
2154 ? negative_overflow_infinity (TREE_TYPE (res))
2155 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2156 }
2157
2158 return res;
2159 }
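
/* Illustrative sketch only, not used by VRP itself: the unsigned
   overflow checks applied above, restated for plain unsigned ints.
   The example_* names are made up for this sketch.  Unsigned
   addition overflowed iff the result wrapped around below the first
   operand; unsigned multiplication overflowed iff dividing the
   product by the first (nonzero) operand does not give back the
   second operand.  */

static int
example_unsigned_add_overflows (unsigned int a, unsigned int b)
{
  unsigned int res = a + b;  /* Wraps modulo 2^N on overflow.  */
  return res < a;
}

static int
example_unsigned_mult_overflows (unsigned int a, unsigned int b)
{
  unsigned int res = a * b;  /* Likewise wraps on overflow.  */
  return a != 0 && res / a != b;
}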
2160
2161
2162 /* For range VR compute two double_int bitmasks. If a bit is unset
2163 in *MAY_BE_NONZERO, then that bit is 0 for every number in the
2164 range; otherwise it may be 0 or 1. If a bit is set in
2165 *MUST_BE_NONZERO, then that bit is 1 for every number in the
2166 range; otherwise it may be 0 or 1. */
2167
2168 static bool
2169 zero_nonzero_bits_from_vr (value_range_t *vr,
2170 double_int *may_be_nonzero,
2171 double_int *must_be_nonzero)
2172 {
2173 *may_be_nonzero = double_int_minus_one;
2174 *must_be_nonzero = double_int_zero;
2175 if (!range_int_cst_p (vr))
2176 return false;
2177
2178 if (range_int_cst_singleton_p (vr))
2179 {
2180 *may_be_nonzero = tree_to_double_int (vr->min);
2181 *must_be_nonzero = *may_be_nonzero;
2182 }
2183 else if (tree_int_cst_sgn (vr->min) >= 0
2184 || tree_int_cst_sgn (vr->max) < 0)
2185 {
2186 double_int dmin = tree_to_double_int (vr->min);
2187 double_int dmax = tree_to_double_int (vr->max);
2188 double_int xor_mask = double_int_xor (dmin, dmax);
2189 *may_be_nonzero = double_int_ior (dmin, dmax);
2190 *must_be_nonzero = double_int_and (dmin, dmax);
2191 if (xor_mask.high != 0)
2192 {
2193 unsigned HOST_WIDE_INT mask
2194 = ((unsigned HOST_WIDE_INT) 1
2195 << floor_log2 (xor_mask.high)) - 1;
2196 may_be_nonzero->low = ALL_ONES;
2197 may_be_nonzero->high |= mask;
2198 must_be_nonzero->low = 0;
2199 must_be_nonzero->high &= ~mask;
2200 }
2201 else if (xor_mask.low != 0)
2202 {
2203 unsigned HOST_WIDE_INT mask
2204 = ((unsigned HOST_WIDE_INT) 1
2205 << floor_log2 (xor_mask.low)) - 1;
2206 may_be_nonzero->low |= mask;
2207 must_be_nonzero->low &= ~mask;
2208 }
2209 }
2210
2211 return true;
2212 }
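
/* Illustrative sketch only, not used by VRP itself: the mask
   derivation performed by zero_nonzero_bits_from_vr, for a range
   [RMIN, RMAX] of unsigned 64-bit values with RMIN <= RMAX.  The
   example_* name is made up for this sketch.  Bits above the highest
   bit in which RMIN and RMAX differ are fixed across the whole
   range; every bit strictly below it may be either 0 or 1 somewhere
   in the range, so those bits are left undetermined in both masks.  */

static void
example_zero_nonzero_bits (unsigned long long rmin, unsigned long long rmax,
                           unsigned long long *may_be_nonzero,
                           unsigned long long *must_be_nonzero)
{
  unsigned long long xor_mask = rmin ^ rmax;

  *may_be_nonzero = rmin | rmax;
  *must_be_nonzero = rmin & rmax;

  if (xor_mask != 0)
    {
      int msb = 63;
      unsigned long long unknown;

      /* Find the highest differing bit ...  */
      while (!((xor_mask >> msb) & 1))
        msb--;
      /* ... and mark every bit strictly below it as unknown.  */
      unknown = ((unsigned long long) 1 << msb) - 1;

      *may_be_nonzero |= unknown;
      *must_be_nonzero &= ~unknown;
    }
}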
2213
2214 /* Helper to extract a value-range *VR for a multiplicative operation
2215 *VR0 CODE *VR1. */
2216
2217 static void
2218 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2219 enum tree_code code,
2220 value_range_t *vr0, value_range_t *vr1)
2221 {
2222 enum value_range_type type;
2223 tree val[4];
2224 size_t i;
2225 tree min, max;
2226 bool sop;
2227 int cmp;
2228
2229 /* Multiplications, divisions and shifts are a bit tricky to handle,
2230 depending on the mix of signs we have in the two ranges, we
2231 need to operate on different values to get the minimum and
2232 maximum values for the new range. One approach is to figure
2233 out all the variations of range combinations and do the
2234 operations.
2235
2236 However, this involves several calls to compare_values and it
2237 is pretty convoluted. It's simpler to do the 4 operations
2238 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2239 MAX1) and then figure the smallest and largest values to form
2240 the new range. */
2241 gcc_assert (code == MULT_EXPR
2242 || code == TRUNC_DIV_EXPR
2243 || code == FLOOR_DIV_EXPR
2244 || code == CEIL_DIV_EXPR
2245 || code == EXACT_DIV_EXPR
2246 || code == ROUND_DIV_EXPR
2247 || code == RSHIFT_EXPR);
2248 gcc_assert ((vr0->type == VR_RANGE
2249 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2250 && vr0->type == vr1->type);
2251
2252 type = vr0->type;
2253
2254 /* Compute the 4 cross operations. */
2255 sop = false;
2256 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2257 if (val[0] == NULL_TREE)
2258 sop = true;
2259
2260 if (vr1->max == vr1->min)
2261 val[1] = NULL_TREE;
2262 else
2263 {
2264 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2265 if (val[1] == NULL_TREE)
2266 sop = true;
2267 }
2268
2269 if (vr0->max == vr0->min)
2270 val[2] = NULL_TREE;
2271 else
2272 {
2273 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2274 if (val[2] == NULL_TREE)
2275 sop = true;
2276 }
2277
2278 if (vr0->min == vr0->max || vr1->min == vr1->max)
2279 val[3] = NULL_TREE;
2280 else
2281 {
2282 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2283 if (val[3] == NULL_TREE)
2284 sop = true;
2285 }
2286
2287 if (sop)
2288 {
2289 set_value_range_to_varying (vr);
2290 return;
2291 }
2292
2293 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2294 of VAL[i]. */
2295 min = val[0];
2296 max = val[0];
2297 for (i = 1; i < 4; i++)
2298 {
2299 if (!is_gimple_min_invariant (min)
2300 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2301 || !is_gimple_min_invariant (max)
2302 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2303 break;
2304
2305 if (val[i])
2306 {
2307 if (!is_gimple_min_invariant (val[i])
2308 || (TREE_OVERFLOW (val[i])
2309 && !is_overflow_infinity (val[i])))
2310 {
2311 /* If we found an overflowed value, set MIN and MAX
2312 to it so that we set the resulting range to
2313 VARYING. */
2314 min = max = val[i];
2315 break;
2316 }
2317
2318 if (compare_values (val[i], min) == -1)
2319 min = val[i];
2320
2321 if (compare_values (val[i], max) == 1)
2322 max = val[i];
2323 }
2324 }
2325
2326 /* If either MIN or MAX overflowed, then set the resulting range to
2327 VARYING. But we do accept an overflow infinity
2328 representation. */
2329 if (min == NULL_TREE
2330 || !is_gimple_min_invariant (min)
2331 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2332 || max == NULL_TREE
2333 || !is_gimple_min_invariant (max)
2334 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2335 {
2336 set_value_range_to_varying (vr);
2337 return;
2338 }
2339
2340 /* We punt if:
2341 1) [-INF, +INF]
2342 2) [-INF, +-INF(OVF)]
2343 3) [+-INF(OVF), +INF]
2344 4) [+-INF(OVF), +-INF(OVF)]
2345 We learn nothing when we have INF and INF(OVF) on both sides.
2346 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2347 overflow. */
2348 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2349 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2350 {
2351 set_value_range_to_varying (vr);
2352 return;
2353 }
2354
2355 cmp = compare_values (min, max);
2356 if (cmp == -2 || cmp == 1)
2357 {
2358 /* If the new range has its limits swapped around (MIN > MAX),
2359 then the operation caused one of them to wrap around, mark
2360 the new range VARYING. */
2361 set_value_range_to_varying (vr);
2362 }
2363 else
2364 set_value_range (vr, type, min, max, NULL);
2365 }
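
/* Illustrative sketch only, not used by VRP itself: the "four cross
   operations" idea from the function above, for plain int ranges and
   MULT_EXPR.  The example_* name is made up for this sketch.  A
   wider type is used so the corner products themselves cannot
   overflow; the real code instead tracks overflow infinities and
   punts when it must.  */

static void
example_mult_range (int min0, int max0, int min1, int max1,
                    long long *min, long long *max)
{
  long long val[4];
  int i;

  /* The extrema of x * y over a rectangle of values are attained at
     its corners, so it suffices to look at the four corner products.  */
  val[0] = (long long) min0 * min1;
  val[1] = (long long) min0 * max1;
  val[2] = (long long) max0 * min1;
  val[3] = (long long) max0 * max1;

  *min = *max = val[0];
  for (i = 1; i < 4; i++)
    {
      if (val[i] < *min)
        *min = val[i];
      if (val[i] > *max)
        *max = val[i];
    }
}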
2366
2367 /* Extract range information from a binary operation CODE based on
2368 the ranges of each of its operands, *VR0 and *VR1 with resulting
2369 type EXPR_TYPE. The resulting range is stored in *VR. */
2370
2371 static void
2372 extract_range_from_binary_expr_1 (value_range_t *vr,
2373 enum tree_code code, tree expr_type,
2374 value_range_t *vr0_, value_range_t *vr1_)
2375 {
2376 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2377 enum value_range_type type;
2378 tree min = NULL_TREE, max = NULL_TREE;
2379 int cmp;
2380
2381 if (!INTEGRAL_TYPE_P (expr_type)
2382 && !POINTER_TYPE_P (expr_type))
2383 {
2384 set_value_range_to_varying (vr);
2385 return;
2386 }
2387
2388 /* Not all binary expressions can be applied to ranges in a
2389 meaningful way. Handle only arithmetic operations. */
2390 if (code != PLUS_EXPR
2391 && code != MINUS_EXPR
2392 && code != POINTER_PLUS_EXPR
2393 && code != MULT_EXPR
2394 && code != TRUNC_DIV_EXPR
2395 && code != FLOOR_DIV_EXPR
2396 && code != CEIL_DIV_EXPR
2397 && code != EXACT_DIV_EXPR
2398 && code != ROUND_DIV_EXPR
2399 && code != TRUNC_MOD_EXPR
2400 && code != RSHIFT_EXPR
2401 && code != MIN_EXPR
2402 && code != MAX_EXPR
2403 && code != BIT_AND_EXPR
2404 && code != BIT_IOR_EXPR
2405 && code != BIT_XOR_EXPR)
2406 {
2407 set_value_range_to_varying (vr);
2408 return;
2409 }
2410
2411 /* If both ranges are UNDEFINED, so is the result. */
2412 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2413 {
2414 set_value_range_to_undefined (vr);
2415 return;
2416 }
2417 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2418 code. At some point we may want to special-case operations that
2419 have UNDEFINED result for all or some value-ranges of the not UNDEFINED
2420 operand. */
2421 else if (vr0.type == VR_UNDEFINED)
2422 set_value_range_to_varying (&vr0);
2423 else if (vr1.type == VR_UNDEFINED)
2424 set_value_range_to_varying (&vr1);
2425
2426 /* The type of the resulting value range defaults to VR0.TYPE. */
2427 type = vr0.type;
2428
2429 /* Refuse to operate on VARYING ranges, ranges of different kinds
2430 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2431 because we may be able to derive a useful range even if one of
2432 the operands is VR_VARYING or symbolic range. Similarly for
2433 divisions. TODO, we may be able to derive anti-ranges in
2434 some cases. */
2435 if (code != BIT_AND_EXPR
2436 && code != BIT_IOR_EXPR
2437 && code != TRUNC_DIV_EXPR
2438 && code != FLOOR_DIV_EXPR
2439 && code != CEIL_DIV_EXPR
2440 && code != EXACT_DIV_EXPR
2441 && code != ROUND_DIV_EXPR
2442 && code != TRUNC_MOD_EXPR
2443 && (vr0.type == VR_VARYING
2444 || vr1.type == VR_VARYING
2445 || vr0.type != vr1.type
2446 || symbolic_range_p (&vr0)
2447 || symbolic_range_p (&vr1)))
2448 {
2449 set_value_range_to_varying (vr);
2450 return;
2451 }
2452
2453 /* Now evaluate the expression to determine the new range. */
2454 if (POINTER_TYPE_P (expr_type))
2455 {
2456 if (code == MIN_EXPR || code == MAX_EXPR)
2457 {
2458 /* For MIN/MAX expressions with pointers, we only care about
2459 nullness, if both are non null, then the result is nonnull.
2460 If both are null, then the result is null. Otherwise they
2461 are varying. */
2462 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2463 set_value_range_to_nonnull (vr, expr_type);
2464 else if (range_is_null (&vr0) && range_is_null (&vr1))
2465 set_value_range_to_null (vr, expr_type);
2466 else
2467 set_value_range_to_varying (vr);
2468 }
2469 else if (code == POINTER_PLUS_EXPR)
2470 {
2471 /* For pointer types, we are really only interested in asserting
2472 whether the expression evaluates to non-NULL. */
2473 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2474 set_value_range_to_nonnull (vr, expr_type);
2475 else if (range_is_null (&vr0) && range_is_null (&vr1))
2476 set_value_range_to_null (vr, expr_type);
2477 else
2478 set_value_range_to_varying (vr);
2479 }
2480 else if (code == BIT_AND_EXPR)
2481 {
2482 /* For pointer types, we are really only interested in asserting
2483 whether the expression evaluates to non-NULL. */
2484 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2485 set_value_range_to_nonnull (vr, expr_type);
2486 else if (range_is_null (&vr0) || range_is_null (&vr1))
2487 set_value_range_to_null (vr, expr_type);
2488 else
2489 set_value_range_to_varying (vr);
2490 }
2491 else
2492 set_value_range_to_varying (vr);
2493
2494 return;
2495 }
2496
2497 /* For integer ranges, apply the operation to each end of the
2498 range and see what we end up with. */
2499 if (code == PLUS_EXPR)
2500 {
2501 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2502 VR_VARYING. It would take more effort to compute a precise
2503 range for such a case. For example, if we have op0 == 1 and
2504 op1 == -1 with their ranges both being ~[0,0], we would have
2505 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2506 Note that we are guaranteed to have vr0.type == vr1.type at
2507 this point. */
2508 if (vr0.type == VR_ANTI_RANGE)
2509 {
2510 set_value_range_to_varying (vr);
2511 return;
2512 }
2513
2514 /* For operations that make the resulting range directly
2515 proportional to the original ranges, apply the operation to
2516 the same end of each range. */
2517 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2518 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2519
2520 /* If both additions overflowed the range kind is still correct.
2521 This happens regularly with subtracting something in unsigned
2522 arithmetic.
2523 ??? See PR30318 for all the cases we do not handle. */
2524 if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2525 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2526 {
2527 min = build_int_cst_wide (TREE_TYPE (min),
2528 TREE_INT_CST_LOW (min),
2529 TREE_INT_CST_HIGH (min));
2530 max = build_int_cst_wide (TREE_TYPE (max),
2531 TREE_INT_CST_LOW (max),
2532 TREE_INT_CST_HIGH (max));
2533 }
2534 }
2535 else if (code == MIN_EXPR
2536 || code == MAX_EXPR)
2537 {
2538 if (vr0.type == VR_ANTI_RANGE)
2539 {
2540 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2541 the resulting VR_ANTI_RANGE is the same - intersection
2542 of the two ranges. */
2543 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2544 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2545 }
2546 else
2547 {
2548 /* For operations that make the resulting range directly
2549 proportional to the original ranges, apply the operation to
2550 the same end of each range. */
2551 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2552 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2553 }
2554 }
2555 else if (code == MULT_EXPR)
2556 {
2557 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2558 drop to VR_VARYING. It would take more effort to compute a
2559 precise range for such a case. For example, if we have
2560 op0 == 65536 and op1 == 65536 with their ranges both being
2561 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2562 we cannot claim that the product is in ~[0,0]. Note that we
2563 are guaranteed to have vr0.type == vr1.type at this
2564 point. */
2565 if (vr0.type == VR_ANTI_RANGE
2566 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2567 {
2568 set_value_range_to_varying (vr);
2569 return;
2570 }
2571
2572 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2573 return;
2574 }
2575 else if (code == RSHIFT_EXPR)
2576 {
2577 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2578 then drop to VR_VARYING. Outside of this range we get undefined
2579 behavior from the shift operation. We cannot even trust
2580 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2581 shifts, and the operation at the tree level may be widened. */
2582 if (vr1.type != VR_RANGE
2583 || !value_range_nonnegative_p (&vr1)
2584 || TREE_CODE (vr1.max) != INTEGER_CST
2585 || compare_tree_int (vr1.max, TYPE_PRECISION (expr_type) - 1) == 1)
2586 {
2587 set_value_range_to_varying (vr);
2588 return;
2589 }
2590
2591 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2592 return;
2593 }
2594 else if (code == TRUNC_DIV_EXPR
2595 || code == FLOOR_DIV_EXPR
2596 || code == CEIL_DIV_EXPR
2597 || code == EXACT_DIV_EXPR
2598 || code == ROUND_DIV_EXPR)
2599 {
2600 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2601 {
2602 /* For division, if op1 has VR_RANGE but op0 does not, something
2603 can be deduced just from that range. Say [min, max] / [4, max]
2604 gives [min / 4, max / 4] range. */
2605 if (vr1.type == VR_RANGE
2606 && !symbolic_range_p (&vr1)
2607 && !range_includes_zero_p (&vr1))
2608 {
2609 vr0.type = type = VR_RANGE;
2610 vr0.min = vrp_val_min (expr_type);
2611 vr0.max = vrp_val_max (expr_type);
2612 }
2613 else
2614 {
2615 set_value_range_to_varying (vr);
2616 return;
2617 }
2618 }
2619
2620 /* For divisions, if flag_non_call_exceptions is true, we must
2621 not eliminate a division by zero. */
2622 if (cfun->can_throw_non_call_exceptions
2623 && (vr1.type != VR_RANGE
2624 || symbolic_range_p (&vr1)
2625 || range_includes_zero_p (&vr1)))
2626 {
2627 set_value_range_to_varying (vr);
2628 return;
2629 }
2630
2631 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2632 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2633 include 0. */
2634 if (vr0.type == VR_RANGE
2635 && (vr1.type != VR_RANGE
2636 || symbolic_range_p (&vr1)
2637 || range_includes_zero_p (&vr1)))
2638 {
2639 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2640 int cmp;
2641
2642 min = NULL_TREE;
2643 max = NULL_TREE;
2644 if (TYPE_UNSIGNED (expr_type)
2645 || value_range_nonnegative_p (&vr1))
2646 {
2647 /* For unsigned division or when divisor is known
2648 to be non-negative, the range has to cover
2649 all numbers from 0 to max for positive max
2650 and all numbers from min to 0 for negative min. */
2651 cmp = compare_values (vr0.max, zero);
2652 if (cmp == -1)
2653 max = zero;
2654 else if (cmp == 0 || cmp == 1)
2655 max = vr0.max;
2656 else
2657 type = VR_VARYING;
2658 cmp = compare_values (vr0.min, zero);
2659 if (cmp == 1)
2660 min = zero;
2661 else if (cmp == 0 || cmp == -1)
2662 min = vr0.min;
2663 else
2664 type = VR_VARYING;
2665 }
2666 else
2667 {
2668 /* Otherwise the range is -max .. max or min .. -min
2669 depending on which bound is bigger in absolute value,
2670 as the division can change the sign. */
2671 abs_extent_range (vr, vr0.min, vr0.max);
2672 return;
2673 }
2674 if (type == VR_VARYING)
2675 {
2676 set_value_range_to_varying (vr);
2677 return;
2678 }
2679 }
2680 else
2681 {
2682 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2683 return;
2684 }
2685 }
2686 else if (code == TRUNC_MOD_EXPR)
2687 {
2688 if (vr1.type != VR_RANGE
2689 || symbolic_range_p (&vr1)
2690 || range_includes_zero_p (&vr1)
2691 || vrp_val_is_min (vr1.min))
2692 {
2693 set_value_range_to_varying (vr);
2694 return;
2695 }
2696 type = VR_RANGE;
2697 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2698 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2699 if (tree_int_cst_lt (max, vr1.max))
2700 max = vr1.max;
2701 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2702 /* If the dividend is non-negative the modulus will be
2703 non-negative as well. */
2704 if (TYPE_UNSIGNED (expr_type)
2705 || value_range_nonnegative_p (&vr0))
2706 min = build_int_cst (TREE_TYPE (max), 0);
2707 else
2708 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2709 }
2710 else if (code == MINUS_EXPR)
2711 {
2712 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2713 VR_VARYING. It would take more effort to compute a precise
2714 range for such a case. For example, if we have op0 == 1 and
2715 op1 == 1 with their ranges both being ~[0,0], we would have
2716 op0 - op1 == 0, so we cannot claim that the difference is in
2717 ~[0,0]. Note that we are guaranteed to have
2718 vr0.type == vr1.type at this point. */
2719 if (vr0.type == VR_ANTI_RANGE)
2720 {
2721 set_value_range_to_varying (vr);
2722 return;
2723 }
2724
2725 /* For MINUS_EXPR, apply the operation to the opposite ends of
2726 each range. */
2727 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2728 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2729 }
2730 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2731 {
2732 bool int_cst_range0, int_cst_range1;
2733 double_int may_be_nonzero0, may_be_nonzero1;
2734 double_int must_be_nonzero0, must_be_nonzero1;
2735
2736 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2737 &must_be_nonzero0);
2738 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2739 &must_be_nonzero1);
2740
2741 type = VR_RANGE;
2742 if (code == BIT_AND_EXPR)
2743 {
2744 double_int dmax;
2745 min = double_int_to_tree (expr_type,
2746 double_int_and (must_be_nonzero0,
2747 must_be_nonzero1));
2748 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
2749 /* If both input ranges contain only negative values we can
2750 truncate the result range maximum to the minimum of the
2751 input range maxima. */
2752 if (int_cst_range0 && int_cst_range1
2753 && tree_int_cst_sgn (vr0.max) < 0
2754 && tree_int_cst_sgn (vr1.max) < 0)
2755 {
2756 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2757 TYPE_UNSIGNED (expr_type));
2758 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2759 TYPE_UNSIGNED (expr_type));
2760 }
2761 /* If either input range contains only non-negative values
2762 we can truncate the result range maximum to the respective
2763 maximum of the input range. */
2764 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2765 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2766 TYPE_UNSIGNED (expr_type));
2767 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2768 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2769 TYPE_UNSIGNED (expr_type));
2770 max = double_int_to_tree (expr_type, dmax);
2771 }
2772 else if (code == BIT_IOR_EXPR)
2773 {
2774 double_int dmin;
2775 max = double_int_to_tree (expr_type,
2776 double_int_ior (may_be_nonzero0,
2777 may_be_nonzero1));
2778 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
2779 /* If the input ranges contain only positive values we can
2780 truncate the minimum of the result range to the maximum
2781 of the input range minima. */
2782 if (int_cst_range0 && int_cst_range1
2783 && tree_int_cst_sgn (vr0.min) >= 0
2784 && tree_int_cst_sgn (vr1.min) >= 0)
2785 {
2786 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2787 TYPE_UNSIGNED (expr_type));
2788 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2789 TYPE_UNSIGNED (expr_type));
2790 }
2791 /* If either input range contains only negative values
2792 we can truncate the minimum of the result range to the
2793 respective minimum range. */
2794 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2795 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2796 TYPE_UNSIGNED (expr_type));
2797 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2798 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2799 TYPE_UNSIGNED (expr_type));
2800 min = double_int_to_tree (expr_type, dmin);
2801 }
2802 else if (code == BIT_XOR_EXPR)
2803 {
2804 double_int result_zero_bits, result_one_bits;
2805 result_zero_bits
2806 = double_int_ior (double_int_and (must_be_nonzero0,
2807 must_be_nonzero1),
2808 double_int_not
2809 (double_int_ior (may_be_nonzero0,
2810 may_be_nonzero1)));
2811 result_one_bits
2812 = double_int_ior (double_int_and
2813 (must_be_nonzero0,
2814 double_int_not (may_be_nonzero1)),
2815 double_int_and
2816 (must_be_nonzero1,
2817 double_int_not (may_be_nonzero0)));
2818 max = double_int_to_tree (expr_type,
2819 double_int_not (result_zero_bits));
2820 min = double_int_to_tree (expr_type, result_one_bits);
2821 /* If the range has all positive or all negative values the
2822 result is better than VARYING. */
2823 if (tree_int_cst_sgn (min) < 0
2824 || tree_int_cst_sgn (max) >= 0)
2825 ;
2826 else
2827 max = min = NULL_TREE;
2828 }
2829 }
2830 else
2831 gcc_unreachable ();
2832
2833 /* If either MIN or MAX overflowed, then set the resulting range to
2834 VARYING. But we do accept an overflow infinity
2835 representation. */
2836 if (min == NULL_TREE
2837 || !is_gimple_min_invariant (min)
2838 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2839 || max == NULL_TREE
2840 || !is_gimple_min_invariant (max)
2841 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2842 {
2843 set_value_range_to_varying (vr);
2844 return;
2845 }
2846
2847 /* We punt if:
2848 1) [-INF, +INF]
2849 2) [-INF, +-INF(OVF)]
2850 3) [+-INF(OVF), +INF]
2851 4) [+-INF(OVF), +-INF(OVF)]
2852 We learn nothing when we have INF and INF(OVF) on both sides.
2853 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2854 overflow. */
2855 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2856 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2857 {
2858 set_value_range_to_varying (vr);
2859 return;
2860 }
2861
2862 cmp = compare_values (min, max);
2863 if (cmp == -2 || cmp == 1)
2864 {
2865 /* If the new range has its limits swapped around (MIN > MAX),
2866 then the operation caused one of them to wrap around, mark
2867 the new range VARYING. */
2868 set_value_range_to_varying (vr);
2869 }
2870 else
2871 set_value_range (vr, type, min, max, NULL);
2872 }
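
/* Illustrative sketch only, not used by VRP itself: the
   TRUNC_MOD_EXPR bound derived above, for plain ints.  The
   example_* name is made up for this sketch.  Given a divisor range
   [BMIN, BMAX] that does not contain zero and with BMIN above the
   type's minimum (the real code punts otherwise), every value of
   x % y lies in [-(M - 1), M - 1] where M = max (|BMIN|, |BMAX|),
   and in [0, M - 1] when the dividend is known non-negative.  */

static void
example_trunc_mod_range (int bmin, int bmax, int dividend_nonnegative,
                         int *min, int *max)
{
  int abs_bmin = bmin < 0 ? -bmin : bmin;
  int abs_bmax = bmax < 0 ? -bmax : bmax;
  int m = abs_bmin > abs_bmax ? abs_bmin : abs_bmax;

  *max = m - 1;
  *min = dividend_nonnegative ? 0 : -(m - 1);
}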
2873
2874 /* Extract range information from a binary expression OP0 CODE OP1 based on
2875 the ranges of each of its operands with resulting type EXPR_TYPE.
2876 The resulting range is stored in *VR. */
2877
2878 static void
2879 extract_range_from_binary_expr (value_range_t *vr,
2880 enum tree_code code,
2881 tree expr_type, tree op0, tree op1)
2882 {
2883 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2884 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2885
2886 /* Get value ranges for each operand. For constant operands, create
2887 a new value range with the operand to simplify processing. */
2888 if (TREE_CODE (op0) == SSA_NAME)
2889 vr0 = *(get_value_range (op0));
2890 else if (is_gimple_min_invariant (op0))
2891 set_value_range_to_value (&vr0, op0, NULL);
2892 else
2893 set_value_range_to_varying (&vr0);
2894
2895 if (TREE_CODE (op1) == SSA_NAME)
2896 vr1 = *(get_value_range (op1));
2897 else if (is_gimple_min_invariant (op1))
2898 set_value_range_to_value (&vr1, op1, NULL);
2899 else
2900 set_value_range_to_varying (&vr1);
2901
2902 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
2903 }
2904
2905 /* Extract range information from a unary operation CODE based on
2906 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
2907 The resulting range is stored in *VR. */
2908
2909 static void
2910 extract_range_from_unary_expr_1 (value_range_t *vr,
2911 enum tree_code code, tree type,
2912 value_range_t *vr0_, tree op0_type)
2913 {
2914 value_range_t vr0 = *vr0_;
2915
2916 /* VRP only operates on integral and pointer types. */
2917 if (!(INTEGRAL_TYPE_P (op0_type)
2918 || POINTER_TYPE_P (op0_type))
2919 || !(INTEGRAL_TYPE_P (type)
2920 || POINTER_TYPE_P (type)))
2921 {
2922 set_value_range_to_varying (vr);
2923 return;
2924 }
2925
2926 /* If VR0 is UNDEFINED, so is the result. */
2927 if (vr0.type == VR_UNDEFINED)
2928 {
2929 set_value_range_to_undefined (vr);
2930 return;
2931 }
2932
2933 if (CONVERT_EXPR_CODE_P (code))
2934 {
2935 tree inner_type = op0_type;
2936 tree outer_type = type;
2937
2938 /* If the expression evaluates to a pointer, we are only interested in
2939 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2940 if (POINTER_TYPE_P (type))
2941 {
2942 if (range_is_nonnull (&vr0))
2943 set_value_range_to_nonnull (vr, type);
2944 else if (range_is_null (&vr0))
2945 set_value_range_to_null (vr, type);
2946 else
2947 set_value_range_to_varying (vr);
2948 return;
2949 }
2950
2951 /* If VR0 is varying and we increase the type precision, assume
2952 a full range for the following transformation. */
2953 if (vr0.type == VR_VARYING
2954 && INTEGRAL_TYPE_P (inner_type)
2955 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2956 {
2957 vr0.type = VR_RANGE;
2958 vr0.min = TYPE_MIN_VALUE (inner_type);
2959 vr0.max = TYPE_MAX_VALUE (inner_type);
2960 }
2961
2962 /* If VR0 is a constant range or anti-range and the conversion is
2963 not truncating we can convert the min and max values and
2964 canonicalize the resulting range. Otherwise we can do the
2965 conversion if the size of the range is less than what the
2966 precision of the target type can represent and the range is
2967 not an anti-range. */
2968 if ((vr0.type == VR_RANGE
2969 || vr0.type == VR_ANTI_RANGE)
2970 && TREE_CODE (vr0.min) == INTEGER_CST
2971 && TREE_CODE (vr0.max) == INTEGER_CST
2972 && (!is_overflow_infinity (vr0.min)
2973 || (vr0.type == VR_RANGE
2974 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2975 && needs_overflow_infinity (outer_type)
2976 && supports_overflow_infinity (outer_type)))
2977 && (!is_overflow_infinity (vr0.max)
2978 || (vr0.type == VR_RANGE
2979 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2980 && needs_overflow_infinity (outer_type)
2981 && supports_overflow_infinity (outer_type)))
2982 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2983 || (vr0.type == VR_RANGE
2984 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2985 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2986 size_int (TYPE_PRECISION (outer_type)))))))
2987 {
2988 tree new_min, new_max;
2989 if (is_overflow_infinity (vr0.min))
2990 new_min = negative_overflow_infinity (outer_type);
2991 else
2992 new_min = force_fit_type_double (outer_type,
2993 tree_to_double_int (vr0.min),
2994 0, false);
2995 if (is_overflow_infinity (vr0.max))
2996 new_max = positive_overflow_infinity (outer_type);
2997 else
2998 new_max = force_fit_type_double (outer_type,
2999 tree_to_double_int (vr0.max),
3000 0, false);
3001 set_and_canonicalize_value_range (vr, vr0.type,
3002 new_min, new_max, NULL);
3003 return;
3004 }
3005
3006 set_value_range_to_varying (vr);
3007 return;
3008 }
3009 else if (code == NEGATE_EXPR)
3010 {
3011 /* -X is simply 0 - X, so re-use existing code that also handles
3012 anti-ranges fine. */
3013 value_range_t zero = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3014 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3015 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3016 return;
3017 }
3018 else if (code == ABS_EXPR)
3019 {
3020 tree min, max;
3021 int cmp;
3022
3023 /* Pass through vr0 in the easy cases. */
3024 if (TYPE_UNSIGNED (type)
3025 || value_range_nonnegative_p (&vr0))
3026 {
3027 copy_value_range (vr, &vr0);
3028 return;
3029 }
3030
3031 /* For the remaining varying or symbolic ranges we can't do anything
3032 useful. */
3033 if (vr0.type == VR_VARYING
3034 || symbolic_range_p (&vr0))
3035 {
3036 set_value_range_to_varying (vr);
3037 return;
3038 }
3039
3040 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3041 useful range. */
3042 if (!TYPE_OVERFLOW_UNDEFINED (type)
3043 && ((vr0.type == VR_RANGE
3044 && vrp_val_is_min (vr0.min))
3045 || (vr0.type == VR_ANTI_RANGE
3046 && !vrp_val_is_min (vr0.min))))
3047 {
3048 set_value_range_to_varying (vr);
3049 return;
3050 }
3051
3052 /* ABS_EXPR may flip the range around, if the original range
3053 included negative values. */
3054 if (is_overflow_infinity (vr0.min))
3055 min = positive_overflow_infinity (type);
3056 else if (!vrp_val_is_min (vr0.min))
3057 min = fold_unary_to_constant (code, type, vr0.min);
3058 else if (!needs_overflow_infinity (type))
3059 min = TYPE_MAX_VALUE (type);
3060 else if (supports_overflow_infinity (type))
3061 min = positive_overflow_infinity (type);
3062 else
3063 {
3064 set_value_range_to_varying (vr);
3065 return;
3066 }
3067
3068 if (is_overflow_infinity (vr0.max))
3069 max = positive_overflow_infinity (type);
3070 else if (!vrp_val_is_min (vr0.max))
3071 max = fold_unary_to_constant (code, type, vr0.max);
3072 else if (!needs_overflow_infinity (type))
3073 max = TYPE_MAX_VALUE (type);
3074 else if (supports_overflow_infinity (type)
3075 /* We shouldn't generate [+INF, +INF] as set_value_range
3076 doesn't like this and ICEs. */
3077 && !is_positive_overflow_infinity (min))
3078 max = positive_overflow_infinity (type);
3079 else
3080 {
3081 set_value_range_to_varying (vr);
3082 return;
3083 }
3084
3085 cmp = compare_values (min, max);
3086
3087 /* If a VR_ANTI_RANGE contains zero, then we have
3088 ~[-INF, min(MIN, MAX)]. */
3089 if (vr0.type == VR_ANTI_RANGE)
3090 {
3091 if (range_includes_zero_p (&vr0))
3092 {
3093 /* Take the lower of the two values. */
3094 if (cmp != 1)
3095 max = min;
3096
3097 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3098 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3099 flag_wrapv is set and the original anti-range doesn't include
3100 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3101 if (TYPE_OVERFLOW_WRAPS (type))
3102 {
3103 tree type_min_value = TYPE_MIN_VALUE (type);
3104
3105 min = (vr0.min != type_min_value
3106 ? int_const_binop (PLUS_EXPR, type_min_value,
3107 integer_one_node)
3108 : type_min_value);
3109 }
3110 else
3111 {
3112 if (overflow_infinity_range_p (&vr0))
3113 min = negative_overflow_infinity (type);
3114 else
3115 min = TYPE_MIN_VALUE (type);
3116 }
3117 }
3118 else
3119 {
3120 /* All else has failed, so create the range [0, INF], even for
3121 flag_wrapv since TYPE_MIN_VALUE is in the original
3122 anti-range. */
3123 vr0.type = VR_RANGE;
3124 min = build_int_cst (type, 0);
3125 if (needs_overflow_infinity (type))
3126 {
3127 if (supports_overflow_infinity (type))
3128 max = positive_overflow_infinity (type);
3129 else
3130 {
3131 set_value_range_to_varying (vr);
3132 return;
3133 }
3134 }
3135 else
3136 max = TYPE_MAX_VALUE (type);
3137 }
3138 }
3139
3140 /* If the range contains zero then we know that the minimum value in the
3141 range will be zero. */
3142 else if (range_includes_zero_p (&vr0))
3143 {
3144 if (cmp == 1)
3145 max = min;
3146 min = build_int_cst (type, 0);
3147 }
3148 else
3149 {
3150 /* If the range was reversed, swap MIN and MAX. */
3151 if (cmp == 1)
3152 {
3153 tree t = min;
3154 min = max;
3155 max = t;
3156 }
3157 }
3158
3159 cmp = compare_values (min, max);
3160 if (cmp == -2 || cmp == 1)
3161 {
3162 /* If the new range has its limits swapped around (MIN > MAX),
3163 then the operation caused one of them to wrap around, mark
3164 the new range VARYING. */
3165 set_value_range_to_varying (vr);
3166 }
3167 else
3168 set_value_range (vr, vr0.type, min, max, NULL);
3169 return;
3170 }
3171 else if (code == BIT_NOT_EXPR)
3172 {
3173 /* ~X is simply -1 - X, so re-use existing code that also handles
3174 anti-ranges fine. */
3175 value_range_t minusone = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3176 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3177 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3178 type, &minusone, &vr0);
3179 return;
3180 }
3181 else if (code == PAREN_EXPR)
3182 {
3183 copy_value_range (vr, &vr0);
3184 return;
3185 }
3186
3187 /* For unhandled operations fall back to varying. */
3188 set_value_range_to_varying (vr);
3189 return;
3190 }
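
/* Illustrative sketch only, not used by VRP itself: the identities
   that let NEGATE_EXPR and BIT_NOT_EXPR above reuse the MINUS_EXPR
   range code.  For any two's complement int X, -X == 0 - X and
   ~X == -1 - X, so the range of ~X over [MIN0, MAX0] is
   [-1 - MAX0, -1 - MIN0] (and never overflows); negation has the
   analogous form but can overflow at the type's minimum, which the
   real code handles through its overflow-infinity machinery.  The
   example_* name is made up for this sketch.  */

static void
example_bit_not_range (int min0, int max0, int *min, int *max)
{
  /* ~[MIN0, MAX0] == -1 - [MIN0, MAX0] == [-1 - MAX0, -1 - MIN0].  */
  *min = -1 - max0;
  *max = -1 - min0;
}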
3191
3192
3193 /* Extract range information from a unary expression CODE OP0 based on
3194 the range of its operand with resulting type TYPE.
3195 The resulting range is stored in *VR. */
3196
3197 static void
3198 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3199 tree type, tree op0)
3200 {
3201 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3202
3203 /* Get value ranges for the operand. For constant operands, create
3204 a new value range with the operand to simplify processing. */
3205 if (TREE_CODE (op0) == SSA_NAME)
3206 vr0 = *(get_value_range (op0));
3207 else if (is_gimple_min_invariant (op0))
3208 set_value_range_to_value (&vr0, op0, NULL);
3209 else
3210 set_value_range_to_varying (&vr0);
3211
3212 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3213 }
3214
3215
3216 /* Extract range information from a conditional expression STMT based on
3217 the ranges of each of its operands and the expression code. */
3218
3219 static void
3220 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3221 {
3222 tree op0, op1;
3223 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3224 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3225
3226 /* Get value ranges for each operand. For constant operands, create
3227 a new value range with the operand to simplify processing. */
3228 op0 = gimple_assign_rhs2 (stmt);
3229 if (TREE_CODE (op0) == SSA_NAME)
3230 vr0 = *(get_value_range (op0));
3231 else if (is_gimple_min_invariant (op0))
3232 set_value_range_to_value (&vr0, op0, NULL);
3233 else
3234 set_value_range_to_varying (&vr0);
3235
3236 op1 = gimple_assign_rhs3 (stmt);
3237 if (TREE_CODE (op1) == SSA_NAME)
3238 vr1 = *(get_value_range (op1));
3239 else if (is_gimple_min_invariant (op1))
3240 set_value_range_to_value (&vr1, op1, NULL);
3241 else
3242 set_value_range_to_varying (&vr1);
3243
3244 /* The resulting value range is the union of the operand ranges. */
3245 vrp_meet (&vr0, &vr1);
3246 copy_value_range (vr, &vr0);
3247 }
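
/* Illustrative sketch only, not used by VRP itself: the effect of
   vrp_meet on two plain int ranges, as used for COND_EXPR above.
   The example_* name is made up for this sketch.  The union of
   [MIN0, MAX0] and [MIN1, MAX1] is conservatively approximated by
   the smallest single range containing both; the real lattice also
   has to deal with anti-ranges, VARYING, UNDEFINED and equivalence
   sets.  */

static void
example_range_union (int min0, int max0, int min1, int max1,
                     int *min, int *max)
{
  *min = min0 < min1 ? min0 : min1;
  *max = max0 > max1 ? max0 : max1;
}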
3248
3249
3250 /* Extract range information from a comparison expression EXPR based
3251 on the range of its operand and the expression code. */
3252
3253 static void
3254 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3255 tree type, tree op0, tree op1)
3256 {
3257 bool sop = false;
3258 tree val;
3259
3260 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3261 NULL);
3262
3263 /* A disadvantage of using a special infinity as an overflow
3264 representation is that we lose the ability to record overflow
3265 when we don't have an infinity. So we have to ignore a result
3266 which relies on overflow. */
3267
3268 if (val && !is_overflow_infinity (val) && !sop)
3269 {
3270 /* Since this expression was found on the RHS of an assignment,
3271 its type may be different from _Bool. Convert VAL to EXPR's
3272 type. */
3273 val = fold_convert (type, val);
3274 if (is_gimple_min_invariant (val))
3275 set_value_range_to_value (vr, val, vr->equiv);
3276 else
3277 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3278 }
3279 else
3280 /* The result of a comparison is always true or false. */
3281 set_value_range_to_truthvalue (vr, type);
3282 }
3283
3284 /* Try to derive a nonnegative or nonzero range out of STMT relying
3285 primarily on generic routines in fold in conjunction with range data.
3286 Store the result in *VR */
3287
3288 static void
3289 extract_range_basic (value_range_t *vr, gimple stmt)
3290 {
3291 bool sop = false;
3292 tree type = gimple_expr_type (stmt);
3293
3294 if (INTEGRAL_TYPE_P (type)
3295 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3296 set_value_range_to_nonnegative (vr, type,
3297 sop || stmt_overflow_infinity (stmt));
3298 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3299 && !sop)
3300 set_value_range_to_nonnull (vr, type);
3301 else
3302 set_value_range_to_varying (vr);
3303 }
3304
3305
3306 /* Try to compute a useful range out of assignment STMT and store it
3307 in *VR. */
3308
3309 static void
3310 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3311 {
3312 enum tree_code code = gimple_assign_rhs_code (stmt);
3313
3314 if (code == ASSERT_EXPR)
3315 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3316 else if (code == SSA_NAME)
3317 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3318 else if (TREE_CODE_CLASS (code) == tcc_binary)
3319 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3320 gimple_expr_type (stmt),
3321 gimple_assign_rhs1 (stmt),
3322 gimple_assign_rhs2 (stmt));
3323 else if (TREE_CODE_CLASS (code) == tcc_unary)
3324 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3325 gimple_expr_type (stmt),
3326 gimple_assign_rhs1 (stmt));
3327 else if (code == COND_EXPR)
3328 extract_range_from_cond_expr (vr, stmt);
3329 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3330 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3331 gimple_expr_type (stmt),
3332 gimple_assign_rhs1 (stmt),
3333 gimple_assign_rhs2 (stmt));
3334 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3335 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3336 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3337 else
3338 set_value_range_to_varying (vr);
3339
3340 if (vr->type == VR_VARYING)
3341 extract_range_basic (vr, stmt);
3342 }
3343
3344 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3345 would be profitable to adjust VR using scalar evolution information
3346 for VAR. If so, update VR with the new limits. */
3347
3348 static void
3349 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3350 gimple stmt, tree var)
3351 {
3352 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3353 enum ev_direction dir;
3354
3355 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3356 better opportunities than a regular range, but I'm not sure. */
3357 if (vr->type == VR_ANTI_RANGE)
3358 return;
3359
3360 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3361
3362 /* Like in PR19590, scev can return a constant function. */
3363 if (is_gimple_min_invariant (chrec))
3364 {
3365 set_value_range_to_value (vr, chrec, vr->equiv);
3366 return;
3367 }
3368
3369 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3370 return;
3371
3372 init = initial_condition_in_loop_num (chrec, loop->num);
3373 tem = op_with_constant_singleton_value_range (init);
3374 if (tem)
3375 init = tem;
3376 step = evolution_part_in_loop_num (chrec, loop->num);
3377 tem = op_with_constant_singleton_value_range (step);
3378 if (tem)
3379 step = tem;
3380
3381 /* If STEP is symbolic, we can't know whether INIT will be the
3382 minimum or maximum value in the range. Also, unless INIT is
3383 a simple expression, compare_values and possibly other functions
3384 in tree-vrp won't be able to handle it. */
3385 if (step == NULL_TREE
3386 || !is_gimple_min_invariant (step)
3387 || !valid_value_p (init))
3388 return;
3389
3390 dir = scev_direction (chrec);
3391 if (/* Do not adjust ranges if we do not know whether the iv increases
3392 or decreases, ... */
3393 dir == EV_DIR_UNKNOWN
3394 /* ... or if it may wrap. */
3395 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3396 true))
3397 return;
3398
3399 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3400 negative_overflow_infinity and positive_overflow_infinity,
3401 because we have concluded that the loop probably does not
3402 wrap. */
3403
3404 type = TREE_TYPE (var);
3405 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3406 tmin = lower_bound_in_type (type, type);
3407 else
3408 tmin = TYPE_MIN_VALUE (type);
3409 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3410 tmax = upper_bound_in_type (type, type);
3411 else
3412 tmax = TYPE_MAX_VALUE (type);
3413
3414 /* Try to use estimated number of iterations for the loop to constrain the
3415 final value in the evolution. */
3416 if (TREE_CODE (step) == INTEGER_CST
3417 && is_gimple_val (init)
3418 && (TREE_CODE (init) != SSA_NAME
3419 || get_value_range (init)->type == VR_RANGE))
3420 {
3421 double_int nit;
3422
3423 if (estimated_loop_iterations (loop, true, &nit))
3424 {
3425 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3426 double_int dtmp;
3427 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3428 int overflow = 0;
3429
3430 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3431 unsigned_p, &overflow);
3432 /* If the multiplication overflowed we can't do a meaningful
3433 adjustment. Likewise if the result doesn't fit in the type
3434 of the induction variable. For a signed type we have to
3435 check whether the result has the expected signedness which
3436 is that of the step as number of iterations is unsigned. */
3437 if (!overflow
3438 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3439 && (unsigned_p
3440 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3441 {
3442 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3443 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3444 TREE_TYPE (init), init, tem);
3445 /* Likewise if the addition did. */
3446 if (maxvr.type == VR_RANGE)
3447 {
3448 tmin = maxvr.min;
3449 tmax = maxvr.max;
3450 }
3451 }
3452 }
3453 }
3454
3455 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3456 {
3457 min = tmin;
3458 max = tmax;
3459
3460 /* For VARYING or UNDEFINED ranges, just about anything we get
3461 from scalar evolutions should be better. */
3462
3463 if (dir == EV_DIR_DECREASES)
3464 max = init;
3465 else
3466 min = init;
3467
3468 /* If we would create an invalid range, then just assume we
3469 know absolutely nothing. This may be over-conservative,
3470 but it's clearly safe, and should happen only in unreachable
3471 parts of code, or for invalid programs. */
3472 if (compare_values (min, max) == 1)
3473 return;
3474
3475 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3476 }
3477 else if (vr->type == VR_RANGE)
3478 {
3479 min = vr->min;
3480 max = vr->max;
3481
3482 if (dir == EV_DIR_DECREASES)
3483 {
3484 /* INIT is the maximum value. If INIT is lower than VR->MAX
3485 but no smaller than VR->MIN, set VR->MAX to INIT. */
3486 if (compare_values (init, max) == -1)
3487 max = init;
3488
3489 /* According to the loop information, the variable does not
3490 overflow. If we think it does, probably because of an
3491 overflow due to arithmetic on a different INF value,
3492 reset now. */
3493 if (is_negative_overflow_infinity (min)
3494 || compare_values (min, tmin) == -1)
3495 min = tmin;
3496
3497 }
3498 else
3499 {
3500 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3501 if (compare_values (init, min) == 1)
3502 min = init;
3503
3504 if (is_positive_overflow_infinity (max)
3505 || compare_values (tmax, max) == -1)
3506 max = tmax;
3507 }
3508
3509 /* If we just created an invalid range with the minimum
3510 greater than the maximum, we fail conservatively.
3511 This should happen only in unreachable
3512 parts of code, or for invalid programs. */
3513 if (compare_values (min, max) == 1)
3514 return;
3515
3516 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3517 }
3518 }
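
/* Illustrative sketch only, not used by VRP itself: the
   iteration-count refinement performed above, for a simple induction
   variable of 32-bit int type with initial value INIT, constant STEP
   and at most NIT latch iterations.  The example_* name is made up
   for this sketch.  Assuming the variable does not wrap, it stays
   within [INIT, INIT + STEP * NIT] for a non-negative step and within
   [INIT + STEP * NIT, INIT] for a negative one, provided the final
   value still fits in the type; the real code performs the equivalent
   checks on double_ints.  Returns 1 and sets *MIN/*MAX on success,
   0 if it must punt.  */

static int
example_scev_bounds (int init, int step, unsigned long long nit,
                     int *min, int *max)
{
  long long last;

  /* Punt if the iteration count is too large for the 64-bit
     arithmetic below to be exact.  */
  if (nit > 0x7fffffffULL)
    return 0;

  last = (long long) init + (long long) step * (long long) nit;

  /* Punt if the final value does not fit in a 32-bit int.  */
  if (last < -2147483647LL - 1 || last > 2147483647LL)
    return 0;

  if (step >= 0)
    {
      *min = init;
      *max = (int) last;
    }
  else
    {
      *min = (int) last;
      *max = init;
    }
  return 1;
}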
3519
3520 /* Return true if VAR may overflow at STMT. This checks any available
3521 loop information to see if we can determine that VAR does not
3522 overflow. */
3523
3524 static bool
3525 vrp_var_may_overflow (tree var, gimple stmt)
3526 {
3527 struct loop *l;
3528 tree chrec, init, step;
3529
3530 if (current_loops == NULL)
3531 return true;
3532
3533 l = loop_containing_stmt (stmt);
3534 if (l == NULL
3535 || !loop_outer (l))
3536 return true;
3537
3538 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3539 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3540 return true;
3541
3542 init = initial_condition_in_loop_num (chrec, l->num);
3543 step = evolution_part_in_loop_num (chrec, l->num);
3544
3545 if (step == NULL_TREE
3546 || !is_gimple_min_invariant (step)
3547 || !valid_value_p (init))
3548 return true;
3549
3550 /* If we get here, we know something useful about VAR based on the
3551 loop information. If it wraps, it may overflow. */
3552
3553 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3554 true))
3555 return true;
3556
3557 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3558 {
3559 print_generic_expr (dump_file, var, 0);
3560 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3561 }
3562
3563 return false;
3564 }
3565
3566
3567 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3568
3569 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3570 all the values in the ranges.
3571
3572 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3573
3574 - Return NULL_TREE if it is not always possible to determine the
3575 value of the comparison.
3576
3577 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3578 overflow infinity was used in the test. */
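/* As an illustration (example ranges chosen here, not taken from the
   surrounding code): [1, 5] LT_EXPR [6, 10] holds for every pair of
   values, so boolean_true_node is returned; [6, 10] LT_EXPR [1, 5]
   always fails and returns boolean_false_node; [1, 10] LT_EXPR [5, 15]
   overlaps, so NULL_TREE is returned.  */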
3579
3580
3581 static tree
3582 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3583 bool *strict_overflow_p)
3584 {
3585 /* VARYING or UNDEFINED ranges cannot be compared. */
3586 if (vr0->type == VR_VARYING
3587 || vr0->type == VR_UNDEFINED
3588 || vr1->type == VR_VARYING
3589 || vr1->type == VR_UNDEFINED)
3590 return NULL_TREE;
3591
3592 /* Anti-ranges need to be handled separately. */
3593 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3594 {
3595 /* If both are anti-ranges, then we cannot compute any
3596 comparison. */
3597 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3598 return NULL_TREE;
3599
3600 /* These comparisons are never statically computable. */
3601 if (comp == GT_EXPR
3602 || comp == GE_EXPR
3603 || comp == LT_EXPR
3604 || comp == LE_EXPR)
3605 return NULL_TREE;
3606
3607 /* Equality can be computed only between a range and an
3608 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3609 if (vr0->type == VR_RANGE)
3610 {
3611 /* To simplify processing, make VR0 the anti-range. */
3612 value_range_t *tmp = vr0;
3613 vr0 = vr1;
3614 vr1 = tmp;
3615 }
3616
3617 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3618
3619 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3620 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3621 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3622
3623 return NULL_TREE;
3624 }
3625
3626 if (!usable_range_p (vr0, strict_overflow_p)
3627 || !usable_range_p (vr1, strict_overflow_p))
3628 return NULL_TREE;
3629
3630 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3631 operands around and change the comparison code. */
3632 if (comp == GT_EXPR || comp == GE_EXPR)
3633 {
3634 value_range_t *tmp;
3635 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3636 tmp = vr0;
3637 vr0 = vr1;
3638 vr1 = tmp;
3639 }
3640
3641 if (comp == EQ_EXPR)
3642 {
3643 /* Equality may only be computed if both ranges represent
3644 exactly one value. */
3645 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3646 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3647 {
3648 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3649 strict_overflow_p);
3650 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3651 strict_overflow_p);
3652 if (cmp_min == 0 && cmp_max == 0)
3653 return boolean_true_node;
3654 else if (cmp_min != -2 && cmp_max != -2)
3655 return boolean_false_node;
3656 }
3657 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1. */
3658 else if (compare_values_warnv (vr0->min, vr1->max,
3659 strict_overflow_p) == 1
3660 || compare_values_warnv (vr1->min, vr0->max,
3661 strict_overflow_p) == 1)
3662 return boolean_false_node;
3663
3664 return NULL_TREE;
3665 }
3666 else if (comp == NE_EXPR)
3667 {
3668 int cmp1, cmp2;
3669
3670 /* If VR0 is completely to the left or completely to the right
3671 of VR1, they are always different. Notice that we need to
3672 make sure that both comparisons yield similar results to
3673 avoid comparing values that cannot be compared at
3674 compile-time. */
3675 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3676 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3677 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3678 return boolean_true_node;
3679
3680 /* If VR0 and VR1 represent a single value and are identical,
3681 return false. */
3682 else if (compare_values_warnv (vr0->min, vr0->max,
3683 strict_overflow_p) == 0
3684 && compare_values_warnv (vr1->min, vr1->max,
3685 strict_overflow_p) == 0
3686 && compare_values_warnv (vr0->min, vr1->min,
3687 strict_overflow_p) == 0
3688 && compare_values_warnv (vr0->max, vr1->max,
3689 strict_overflow_p) == 0)
3690 return boolean_false_node;
3691
3692 /* Otherwise, they may or may not be different. */
3693 else
3694 return NULL_TREE;
3695 }
3696 else if (comp == LT_EXPR || comp == LE_EXPR)
3697 {
3698 int tst;
3699
3700 /* If VR0 is to the left of VR1, return true. */
3701 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3702 if ((comp == LT_EXPR && tst == -1)
3703 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3704 {
3705 if (overflow_infinity_range_p (vr0)
3706 || overflow_infinity_range_p (vr1))
3707 *strict_overflow_p = true;
3708 return boolean_true_node;
3709 }
3710
3711 /* If VR0 is to the right of VR1, return false. */
3712 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3713 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3714 || (comp == LE_EXPR && tst == 1))
3715 {
3716 if (overflow_infinity_range_p (vr0)
3717 || overflow_infinity_range_p (vr1))
3718 *strict_overflow_p = true;
3719 return boolean_false_node;
3720 }
3721
3722 /* Otherwise, we don't know. */
3723 return NULL_TREE;
3724 }
3725
3726 gcc_unreachable ();
3727 }
3728
3729
3730 /* Given a value range VR, a value VAL and a comparison code COMP, return
3731 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3732 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3733 always returns false. Return NULL_TREE if it is not always
3734 possible to determine the value of the comparison. Also set
3735 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3736 infinity was used in the test. */
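/* For instance (illustrative values): [0, 9] LT_EXPR 10 always holds,
   so boolean_true_node is returned; ~[0, 0] NE_EXPR 0 also yields
   boolean_true_node because 0 lies inside the excluded interval; and
   [0, 9] LT_EXPR 5 returns NULL_TREE since the outcome depends on the
   actual value.  */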
3737
3738 static tree
3739 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3740 bool *strict_overflow_p)
3741 {
3742 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3743 return NULL_TREE;
3744
3745 /* Anti-ranges need to be handled separately. */
3746 if (vr->type == VR_ANTI_RANGE)
3747 {
3748 /* For anti-ranges, the only predicates that we can compute at
3749 compile time are equality and inequality. */
3750 if (comp == GT_EXPR
3751 || comp == GE_EXPR
3752 || comp == LT_EXPR
3753 || comp == LE_EXPR)
3754 return NULL_TREE;
3755
3756 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3757 if (value_inside_range (val, vr) == 1)
3758 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3759
3760 return NULL_TREE;
3761 }
3762
3763 if (!usable_range_p (vr, strict_overflow_p))
3764 return NULL_TREE;
3765
3766 if (comp == EQ_EXPR)
3767 {
3768 /* EQ_EXPR may only be computed if VR represents exactly
3769 one value. */
3770 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3771 {
3772 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3773 if (cmp == 0)
3774 return boolean_true_node;
3775 else if (cmp == -1 || cmp == 1 || cmp == 2)
3776 return boolean_false_node;
3777 }
3778 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3779 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3780 return boolean_false_node;
3781
3782 return NULL_TREE;
3783 }
3784 else if (comp == NE_EXPR)
3785 {
3786 /* If VAL is not inside VR, then they are always different. */
3787 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3788 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3789 return boolean_true_node;
3790
3791 /* If VR represents exactly one value equal to VAL, then return
3792 false. */
3793 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3794 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3795 return boolean_false_node;
3796
3797 /* Otherwise, they may or may not be different. */
3798 return NULL_TREE;
3799 }
3800 else if (comp == LT_EXPR || comp == LE_EXPR)
3801 {
3802 int tst;
3803
3804 /* If VR is to the left of VAL, return true. */
3805 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3806 if ((comp == LT_EXPR && tst == -1)
3807 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3808 {
3809 if (overflow_infinity_range_p (vr))
3810 *strict_overflow_p = true;
3811 return boolean_true_node;
3812 }
3813
3814 /* If VR is to the right of VAL, return false. */
3815 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3816 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3817 || (comp == LE_EXPR && tst == 1))
3818 {
3819 if (overflow_infinity_range_p (vr))
3820 *strict_overflow_p = true;
3821 return boolean_false_node;
3822 }
3823
3824 /* Otherwise, we don't know. */
3825 return NULL_TREE;
3826 }
3827 else if (comp == GT_EXPR || comp == GE_EXPR)
3828 {
3829 int tst;
3830
3831 /* If VR is to the right of VAL, return true. */
3832 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3833 if ((comp == GT_EXPR && tst == 1)
3834 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3835 {
3836 if (overflow_infinity_range_p (vr))
3837 *strict_overflow_p = true;
3838 return boolean_true_node;
3839 }
3840
3841 /* If VR is to the left of VAL, return false. */
3842 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3843 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3844 || (comp == GE_EXPR && tst == -1))
3845 {
3846 if (overflow_infinity_range_p (vr))
3847 *strict_overflow_p = true;
3848 return boolean_false_node;
3849 }
3850
3851 /* Otherwise, we don't know. */
3852 return NULL_TREE;
3853 }
3854
3855 gcc_unreachable ();
3856 }
3857
3858
3859 /* Debugging dumps. */
3860
3861 void dump_value_range (FILE *, value_range_t *);
3862 void debug_value_range (value_range_t *);
3863 void dump_all_value_ranges (FILE *);
3864 void debug_all_value_ranges (void);
3865 void dump_vr_equiv (FILE *, bitmap);
3866 void debug_vr_equiv (bitmap);
3867
3868
3869 /* Dump value range VR to FILE. */
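/* Example output (illustrative): a range prints as "[0, 10]", an
   anti-range as "~[0, 0]", overflow infinities as "-INF(OVF)" or
   "+INF(OVF)", and any equivalences are appended as
   " EQUIVALENCES: { x_3 } (1 elements)".  */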
3870
3871 void
3872 dump_value_range (FILE *file, value_range_t *vr)
3873 {
3874 if (vr == NULL)
3875 fprintf (file, "[]");
3876 else if (vr->type == VR_UNDEFINED)
3877 fprintf (file, "UNDEFINED");
3878 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3879 {
3880 tree type = TREE_TYPE (vr->min);
3881
3882 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3883
3884 if (is_negative_overflow_infinity (vr->min))
3885 fprintf (file, "-INF(OVF)");
3886 else if (INTEGRAL_TYPE_P (type)
3887 && !TYPE_UNSIGNED (type)
3888 && vrp_val_is_min (vr->min))
3889 fprintf (file, "-INF");
3890 else
3891 print_generic_expr (file, vr->min, 0);
3892
3893 fprintf (file, ", ");
3894
3895 if (is_positive_overflow_infinity (vr->max))
3896 fprintf (file, "+INF(OVF)");
3897 else if (INTEGRAL_TYPE_P (type)
3898 && vrp_val_is_max (vr->max))
3899 fprintf (file, "+INF");
3900 else
3901 print_generic_expr (file, vr->max, 0);
3902
3903 fprintf (file, "]");
3904
3905 if (vr->equiv)
3906 {
3907 bitmap_iterator bi;
3908 unsigned i, c = 0;
3909
3910 fprintf (file, " EQUIVALENCES: { ");
3911
3912 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3913 {
3914 print_generic_expr (file, ssa_name (i), 0);
3915 fprintf (file, " ");
3916 c++;
3917 }
3918
3919 fprintf (file, "} (%u elements)", c);
3920 }
3921 }
3922 else if (vr->type == VR_VARYING)
3923 fprintf (file, "VARYING");
3924 else
3925 fprintf (file, "INVALID RANGE");
3926 }
3927
3928
3929 /* Dump value range VR to stderr. */
3930
3931 DEBUG_FUNCTION void
3932 debug_value_range (value_range_t *vr)
3933 {
3934 dump_value_range (stderr, vr);
3935 fprintf (stderr, "\n");
3936 }
3937
3938
3939 /* Dump value ranges of all SSA_NAMEs to FILE. */
3940
3941 void
3942 dump_all_value_ranges (FILE *file)
3943 {
3944 size_t i;
3945
3946 for (i = 0; i < num_vr_values; i++)
3947 {
3948 if (vr_value[i])
3949 {
3950 print_generic_expr (file, ssa_name (i), 0);
3951 fprintf (file, ": ");
3952 dump_value_range (file, vr_value[i]);
3953 fprintf (file, "\n");
3954 }
3955 }
3956
3957 fprintf (file, "\n");
3958 }
3959
3960
3961 /* Dump all value ranges to stderr. */
3962
3963 DEBUG_FUNCTION void
3964 debug_all_value_ranges (void)
3965 {
3966 dump_all_value_ranges (stderr);
3967 }
3968
3969
3970 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3971 create a new SSA name N and return the assertion assignment
3972 'N = ASSERT_EXPR <V, V OP W>'. */
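/* For example (hypothetical SSA names): given COND 'x_3 > 10' and
   V = x_3, this builds and returns the statement
   'x_7 = ASSERT_EXPR <x_3, x_3 > 10>', where x_7 is the freshly
   duplicated SSA name registered to replace x_3 when the SSA web is
   updated.  */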
3973
3974 static gimple
3975 build_assert_expr_for (tree cond, tree v)
3976 {
3977 tree n;
3978 gimple assertion;
3979
3980 gcc_assert (TREE_CODE (v) == SSA_NAME);
3981 n = duplicate_ssa_name (v, NULL);
3982
3983 if (COMPARISON_CLASS_P (cond))
3984 {
3985 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3986 assertion = gimple_build_assign (n, a);
3987 }
3988 else if (TREE_CODE (cond) == SSA_NAME)
3989 {
3990 /* Given V, build the assignment N = true. */
3991 gcc_assert (v == cond);
3992 assertion = gimple_build_assign (n, boolean_true_node);
3993 }
3994 else
3995 gcc_unreachable ();
3996
3997 SSA_NAME_DEF_STMT (n) = assertion;
3998
3999 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4000 operand of the ASSERT_EXPR. Register the new name and the old one
4001 in the replacement table so that we can fix the SSA web after
4002 adding all the ASSERT_EXPRs. */
4003 register_new_name_mapping (n, v);
4004
4005 return assertion;
4006 }
4007
4008
4009 /* Return true if STMT is a predicate expression involving floating
4010 point values. */
4011
4012 static inline bool
4013 fp_predicate (gimple stmt)
4014 {
4015 GIMPLE_CHECK (stmt, GIMPLE_COND);
4016
4017 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4018 }
4019
4020
4021 /* If the range of values taken by OP can be inferred after STMT executes,
4022 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4023 describe the inferred range. Return true if a range could be
4024 inferred. */
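/* For instance, after a statement such as '*p_1 = 0' executes we may
   infer p_1 != 0 (that is, *COMP_CODE_P = NE_EXPR and *VAL_P = 0),
   provided -fdelete-null-pointer-checks is in effect (hypothetical
   SSA name used for illustration).  */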
4025
4026 static bool
4027 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4028 {
4029 *val_p = NULL_TREE;
4030 *comp_code_p = ERROR_MARK;
4031
4032 /* Do not attempt to infer anything in names that flow through
4033 abnormal edges. */
4034 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4035 return false;
4036
4037 /* Similarly, don't infer anything from statements that may throw
4038 exceptions. */
4039 if (stmt_could_throw_p (stmt))
4040 return false;
4041
4042 /* If STMT is the last statement of a basic block with no
4043 successors, there is no point inferring anything about any of its
4044 operands. We would not be able to find a proper insertion point
4045 for the assertion, anyway. */
4046 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4047 return false;
4048
4049 /* We can only assume that a pointer dereference will yield
4050 non-NULL if -fdelete-null-pointer-checks is enabled. */
4051 if (flag_delete_null_pointer_checks
4052 && POINTER_TYPE_P (TREE_TYPE (op))
4053 && gimple_code (stmt) != GIMPLE_ASM)
4054 {
4055 unsigned num_uses, num_loads, num_stores;
4056
4057 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4058 if (num_loads + num_stores > 0)
4059 {
4060 *val_p = build_int_cst (TREE_TYPE (op), 0);
4061 *comp_code_p = NE_EXPR;
4062 return true;
4063 }
4064 }
4065
4066 return false;
4067 }
4068
4069
4070 void dump_asserts_for (FILE *, tree);
4071 void debug_asserts_for (tree);
4072 void dump_all_asserts (FILE *);
4073 void debug_all_asserts (void);
4074
4075 /* Dump all the registered assertions for NAME to FILE. */
4076
4077 void
4078 dump_asserts_for (FILE *file, tree name)
4079 {
4080 assert_locus_t loc;
4081
4082 fprintf (file, "Assertions to be inserted for ");
4083 print_generic_expr (file, name, 0);
4084 fprintf (file, "\n");
4085
4086 loc = asserts_for[SSA_NAME_VERSION (name)];
4087 while (loc)
4088 {
4089 fprintf (file, "\t");
4090 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4091 fprintf (file, "\n\tBB #%d", loc->bb->index);
4092 if (loc->e)
4093 {
4094 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4095 loc->e->dest->index);
4096 dump_edge_info (file, loc->e, 0);
4097 }
4098 fprintf (file, "\n\tPREDICATE: ");
4099 print_generic_expr (file, name, 0);
4100 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4101 print_generic_expr (file, loc->val, 0);
4102 fprintf (file, "\n\n");
4103 loc = loc->next;
4104 }
4105
4106 fprintf (file, "\n");
4107 }
4108
4109
4110 /* Dump all the registered assertions for NAME to stderr. */
4111
4112 DEBUG_FUNCTION void
4113 debug_asserts_for (tree name)
4114 {
4115 dump_asserts_for (stderr, name);
4116 }
4117
4118
4119 /* Dump all the registered assertions for all the names to FILE. */
4120
4121 void
4122 dump_all_asserts (FILE *file)
4123 {
4124 unsigned i;
4125 bitmap_iterator bi;
4126
4127 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4128 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4129 dump_asserts_for (file, ssa_name (i));
4130 fprintf (file, "\n");
4131 }
4132
4133
4134 /* Dump all the registered assertions for all the names to stderr. */
4135
4136 DEBUG_FUNCTION void
4137 debug_all_asserts (void)
4138 {
4139 dump_all_asserts (stderr);
4140 }
4141
4142
4143 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4144 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4145 E->DEST, then register this location as a possible insertion point
4146 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4147
4148 BB, E and SI provide the exact insertion point for the new
4149 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4150 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4151 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4152 must not be NULL. */
4153
4154 static void
4155 register_new_assert_for (tree name, tree expr,
4156 enum tree_code comp_code,
4157 tree val,
4158 basic_block bb,
4159 edge e,
4160 gimple_stmt_iterator si)
4161 {
4162 assert_locus_t n, loc, last_loc;
4163 basic_block dest_bb;
4164
4165 gcc_checking_assert (bb == NULL || e == NULL);
4166
4167 if (e == NULL)
4168 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4169 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4170
4171 /* Never build an assert comparing against an integer constant with
4172 TREE_OVERFLOW set. This confuses our undefined overflow warning
4173 machinery. */
4174 if (TREE_CODE (val) == INTEGER_CST
4175 && TREE_OVERFLOW (val))
4176 val = build_int_cst_wide (TREE_TYPE (val),
4177 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4178
4179 /* The new assertion A will be inserted at BB or E. We need to
4180 determine if the new location is dominated by a previously
4181 registered location for A. If we are doing an edge insertion,
4182 assume that A will be inserted at E->DEST. Note that this is not
4183 necessarily true.
4184
4185 If E is a critical edge, it will be split. But even if E is
4186 split, the new block will dominate the same set of blocks that
4187 E->DEST dominates.
4188
4189 The reverse, however, is not true: blocks dominated by E->DEST
4190 will not be dominated by the new block created to split E. So,
4191 if the insertion location is on a critical edge, we will not use
4192 the new location to move another assertion previously registered
4193 at a block dominated by E->DEST. */
4194 dest_bb = (bb) ? bb : e->dest;
4195
4196 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4197 VAL at a block dominating DEST_BB, then we don't need to insert a new
4198 one. Similarly, if the same assertion already exists at a block
4199 dominated by DEST_BB and the new location is not on a critical
4200 edge, then update the existing location for the assertion (i.e.,
4201 move the assertion up in the dominance tree).
4202
4203 Note, this is implemented as a simple linked list because there
4204 should not be more than a handful of assertions registered per
4205 name. If this becomes a performance problem, a table hashed by
4206 COMP_CODE and VAL could be implemented. */
4207 loc = asserts_for[SSA_NAME_VERSION (name)];
4208 last_loc = loc;
4209 while (loc)
4210 {
4211 if (loc->comp_code == comp_code
4212 && (loc->val == val
4213 || operand_equal_p (loc->val, val, 0))
4214 && (loc->expr == expr
4215 || operand_equal_p (loc->expr, expr, 0)))
4216 {
4217 /* If the assertion NAME COMP_CODE VAL has already been
4218 registered at a basic block that dominates DEST_BB, then
4219 we don't need to insert the same assertion again. Note
4220 that we don't check strict dominance here to avoid
4221 replicating the same assertion inside the same basic
4222 block more than once (e.g., when a pointer is
4223 dereferenced several times inside a block).
4224
4225 An exception to this rule are edge insertions. If the
4226 new assertion is to be inserted on edge E, then it will
4227 dominate all the other insertions that we may want to
4228 insert in DEST_BB. So, if we are doing an edge
4229 insertion, don't do this dominance check. */
4230 if (e == NULL
4231 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4232 return;
4233
4234 /* Otherwise, if E is not a critical edge and DEST_BB
4235 dominates the existing location for the assertion, move
4236 the assertion up in the dominance tree by updating its
4237 location information. */
4238 if ((e == NULL || !EDGE_CRITICAL_P (e))
4239 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4240 {
4241 loc->bb = dest_bb;
4242 loc->e = e;
4243 loc->si = si;
4244 return;
4245 }
4246 }
4247
4248 /* Update the last node of the list and move to the next one. */
4249 last_loc = loc;
4250 loc = loc->next;
4251 }
4252
4253 /* If we didn't find an assertion already registered for
4254 NAME COMP_CODE VAL, add a new one at the end of the list of
4255 assertions associated with NAME. */
4256 n = XNEW (struct assert_locus_d);
4257 n->bb = dest_bb;
4258 n->e = e;
4259 n->si = si;
4260 n->comp_code = comp_code;
4261 n->val = val;
4262 n->expr = expr;
4263 n->next = NULL;
4264
4265 if (last_loc)
4266 last_loc->next = n;
4267 else
4268 asserts_for[SSA_NAME_VERSION (name)] = n;
4269
4270 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4271 }
4272
4273 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4274 Extract a suitable test code and value and store them into *CODE_P and
4275 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4276
4277 If no extraction was possible, return FALSE, otherwise return TRUE.
4278
4279 If INVERT is true, then we invert the result stored into *CODE_P. */
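/* As an illustration (hypothetical names): for the condition
   '10 < x_3' with NAME = x_3 and INVERT false, this stores GT_EXPR
   and 10, normalizing the predicate to 'x_3 > 10'; with INVERT true
   it stores LE_EXPR instead.  */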
4280
4281 static bool
4282 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4283 tree cond_op0, tree cond_op1,
4284 bool invert, enum tree_code *code_p,
4285 tree *val_p)
4286 {
4287 enum tree_code comp_code;
4288 tree val;
4289
4290 /* We have a comparison of the form NAME COMP VAL
4291 or VAL COMP NAME. */
4292 if (name == cond_op1)
4293 {
4294 /* If the predicate is of the form VAL COMP NAME, flip
4295 COMP around because we need to register NAME as the
4296 first operand in the predicate. */
4297 comp_code = swap_tree_comparison (cond_code);
4298 val = cond_op0;
4299 }
4300 else
4301 {
4302 /* The comparison is of the form NAME COMP VAL, so the
4303 comparison code remains unchanged. */
4304 comp_code = cond_code;
4305 val = cond_op1;
4306 }
4307
4308 /* Invert the comparison code as necessary. */
4309 if (invert)
4310 comp_code = invert_tree_comparison (comp_code, 0);
4311
4312 /* VRP does not handle float types. */
4313 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4314 return false;
4315
4316 /* Do not register always-false predicates.
4317 FIXME: this works around a limitation in fold() when dealing with
4318 enumerations. Given 'enum { N1, N2 } x;', fold will not
4319 fold 'if (x > N2)' to 'if (0)'. */
4320 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4321 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4322 {
4323 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4324 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4325
4326 if (comp_code == GT_EXPR
4327 && (!max
4328 || compare_values (val, max) == 0))
4329 return false;
4330
4331 if (comp_code == LT_EXPR
4332 && (!min
4333 || compare_values (val, min) == 0))
4334 return false;
4335 }
4336 *code_p = comp_code;
4337 *val_p = val;
4338 return true;
4339 }
4340
4341 /* Find the smallest RES such that RES > VAL && (RES & MASK) == RES, if any
4342 (otherwise return VAL). VAL and MASK must be zero-extended for
4343 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4344 (to transform signed values into unsigned) and at the end xor
4345 SGNBIT back. */
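/* Worked example (values chosen for illustration): with PREC = 8,
   VAL = 5 (0b00000101), MASK = 0x0c (0b00001100) and SGNBIT = 0, the
   loop first reaches bit 2 (value 4) and computes
   RES = ((5 + 4) & ~3) & 0x0c = 8, which is greater than VAL, so 8 is
   returned -- the smallest value above 5 whose set bits all lie
   within MASK.  */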
4346
4347 static double_int
4348 masked_increment (double_int val, double_int mask, double_int sgnbit,
4349 unsigned int prec)
4350 {
4351 double_int bit = double_int_one, res;
4352 unsigned int i;
4353
4354 val = double_int_xor (val, sgnbit);
4355 for (i = 0; i < prec; i++, bit = double_int_add (bit, bit))
4356 {
4357 res = mask;
4358 if (double_int_zero_p (double_int_and (res, bit)))
4359 continue;
4360 res = double_int_sub (bit, double_int_one);
4361 res = double_int_and_not (double_int_add (val, bit), res);
4362 res = double_int_and (res, mask);
4363 if (double_int_ucmp (res, val) > 0)
4364 return double_int_xor (res, sgnbit);
4365 }
4366 return double_int_xor (val, sgnbit);
4367 }
4368
4369 /* Try to register an edge assertion for SSA name NAME on edge E for
4370 the condition COND contributing to the conditional jump pointed to by BSI.
4371 Invert the condition COND if INVERT is true.
4372 Return true if an assertion for NAME could be registered. */
4373
4374 static bool
4375 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4376 enum tree_code cond_code,
4377 tree cond_op0, tree cond_op1, bool invert)
4378 {
4379 tree val;
4380 enum tree_code comp_code;
4381 bool retval = false;
4382
4383 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4384 cond_op0,
4385 cond_op1,
4386 invert, &comp_code, &val))
4387 return false;
4388
4389 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4390 reachable from E. */
4391 if (live_on_edge (e, name)
4392 && !has_single_use (name))
4393 {
4394 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4395 retval = true;
4396 }
4397
4398 /* In the case of NAME <= CST and NAME being defined as
4399 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4400 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4401 This catches range and anti-range tests. */
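/* For instance (illustrative, 32-bit): the test 'x_1 <= 9' where
   x_1 = (unsigned) y_2 + 0xfffffff6 (i.e. y_2 - 10 in unsigned
   arithmetic) encodes the range test 10 <= y_2 <= 19, so useful
   asserts can be registered for y_2 as well.  */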
4402 if ((comp_code == LE_EXPR
4403 || comp_code == GT_EXPR)
4404 && TREE_CODE (val) == INTEGER_CST
4405 && TYPE_UNSIGNED (TREE_TYPE (val)))
4406 {
4407 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4408 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4409
4410 /* Extract CST2 from the (optional) addition. */
4411 if (is_gimple_assign (def_stmt)
4412 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4413 {
4414 name2 = gimple_assign_rhs1 (def_stmt);
4415 cst2 = gimple_assign_rhs2 (def_stmt);
4416 if (TREE_CODE (name2) == SSA_NAME
4417 && TREE_CODE (cst2) == INTEGER_CST)
4418 def_stmt = SSA_NAME_DEF_STMT (name2);
4419 }
4420
4421 /* Extract NAME2 from the (optional) sign-changing cast. */
4422 if (gimple_assign_cast_p (def_stmt))
4423 {
4424 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4425 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4426 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4427 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4428 name3 = gimple_assign_rhs1 (def_stmt);
4429 }
4430
4431 /* If name3 is used later, create an ASSERT_EXPR for it. */
4432 if (name3 != NULL_TREE
4433 && TREE_CODE (name3) == SSA_NAME
4434 && (cst2 == NULL_TREE
4435 || TREE_CODE (cst2) == INTEGER_CST)
4436 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4437 && live_on_edge (e, name3)
4438 && !has_single_use (name3))
4439 {
4440 tree tmp;
4441
4442 /* Build an expression for the range test. */
4443 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4444 if (cst2 != NULL_TREE)
4445 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4446
4447 if (dump_file)
4448 {
4449 fprintf (dump_file, "Adding assert for ");
4450 print_generic_expr (dump_file, name3, 0);
4451 fprintf (dump_file, " from ");
4452 print_generic_expr (dump_file, tmp, 0);
4453 fprintf (dump_file, "\n");
4454 }
4455
4456 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4457
4458 retval = true;
4459 }
4460
4461 /* If name2 is used later, create an ASSERT_EXPR for it. */
4462 if (name2 != NULL_TREE
4463 && TREE_CODE (name2) == SSA_NAME
4464 && TREE_CODE (cst2) == INTEGER_CST
4465 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4466 && live_on_edge (e, name2)
4467 && !has_single_use (name2))
4468 {
4469 tree tmp;
4470
4471 /* Build an expression for the range test. */
4472 tmp = name2;
4473 if (TREE_TYPE (name) != TREE_TYPE (name2))
4474 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4475 if (cst2 != NULL_TREE)
4476 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4477
4478 if (dump_file)
4479 {
4480 fprintf (dump_file, "Adding assert for ");
4481 print_generic_expr (dump_file, name2, 0);
4482 fprintf (dump_file, " from ");
4483 print_generic_expr (dump_file, tmp, 0);
4484 fprintf (dump_file, "\n");
4485 }
4486
4487 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4488
4489 retval = true;
4490 }
4491 }
4492
4493 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4494 && TREE_CODE (val) == INTEGER_CST)
4495 {
4496 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4497 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4498 tree val2 = NULL_TREE;
4499 double_int mask = double_int_zero;
4500 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4501
4502 /* Add asserts for NAME cmp CST and NAME being defined
4503 as NAME = (int) NAME2. */
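/* E.g. (illustrative, 32-bit): for the test '(int) u_2 < 0' with u_2
   unsigned, the code below registers the equivalent unsigned test
   u_2 + 0x80000000 <= 0x7fffffff, i.e. u_2 in
   [0x80000000, 0xffffffff].  */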
4504 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4505 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4506 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4507 && gimple_assign_cast_p (def_stmt))
4508 {
4509 name2 = gimple_assign_rhs1 (def_stmt);
4510 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4511 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4512 && TYPE_UNSIGNED (TREE_TYPE (name2))
4513 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4514 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4515 || !tree_int_cst_equal (val,
4516 TYPE_MIN_VALUE (TREE_TYPE (val))))
4517 && live_on_edge (e, name2)
4518 && !has_single_use (name2))
4519 {
4520 tree tmp, cst;
4521 enum tree_code new_comp_code = comp_code;
4522
4523 cst = fold_convert (TREE_TYPE (name2),
4524 TYPE_MIN_VALUE (TREE_TYPE (val)));
4525 /* Build an expression for the range test. */
4526 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4527 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4528 fold_convert (TREE_TYPE (name2), val));
4529 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4530 {
4531 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4532 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4533 build_int_cst (TREE_TYPE (name2), 1));
4534 }
4535
4536 if (dump_file)
4537 {
4538 fprintf (dump_file, "Adding assert for ");
4539 print_generic_expr (dump_file, name2, 0);
4540 fprintf (dump_file, " from ");
4541 print_generic_expr (dump_file, tmp, 0);
4542 fprintf (dump_file, "\n");
4543 }
4544
4545 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
4546 e, bsi);
4547
4548 retval = true;
4549 }
4550 }
4551
4552 /* Add asserts for NAME cmp CST and NAME being defined as
4553 NAME = NAME2 >> CST2.
4554
4555 Extract CST2 from the right shift. */
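/* For example (illustrative, unsigned): if x_1 = y_2 >> 4 and the
   test is x_1 == 3, then y_2 must lie in [0x30, 0x3f]; the code below
   registers this as y_2 - 0x30 <= 0xf.  */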
4556 if (is_gimple_assign (def_stmt)
4557 && gimple_assign_rhs_code (def_stmt) == RSHIFT_EXPR)
4558 {
4559 name2 = gimple_assign_rhs1 (def_stmt);
4560 cst2 = gimple_assign_rhs2 (def_stmt);
4561 if (TREE_CODE (name2) == SSA_NAME
4562 && host_integerp (cst2, 1)
4563 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4564 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
4565 && prec <= 2 * HOST_BITS_PER_WIDE_INT
4566 && live_on_edge (e, name2)
4567 && !has_single_use (name2))
4568 {
4569 mask = double_int_mask (tree_low_cst (cst2, 1));
4570 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4571 }
4572 }
4573 if (val2 != NULL_TREE
4574 && TREE_CODE (val2) == INTEGER_CST
4575 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4576 TREE_TYPE (val),
4577 val2, cst2), val))
4578 {
4579 enum tree_code new_comp_code = comp_code;
4580 tree tmp, new_val;
4581
4582 tmp = name2;
4583 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
4584 {
4585 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4586 {
4587 tree type = build_nonstandard_integer_type (prec, 1);
4588 tmp = build1 (NOP_EXPR, type, name2);
4589 val2 = fold_convert (type, val2);
4590 }
4591 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
4592 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
4593 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
4594 }
4595 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4596 new_val = val2;
4597 else
4598 {
4599 mask = double_int_ior (tree_to_double_int (val2), mask);
4600 if (double_int_minus_one_p (double_int_sext (mask, prec)))
4601 new_val = NULL_TREE;
4602 else
4603 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
4604 }
4605
4606 if (new_val)
4607 {
4608 if (dump_file)
4609 {
4610 fprintf (dump_file, "Adding assert for ");
4611 print_generic_expr (dump_file, name2, 0);
4612 fprintf (dump_file, " from ");
4613 print_generic_expr (dump_file, tmp, 0);
4614 fprintf (dump_file, "\n");
4615 }
4616
4617 register_new_assert_for (name2, tmp, new_comp_code, new_val,
4618 NULL, e, bsi);
4619 retval = true;
4620 }
4621 }
4622
4623 /* Add asserts for NAME cmp CST and NAME being defined as
4624 NAME = NAME2 & CST2.
4625
4626 Extract CST2 from the and. */
4627 names[0] = NULL_TREE;
4628 names[1] = NULL_TREE;
4629 cst2 = NULL_TREE;
4630 if (is_gimple_assign (def_stmt)
4631 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
4632 {
4633 name2 = gimple_assign_rhs1 (def_stmt);
4634 cst2 = gimple_assign_rhs2 (def_stmt);
4635 if (TREE_CODE (name2) == SSA_NAME
4636 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4637 && TREE_CODE (cst2) == INTEGER_CST
4638 && !integer_zerop (cst2)
4639 && prec <= 2 * HOST_BITS_PER_WIDE_INT
4640 && (prec > 1
4641 || TYPE_UNSIGNED (TREE_TYPE (val))))
4642 {
4643 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
4644 if (gimple_assign_cast_p (def_stmt2))
4645 {
4646 names[1] = gimple_assign_rhs1 (def_stmt2);
4647 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
4648 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
4649 || (TYPE_PRECISION (TREE_TYPE (name2))
4650 != TYPE_PRECISION (TREE_TYPE (names[1])))
4651 || !live_on_edge (e, names[1])
4652 || has_single_use (names[1]))
4653 names[1] = NULL_TREE;
4654 }
4655 if (live_on_edge (e, name2)
4656 && !has_single_use (name2))
4657 names[0] = name2;
4658 }
4659 }
4660 if (names[0] || names[1])
4661 {
4662 double_int minv, maxv = double_int_zero, valv, cst2v;
4663 double_int tem, sgnbit;
4664 bool valid_p = false, valn = false, cst2n = false;
4665 enum tree_code ccode = comp_code;
4666
4667 valv = double_int_zext (tree_to_double_int (val), prec);
4668 cst2v = double_int_zext (tree_to_double_int (cst2), prec);
4669 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4670 {
4671 valn = double_int_negative_p (double_int_sext (valv, prec));
4672 cst2n = double_int_negative_p (double_int_sext (cst2v, prec));
4673 }
4674 /* If CST2 doesn't have most significant bit set,
4675 but VAL is negative, we have comparison like
4676 if ((x & 0x123) > -4) (always true). Just give up. */
4677 if (!cst2n && valn)
4678 ccode = ERROR_MARK;
4679 if (cst2n)
4680 sgnbit = double_int_zext (double_int_lshift (double_int_one,
4681 prec - 1, prec,
4682 false), prec);
4683 else
4684 sgnbit = double_int_zero;
4685 minv = double_int_and (valv, cst2v);
4686 switch (ccode)
4687 {
4688 case EQ_EXPR:
4689 /* Minimum unsigned value for equality is VAL & CST2
4690 (should be equal to VAL, otherwise we probably should
4691 have folded the comparison into false) and
4692 maximum unsigned value is VAL | ~CST2. */
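/* E.g. (illustrative, 8-bit): for (x & 0xf0) == 0x40 the unsigned
   value of x is constrained to [0x40, 0x4f].  */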
4693 maxv = double_int_ior (valv, double_int_not (cst2v));
4694 maxv = double_int_zext (maxv, prec);
4695 valid_p = true;
4696 break;
4697 case NE_EXPR:
4698 tem = double_int_ior (valv, double_int_not (cst2v));
4699 tem = double_int_zext (tem, prec);
4700 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
4701 if (double_int_zero_p (valv))
4702 {
4703 cst2n = false;
4704 sgnbit = double_int_zero;
4705 goto gt_expr;
4706 }
4707 /* If (VAL | ~CST2) is all ones, handle it as
4708 (X & CST2) < VAL. */
4709 if (double_int_equal_p (tem, double_int_mask (prec)))
4710 {
4711 cst2n = false;
4712 valn = false;
4713 sgnbit = double_int_zero;
4714 goto lt_expr;
4715 }
4716 if (!cst2n
4717 && double_int_negative_p (double_int_sext (cst2v, prec)))
4718 sgnbit = double_int_zext (double_int_lshift (double_int_one,
4719 prec - 1, prec,
4720 false), prec);
4721 if (!double_int_zero_p (sgnbit))
4722 {
4723 if (double_int_equal_p (valv, sgnbit))
4724 {
4725 cst2n = true;
4726 valn = true;
4727 goto gt_expr;
4728 }
4729 if (double_int_equal_p (tem, double_int_mask (prec - 1)))
4730 {
4731 cst2n = true;
4732 goto lt_expr;
4733 }
4734 if (!cst2n)
4735 sgnbit = double_int_zero;
4736 }
4737 break;
4738 case GE_EXPR:
4739 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
4740 is VAL and maximum unsigned value is ~0. For signed
4741 comparison, if CST2 doesn't have most significant bit
4742 set, handle it similarly. If CST2 has MSB set,
4743 the minimum is the same, and maximum is ~0U/2. */
4744 if (!double_int_equal_p (minv, valv))
4745 {
4746 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
4747 VAL. */
4748 minv = masked_increment (valv, cst2v, sgnbit, prec);
4749 if (double_int_equal_p (minv, valv))
4750 break;
4751 }
4752 maxv = double_int_mask (prec - (cst2n ? 1 : 0));
4753 valid_p = true;
4754 break;
4755 case GT_EXPR:
4756 gt_expr:
4757 /* Find out smallest MINV where MINV > VAL
4758 && (MINV & CST2) == MINV, if any. If VAL is signed and
4759 CST2 has MSB set, compute it biased by 1 << (prec - 1). */
4760 minv = masked_increment (valv, cst2v, sgnbit, prec);
4761 if (double_int_equal_p (minv, valv))
4762 break;
4763 maxv = double_int_mask (prec - (cst2n ? 1 : 0));
4764 valid_p = true;
4765 break;
4766 case LE_EXPR:
4767 /* Minimum unsigned value for <= is 0 and maximum
4768 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
4769 Otherwise, find smallest VAL2 where VAL2 > VAL
4770 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
4771 as maximum.
4772 For signed comparison, if CST2 doesn't have most
4773 significant bit set, handle it similarly. If CST2 has
4774 MSB set, the maximum is the same and minimum is INT_MIN. */
4775 if (double_int_equal_p (minv, valv))
4776 maxv = valv;
4777 else
4778 {
4779 maxv = masked_increment (valv, cst2v, sgnbit, prec);
4780 if (double_int_equal_p (maxv, valv))
4781 break;
4782 maxv = double_int_sub (maxv, double_int_one);
4783 }
4784 maxv = double_int_ior (maxv, double_int_not (cst2v));
4785 maxv = double_int_zext (maxv, prec);
4786 minv = sgnbit;
4787 valid_p = true;
4788 break;
4789 case LT_EXPR:
4790 lt_expr:
4791 /* Minimum unsigned value for < is 0 and maximum
4792 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
4793 Otherwise, find smallest VAL2 where VAL2 > VAL
4794 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
4795 as maximum.
4796 For signed comparison, if CST2 doesn't have most
4797 significant bit set, handle it similarly. If CST2 has
4798 MSB set, the maximum is the same and minimum is INT_MIN. */
4799 if (double_int_equal_p (minv, valv))
4800 {
4801 if (double_int_equal_p (valv, sgnbit))
4802 break;
4803 maxv = valv;
4804 }
4805 else
4806 {
4807 maxv = masked_increment (valv, cst2v, sgnbit, prec);
4808 if (double_int_equal_p (maxv, valv))
4809 break;
4810 }
4811 maxv = double_int_sub (maxv, double_int_one);
4812 maxv = double_int_ior (maxv, double_int_not (cst2v));
4813 maxv = double_int_zext (maxv, prec);
4814 minv = sgnbit;
4815 valid_p = true;
4816 break;
4817 default:
4818 break;
4819 }
4820 if (valid_p
4821 && !double_int_equal_p (double_int_zext (double_int_sub (maxv,
4822 minv),
4823 prec),
4824 double_int_mask (prec)))
4825 {
4826 tree tmp, new_val, type;
4827 int i;
4828
4829 for (i = 0; i < 2; i++)
4830 if (names[i])
4831 {
4832 double_int maxv2 = maxv;
4833 tmp = names[i];
4834 type = TREE_TYPE (names[i]);
4835 if (!TYPE_UNSIGNED (type))
4836 {
4837 type = build_nonstandard_integer_type (prec, 1);
4838 tmp = build1 (NOP_EXPR, type, names[i]);
4839 }
4840 if (!double_int_zero_p (minv))
4841 {
4842 tmp = build2 (PLUS_EXPR, type, tmp,
4843 double_int_to_tree (type,
4844 double_int_neg (minv)));
4845 maxv2 = double_int_sub (maxv, minv);
4846 }
4847 new_val = double_int_to_tree (type, maxv2);
4848
4849 if (dump_file)
4850 {
4851 fprintf (dump_file, "Adding assert for ");
4852 print_generic_expr (dump_file, names[i], 0);
4853 fprintf (dump_file, " from ");
4854 print_generic_expr (dump_file, tmp, 0);
4855 fprintf (dump_file, "\n");
4856 }
4857
4858 register_new_assert_for (names[i], tmp, LE_EXPR,
4859 new_val, NULL, e, bsi);
4860 retval = true;
4861 }
4862 }
4863 }
4864 }
4865
4866 return retval;
4867 }
4868
4869 /* OP is an operand of a truth value expression which is known to have
4870 a particular value. Register any asserts for OP and for any
4871 operands in OP's defining statement.
4872
4873 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4874 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
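/* As an illustration (hypothetical SSA names): if y_4 = x_1 & x_2
   with x_1 = a_2 > 5 and x_2 = b_3 != 0, and OP is y_4 with CODE ==
   NE_EXPR (y_4 is known nonzero), we recurse through the BIT_AND_EXPR
   and the comparisons and may register asserts for a_2 > 5 and for
   b_3 being nonzero.  */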
4875
4876 static bool
4877 register_edge_assert_for_1 (tree op, enum tree_code code,
4878 edge e, gimple_stmt_iterator bsi)
4879 {
4880 bool retval = false;
4881 gimple op_def;
4882 tree val;
4883 enum tree_code rhs_code;
4884
4885 /* We only care about SSA_NAMEs. */
4886 if (TREE_CODE (op) != SSA_NAME)
4887 return false;
4888
4889 /* We know that OP will have a zero or nonzero value. If OP is used
4890 more than once go ahead and register an assert for OP.
4891
4892 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4893 it will always be set for OP (because OP is used in a COND_EXPR in
4894 the subgraph). */
4895 if (!has_single_use (op))
4896 {
4897 val = build_int_cst (TREE_TYPE (op), 0);
4898 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4899 retval = true;
4900 }
4901
4902 /* Now look at how OP is set. If it's set from a comparison,
4903 a truth operation or some bit operations, then we may be able
4904 to register information about the operands of that assignment. */
4905 op_def = SSA_NAME_DEF_STMT (op);
4906 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4907 return retval;
4908
4909 rhs_code = gimple_assign_rhs_code (op_def);
4910
4911 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4912 {
4913 bool invert = (code == EQ_EXPR ? true : false);
4914 tree op0 = gimple_assign_rhs1 (op_def);
4915 tree op1 = gimple_assign_rhs2 (op_def);
4916
4917 if (TREE_CODE (op0) == SSA_NAME)
4918 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4919 invert);
4920 if (TREE_CODE (op1) == SSA_NAME)
4921 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4922 invert);
4923 }
4924 else if ((code == NE_EXPR
4925 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
4926 || (code == EQ_EXPR
4927 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
4928 {
4929 /* Recurse on each operand. */
4930 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4931 code, e, bsi);
4932 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4933 code, e, bsi);
4934 }
4935 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
4936 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
4937 {
4938 /* Recurse, flipping CODE. */
4939 code = invert_tree_comparison (code, false);
4940 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4941 code, e, bsi);
4942 }
4943 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4944 {
4945 /* Recurse through the copy. */
4946 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4947 code, e, bsi);
4948 }
4949 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4950 {
4951 /* Recurse through the type conversion. */
4952 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4953 code, e, bsi);
4954 }
4955
4956 return retval;
4957 }
4958
4959 /* Try to register an edge assertion for SSA name NAME on edge E for
4960 the condition COND contributing to the conditional jump pointed to by SI.
4961 Return true if an assertion for NAME could be registered. */
4962
4963 static bool
4964 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4965 enum tree_code cond_code, tree cond_op0,
4966 tree cond_op1)
4967 {
4968 tree val;
4969 enum tree_code comp_code;
4970 bool retval = false;
4971 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4972
4973 /* Do not attempt to infer anything in names that flow through
4974 abnormal edges. */
4975 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4976 return false;
4977
4978 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4979 cond_op0, cond_op1,
4980 is_else_edge,
4981 &comp_code, &val))
4982 return false;
4983
4984 /* Register ASSERT_EXPRs for name. */
4985 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4986 cond_op1, is_else_edge);
4987
4988
4989 /* If COND is effectively an equality test of an SSA_NAME against
4990 the value zero or one, then we may be able to assert values
4991 for SSA_NAMEs which flow into COND. */
4992
4993 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
4994 statement of NAME we can assert both operands of the BIT_AND_EXPR
4995 have nonzero value. */
4996 if (((comp_code == EQ_EXPR && integer_onep (val))
4997 || (comp_code == NE_EXPR && integer_zerop (val))))
4998 {
4999 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5000
5001 if (is_gimple_assign (def_stmt)
5002 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5003 {
5004 tree op0 = gimple_assign_rhs1 (def_stmt);
5005 tree op1 = gimple_assign_rhs2 (def_stmt);
5006 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5007 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5008 }
5009 }
5010
5011 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5012 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5013 have zero value. */
5014 if (((comp_code == EQ_EXPR && integer_zerop (val))
5015 || (comp_code == NE_EXPR && integer_onep (val))))
5016 {
5017 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5018
5019 /* For BIT_IOR_EXPR, both operands necessarily have zero value
5020 only if NAME == 0, or if the type precision is one. */
5021 if (is_gimple_assign (def_stmt)
5022 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5023 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5024 || comp_code == EQ_EXPR)))
5025 {
5026 tree op0 = gimple_assign_rhs1 (def_stmt);
5027 tree op1 = gimple_assign_rhs2 (def_stmt);
5028 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5029 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5030 }
5031 }
5032
5033 return retval;
5034 }
5035
5036
5037 /* Determine whether the outgoing edges of BB should receive an
5038 ASSERT_EXPR for each of the operands of BB's LAST statement.
5039 The last statement of BB must be a COND_EXPR.
5040
5041 If any of the sub-graphs rooted at BB have an interesting use of
5042 the predicate operands, an assert location node is added to the
5043 list of assertions for the corresponding operands. */
5044
5045 static bool
5046 find_conditional_asserts (basic_block bb, gimple last)
5047 {
5048 bool need_assert;
5049 gimple_stmt_iterator bsi;
5050 tree op;
5051 edge_iterator ei;
5052 edge e;
5053 ssa_op_iter iter;
5054
5055 need_assert = false;
5056 bsi = gsi_for_stmt (last);
5057
5058 /* Look for uses of the operands in each of the sub-graphs
5059 rooted at BB. We need to check each of the outgoing edges
5060 separately, so that we know what kind of ASSERT_EXPR to
5061 insert. */
5062 FOR_EACH_EDGE (e, ei, bb->succs)
5063 {
5064 if (e->dest == bb)
5065 continue;
5066
5067 /* Register the necessary assertions for each operand in the
5068 conditional predicate. */
5069 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5070 {
5071 need_assert |= register_edge_assert_for (op, e, bsi,
5072 gimple_cond_code (last),
5073 gimple_cond_lhs (last),
5074 gimple_cond_rhs (last));
5075 }
5076 }
5077
5078 return need_assert;
5079 }
5080
5081 struct case_info
5082 {
5083 tree expr;
5084 basic_block bb;
5085 };
5086
5087 /* Compare two case labels sorting first by the destination bb index
5088 and then by the case value. */
5089
5090 static int
5091 compare_case_labels (const void *p1, const void *p2)
5092 {
5093 const struct case_info *ci1 = (const struct case_info *) p1;
5094 const struct case_info *ci2 = (const struct case_info *) p2;
5095 int idx1 = ci1->bb->index;
5096 int idx2 = ci2->bb->index;
5097
5098 if (idx1 < idx2)
5099 return -1;
5100 else if (idx1 == idx2)
5101 {
5102 /* Make sure the default label is first in a group. */
5103 if (!CASE_LOW (ci1->expr))
5104 return -1;
5105 else if (!CASE_LOW (ci2->expr))
5106 return 1;
5107 else
5108 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5109 CASE_LOW (ci2->expr));
5110 }
5111 else
5112 return 1;
5113 }
5114
5115 /* Determine whether the outgoing edges of BB should receive an
5116 ASSERT_EXPR for each of the operands of BB's LAST statement.
5117 The last statement of BB must be a SWITCH_EXPR.
5118
5119 If any of the sub-graphs rooted at BB have an interesting use of
5120 the predicate operands, an assert location node is added to the
5121 list of assertions for the corresponding operands. */
5122
5123 static bool
5124 find_switch_asserts (basic_block bb, gimple last)
5125 {
5126 bool need_assert;
5127 gimple_stmt_iterator bsi;
5128 tree op;
5129 edge e;
5130 struct case_info *ci;
5131 size_t n = gimple_switch_num_labels (last);
5132 #if GCC_VERSION >= 4000
5133 unsigned int idx;
5134 #else
5135 /* Work around GCC 3.4 bug (PR 37086). */
5136 volatile unsigned int idx;
5137 #endif
5138
5139 need_assert = false;
5140 bsi = gsi_for_stmt (last);
5141 op = gimple_switch_index (last);
5142 if (TREE_CODE (op) != SSA_NAME)
5143 return false;
5144
5145 /* Build a vector of case labels sorted by destination label. */
5146 ci = XNEWVEC (struct case_info, n);
5147 for (idx = 0; idx < n; ++idx)
5148 {
5149 ci[idx].expr = gimple_switch_label (last, idx);
5150 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5151 }
5152 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5153
5154 for (idx = 0; idx < n; ++idx)
5155 {
5156 tree min, max;
5157 tree cl = ci[idx].expr;
5158 basic_block cbb = ci[idx].bb;
5159
5160 min = CASE_LOW (cl);
5161 max = CASE_HIGH (cl);
5162
5163 /* If there are multiple case labels with the same destination
5164 we need to combine them to a single value range for the edge. */
5165 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5166 {
5167 /* Skip labels until the last of the group. */
5168 do {
5169 ++idx;
5170 } while (idx < n && cbb == ci[idx].bb);
5171 --idx;
5172
5173 /* Pick up the maximum of the case label range. */
5174 if (CASE_HIGH (ci[idx].expr))
5175 max = CASE_HIGH (ci[idx].expr);
5176 else
5177 max = CASE_LOW (ci[idx].expr);
5178 }
5179
5180 /* Nothing to do if the range includes the default label until we
5181 can register anti-ranges. */
5182 if (min == NULL_TREE)
5183 continue;
5184
5185 /* Find the edge to register the assert expr on. */
5186 e = find_edge (bb, cbb);
5187
5188 /* Register the necessary assertions for the operand in the
5189 SWITCH_EXPR. */
5190 need_assert |= register_edge_assert_for (op, e, bsi,
5191 max ? GE_EXPR : EQ_EXPR,
5192 op,
5193 fold_convert (TREE_TYPE (op),
5194 min));
5195 if (max)
5196 {
5197 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
5198 op,
5199 fold_convert (TREE_TYPE (op),
5200 max));
5201 }
5202 }
5203
5204 XDELETEVEC (ci);
5205 return need_assert;
5206 }
5207
5208
5209 /* Traverse all the statements in block BB looking for statements that
5210 may generate useful assertions for the SSA names in their operand.
5211 If a statement produces a useful assertion A for name N_i, then the
5212 list of assertions already generated for N_i is scanned to
5213 determine if A is actually needed.
5214
5215 If N_i already had the assertion A at a location dominating the
5216 current location, then nothing needs to be done. Otherwise, the
5217 new location for A is recorded instead.
5218
5219 1- For every statement S in BB, all the variables used by S are
5220 added to bitmap FOUND_IN_SUBGRAPH.
5221
5222 2- If statement S uses an operand N in a way that exposes a known
5223 value range for N, then if N was not already generated by an
5224 ASSERT_EXPR, create a new assert location for N. For instance,
5225 if N is a pointer and the statement dereferences it, we can
5226 assume that N is not NULL.
5227
5228 3- COND_EXPRs are a special case of #2. We can derive range
5229 information from the predicate but need to insert different
5230 ASSERT_EXPRs for each of the sub-graphs rooted at the
5231 conditional block. If the last statement of BB is a conditional
5232 expression of the form 'X op Y', then
5233
5234 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5235
5236 b) If the conditional is the only entry point to the sub-graph
5237 corresponding to the THEN_CLAUSE, recurse into it. On
5238 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5239 an ASSERT_EXPR is added for the corresponding variable.
5240
5241 c) Repeat step (b) on the ELSE_CLAUSE.
5242
5243 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5244
5245 For instance,
5246
5247 if (a == 9)
5248 b = a;
5249 else
5250 b = c + 1;
5251
5252 In this case, an assertion on the THEN clause is useful to
5253 determine that 'a' is always 9 on that edge. However, an assertion
5254 on the ELSE clause would be unnecessary.
5255
5256 4- If BB does not end in a conditional expression, then we recurse
5257 into BB's dominator children.
5258
5259 At the end of the recursive traversal, every SSA name will have a
5260 list of locations where ASSERT_EXPRs should be added. When a new
5261 location for name N is found, it is registered by calling
5262 register_new_assert_for. That function keeps track of all the
5263 registered assertions to prevent adding unnecessary assertions.
5264 For instance, if a pointer P_4 is dereferenced more than once in a
5265 dominator tree, only the location dominating all the dereference of
5266 P_4 will receive an ASSERT_EXPR.
5267
5268 If this function returns true, then it means that there are names
5269 for which we need to generate ASSERT_EXPRs. Those assertions are
5270 inserted by process_assert_insertions. */
5271
5272 static bool
5273 find_assert_locations_1 (basic_block bb, sbitmap live)
5274 {
5275 gimple_stmt_iterator si;
5276 gimple last;
5277 gimple phi;
5278 bool need_assert;
5279
5280 need_assert = false;
5281 last = last_stmt (bb);
5282
5283 /* If BB's last statement is a conditional statement involving integer
5284 operands, determine if we need to add ASSERT_EXPRs. */
5285 if (last
5286 && gimple_code (last) == GIMPLE_COND
5287 && !fp_predicate (last)
5288 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5289 need_assert |= find_conditional_asserts (bb, last);
5290
5291 /* If BB's last statement is a switch statement involving integer
5292 operands, determine if we need to add ASSERT_EXPRs. */
5293 if (last
5294 && gimple_code (last) == GIMPLE_SWITCH
5295 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5296 need_assert |= find_switch_asserts (bb, last);
5297
5298 /* Traverse all the statements in BB marking used names and looking
5299 for statements that may infer assertions for their used operands. */
5300 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5301 {
5302 gimple stmt;
5303 tree op;
5304 ssa_op_iter i;
5305
5306 stmt = gsi_stmt (si);
5307
5308 if (is_gimple_debug (stmt))
5309 continue;
5310
5311 /* See if we can derive an assertion for any of STMT's operands. */
5312 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5313 {
5314 tree value;
5315 enum tree_code comp_code;
5316
5317 /* Mark OP in our live bitmap. */
5318 SET_BIT (live, SSA_NAME_VERSION (op));
5319
5320 /* If OP is used in such a way that we can infer a value
5321 range for it, and we don't find a previous assertion for
5322 it, create a new assertion location node for OP. */
5323 if (infer_value_range (stmt, op, &comp_code, &value))
5324 {
5325 /* If we are able to infer a nonzero value range for OP,
5326 then walk backwards through the use-def chain to see if OP
5327 was set via a typecast.
5328
5329 If so, then we can also infer a nonzero value range
5330 for the operand of the NOP_EXPR. */
5331 if (comp_code == NE_EXPR && integer_zerop (value))
5332 {
5333 tree t = op;
5334 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5335
5336 while (is_gimple_assign (def_stmt)
5337 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5338 && TREE_CODE
5339 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5340 && POINTER_TYPE_P
5341 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5342 {
5343 t = gimple_assign_rhs1 (def_stmt);
5344 def_stmt = SSA_NAME_DEF_STMT (t);
5345
5346 /* Note we want to register the assert for the
5347 operand of the NOP_EXPR after SI, not after the
5348 conversion. */
5349 if (! has_single_use (t))
5350 {
5351 register_new_assert_for (t, t, comp_code, value,
5352 bb, NULL, si);
5353 need_assert = true;
5354 }
5355 }
5356 }
5357
5358 /* If OP is used only once, namely in this STMT, don't
5359 bother creating an ASSERT_EXPR for it. Such an
5360 ASSERT_EXPR would do nothing but increase compile time. */
5361 if (!has_single_use (op))
5362 {
5363 register_new_assert_for (op, op, comp_code, value,
5364 bb, NULL, si);
5365 need_assert = true;
5366 }
5367 }
5368 }
5369 }
5370
5371 /* Traverse all PHI nodes in BB marking used operands. */
5372 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
5373 {
5374 use_operand_p arg_p;
5375 ssa_op_iter i;
5376 phi = gsi_stmt (si);
5377
5378 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5379 {
5380 tree arg = USE_FROM_PTR (arg_p);
5381 if (TREE_CODE (arg) == SSA_NAME)
5382 SET_BIT (live, SSA_NAME_VERSION (arg));
5383 }
5384 }
5385
5386 return need_assert;
5387 }
5388
5389 /* Do an RPO walk over the function computing SSA name liveness
5390 on-the-fly and deciding on assert expressions to insert.
5391 Returns true if there are assert expressions to be inserted. */
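/* A rough sketch of the liveness bookkeeping on a hypothetical diamond

       B1 -> B2,  B1 -> B3,  B2 -> B4,  B3 -> B4

   Blocks are processed in reverse of their RPO order (B4, B3, B2, B1);
   the names used in B4 are OR-ed into the live bitmaps of B2 and B3,
   and those in turn into B1, so that when B1 is processed we already
   know which names are live on its outgoing edges.  */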
5392
5393 static bool
5394 find_assert_locations (void)
5395 {
5396 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5397 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5398 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5399 int rpo_cnt, i;
5400 bool need_asserts;
5401
5402 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
5403 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5404 for (i = 0; i < rpo_cnt; ++i)
5405 bb_rpo[rpo[i]] = i;
5406
5407 need_asserts = false;
5408 for (i = rpo_cnt-1; i >= 0; --i)
5409 {
5410 basic_block bb = BASIC_BLOCK (rpo[i]);
5411 edge e;
5412 edge_iterator ei;
5413
5414 if (!live[rpo[i]])
5415 {
5416 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5417 sbitmap_zero (live[rpo[i]]);
5418 }
5419
5420 /* Process BB and update the live information with uses in
5421 this block. */
5422 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5423
5424 /* Merge liveness into the predecessor blocks and free it. */
5425 if (!sbitmap_empty_p (live[rpo[i]]))
5426 {
5427 int pred_rpo = i;
5428 FOR_EACH_EDGE (e, ei, bb->preds)
5429 {
5430 int pred = e->src->index;
5431 if (e->flags & EDGE_DFS_BACK)
5432 continue;
5433
5434 if (!live[pred])
5435 {
5436 live[pred] = sbitmap_alloc (num_ssa_names);
5437 sbitmap_zero (live[pred]);
5438 }
5439 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5440
5441 if (bb_rpo[pred] < pred_rpo)
5442 pred_rpo = bb_rpo[pred];
5443 }
5444
5445 /* Record the RPO number of the last visited block that needs
5446 live information from this block. */
5447 last_rpo[rpo[i]] = pred_rpo;
5448 }
5449 else
5450 {
5451 sbitmap_free (live[rpo[i]]);
5452 live[rpo[i]] = NULL;
5453 }
5454
5455 /* We can free all successors' live bitmaps if all their
5456 predecessors have been visited already. */
5457 FOR_EACH_EDGE (e, ei, bb->succs)
5458 if (last_rpo[e->dest->index] == i
5459 && live[e->dest->index])
5460 {
5461 sbitmap_free (live[e->dest->index]);
5462 live[e->dest->index] = NULL;
5463 }
5464 }
5465
5466 XDELETEVEC (rpo);
5467 XDELETEVEC (bb_rpo);
5468 XDELETEVEC (last_rpo);
5469 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5470 if (live[i])
5471 sbitmap_free (live[i]);
5472 XDELETEVEC (live);
5473
5474 return need_asserts;
5475 }
5476
5477 /* Create an ASSERT_EXPR for NAME and insert it in the location
5478 indicated by LOC. Return true if we made any edge insertions. */
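/* As a sketch (the names below are hypothetical), for a registered
   location with expr = x_1, comp_code = GT_EXPR and val = 5 this builds

       x_2 = ASSERT_EXPR <x_1, x_1 > 5>

   and inserts it either right after LOC->SI or on the edge LOC->E,
   depending on the cases handled below.  */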
5479
5480 static bool
5481 process_assert_insertions_for (tree name, assert_locus_t loc)
5482 {
5483 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5484 gimple stmt;
5485 tree cond;
5486 gimple assert_stmt;
5487 edge_iterator ei;
5488 edge e;
5489
5490 /* If we have X <=> X do not insert an assert expr for that. */
5491 if (loc->expr == loc->val)
5492 return false;
5493
5494 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5495 assert_stmt = build_assert_expr_for (cond, name);
5496 if (loc->e)
5497 {
5498 /* We have been asked to insert the assertion on an edge. This
5499 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5500 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5501 || (gimple_code (gsi_stmt (loc->si))
5502 == GIMPLE_SWITCH));
5503
5504 gsi_insert_on_edge (loc->e, assert_stmt);
5505 return true;
5506 }
5507
5508 /* Otherwise, we can insert right after LOC->SI iff the
5509 statement is not one that ends the basic block. */
5510 stmt = gsi_stmt (loc->si);
5511 if (!stmt_ends_bb_p (stmt))
5512 {
5513 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5514 return false;
5515 }
5516
5517 /* If STMT must be the last statement in BB, we can only insert new
5518 assertions on the non-abnormal edge out of BB. Note that since
5519 STMT is not control flow, there may only be one non-abnormal edge
5520 out of BB. */
5521 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5522 if (!(e->flags & EDGE_ABNORMAL))
5523 {
5524 gsi_insert_on_edge (e, assert_stmt);
5525 return true;
5526 }
5527
5528 gcc_unreachable ();
5529 }
5530
5531
5532 /* Process all the insertions registered for every name N_i recorded
5533 in NEED_ASSERT_FOR. The list of assertions to be inserted is
5534 found in ASSERTS_FOR[i]. */
5535
5536 static void
5537 process_assert_insertions (void)
5538 {
5539 unsigned i;
5540 bitmap_iterator bi;
5541 bool update_edges_p = false;
5542 int num_asserts = 0;
5543
5544 if (dump_file && (dump_flags & TDF_DETAILS))
5545 dump_all_asserts (dump_file);
5546
5547 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5548 {
5549 assert_locus_t loc = asserts_for[i];
5550 gcc_assert (loc);
5551
5552 while (loc)
5553 {
5554 assert_locus_t next = loc->next;
5555 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5556 free (loc);
5557 loc = next;
5558 num_asserts++;
5559 }
5560 }
5561
5562 if (update_edges_p)
5563 gsi_commit_edge_inserts ();
5564
5565 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5566 num_asserts);
5567 }
5568
5569
5570 /* Traverse the flowgraph looking for conditional jumps to insert range
5571 expressions. These range expressions are meant to provide information
5572 to optimizations that need to reason in terms of value ranges. They
5573 will not be expanded into RTL. For instance, given:
5574
5575 x = ...
5576 y = ...
5577 if (x < y)
5578 y = x - 2;
5579 else
5580 x = y + 3;
5581
5582 this pass will transform the code into:
5583
5584 x = ...
5585 y = ...
5586 if (x < y)
5587 {
5588 x = ASSERT_EXPR <x, x < y>
5589 y = x - 2
5590 }
5591 else
5592 {
5593 y = ASSERT_EXPR <y, x <= y>
5594 x = y + 3
5595 }
5596
5597 The idea is that once copy and constant propagation have run, other
5598 optimizations will be able to determine what ranges of values 'x' can
5599 take in different paths of the code, simply by checking the reaching
5600 definition of 'x'. */
5601
5602 static void
5603 insert_range_assertions (void)
5604 {
5605 need_assert_for = BITMAP_ALLOC (NULL);
5606 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5607
5608 calculate_dominance_info (CDI_DOMINATORS);
5609
5610 if (find_assert_locations ())
5611 {
5612 process_assert_insertions ();
5613 update_ssa (TODO_update_ssa_no_phi);
5614 }
5615
5616 if (dump_file && (dump_flags & TDF_DETAILS))
5617 {
5618 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5619 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5620 }
5621
5622 free (asserts_for);
5623 BITMAP_FREE (need_assert_for);
5624 }
5625
5626 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
5627 and "struct" hacks. If VRP can determine that the
5628 array subscript is a constant, check whether it is outside the valid
5629 range. If the array subscript is a RANGE, warn if it does not
5630 overlap the valid range.
5631 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
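/* For instance (a hypothetical example), given

       int a[10];
       ... = a[10];     diagnosed: subscript is above array bounds
       p = &a[10];      one past the end; allowed when IGNORE_OFF_BY_ONE
       p = &a[11];      still diagnosed

   the one-past-the-end subscript is tolerated only in the ADDR_EXPR case.  */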
5632
5633 static void
5634 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5635 {
5636 value_range_t* vr = NULL;
5637 tree low_sub, up_sub;
5638 tree low_bound, up_bound, up_bound_p1;
5639 tree base;
5640
5641 if (TREE_NO_WARNING (ref))
5642 return;
5643
5644 low_sub = up_sub = TREE_OPERAND (ref, 1);
5645 up_bound = array_ref_up_bound (ref);
5646
5647 /* Cannot check flexible arrays. */
5648 if (!up_bound
5649 || TREE_CODE (up_bound) != INTEGER_CST)
5650 return;
5651
5652 /* Accesses to trailing arrays via pointers may access storage
5653 beyond the type's array bounds. */
5654 base = get_base_address (ref);
5655 if (base && TREE_CODE (base) == MEM_REF)
5656 {
5657 tree cref, next = NULL_TREE;
5658
5659 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5660 return;
5661
5662 cref = TREE_OPERAND (ref, 0);
5663 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5664 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5665 next && TREE_CODE (next) != FIELD_DECL;
5666 next = DECL_CHAIN (next))
5667 ;
5668
5669 /* If this is the last field in a struct type or a field in a
5670 union type do not warn. */
5671 if (!next)
5672 return;
5673 }
5674
5675 low_bound = array_ref_low_bound (ref);
5676 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5677
5678 if (TREE_CODE (low_sub) == SSA_NAME)
5679 {
5680 vr = get_value_range (low_sub);
5681 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5682 {
5683 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5684 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5685 }
5686 }
5687
5688 if (vr && vr->type == VR_ANTI_RANGE)
5689 {
5690 if (TREE_CODE (up_sub) == INTEGER_CST
5691 && tree_int_cst_lt (up_bound, up_sub)
5692 && TREE_CODE (low_sub) == INTEGER_CST
5693 && tree_int_cst_lt (low_sub, low_bound))
5694 {
5695 warning_at (location, OPT_Warray_bounds,
5696 "array subscript is outside array bounds");
5697 TREE_NO_WARNING (ref) = 1;
5698 }
5699 }
5700 else if (TREE_CODE (up_sub) == INTEGER_CST
5701 && (ignore_off_by_one
5702 ? (tree_int_cst_lt (up_bound, up_sub)
5703 && !tree_int_cst_equal (up_bound_p1, up_sub))
5704 : (tree_int_cst_lt (up_bound, up_sub)
5705 || tree_int_cst_equal (up_bound_p1, up_sub))))
5706 {
5707 warning_at (location, OPT_Warray_bounds,
5708 "array subscript is above array bounds");
5709 TREE_NO_WARNING (ref) = 1;
5710 }
5711 else if (TREE_CODE (low_sub) == INTEGER_CST
5712 && tree_int_cst_lt (low_sub, low_bound))
5713 {
5714 warning_at (location, OPT_Warray_bounds,
5715 "array subscript is below array bounds");
5716 TREE_NO_WARNING (ref) = 1;
5717 }
5718 }
5719
5720 /* Search whether the expression T, located at LOCATION, computes the
5721 address of an ARRAY_REF, and call check_array_ref on it. */
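/* For instance (hypothetical GIMPLE), given

       p_1 = &a[12];
       q_2 = p_1;
       ... use of q_2 at LOCATION ...

   starting from q_2 we follow the single-RHS assignments back to the
   ADDR_EXPR &a[12] and check its ARRAY_REF with IGNORE_OFF_BY_ONE set.  */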
5722
5723 static void
5724 search_for_addr_array (tree t, location_t location)
5725 {
5726 while (TREE_CODE (t) == SSA_NAME)
5727 {
5728 gimple g = SSA_NAME_DEF_STMT (t);
5729
5730 if (gimple_code (g) != GIMPLE_ASSIGN)
5731 return;
5732
5733 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5734 != GIMPLE_SINGLE_RHS)
5735 return;
5736
5737 t = gimple_assign_rhs1 (g);
5738 }
5739
5740
5741 /* We are only interested in addresses of ARRAY_REF's. */
5742 if (TREE_CODE (t) != ADDR_EXPR)
5743 return;
5744
5745 /* Check each ARRAY_REFs in the reference chain. */
5746 do
5747 {
5748 if (TREE_CODE (t) == ARRAY_REF)
5749 check_array_ref (location, t, true /*ignore_off_by_one*/);
5750
5751 t = TREE_OPERAND (t, 0);
5752 }
5753 while (handled_component_p (t));
5754
5755 if (TREE_CODE (t) == MEM_REF
5756 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5757 && !TREE_NO_WARNING (t))
5758 {
5759 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5760 tree low_bound, up_bound, el_sz;
5761 double_int idx;
5762 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5763 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5764 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5765 return;
5766
5767 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5768 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5769 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5770 if (!low_bound
5771 || TREE_CODE (low_bound) != INTEGER_CST
5772 || !up_bound
5773 || TREE_CODE (up_bound) != INTEGER_CST
5774 || !el_sz
5775 || TREE_CODE (el_sz) != INTEGER_CST)
5776 return;
5777
5778 idx = mem_ref_offset (t);
5779 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5780 if (double_int_scmp (idx, double_int_zero) < 0)
5781 {
5782 warning_at (location, OPT_Warray_bounds,
5783 "array subscript is below array bounds");
5784 TREE_NO_WARNING (t) = 1;
5785 }
5786 else if (double_int_scmp (idx,
5787 double_int_add
5788 (double_int_add
5789 (tree_to_double_int (up_bound),
5790 double_int_neg
5791 (tree_to_double_int (low_bound))),
5792 double_int_one)) > 0)
5793 {
5794 warning_at (location, OPT_Warray_bounds,
5795 "array subscript is above array bounds");
5796 TREE_NO_WARNING (t) = 1;
5797 }
5798 }
5799 }
5800
5801 /* walk_tree() callback that checks if *TP is
5802 an ARRAY_REF inside an ADDR_EXPR (in which case an array
5803 subscript one past the valid range is allowed). Call
5804 check_array_ref for each ARRAY_REF found. The location is
5805 passed in DATA. */
5806
5807 static tree
5808 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5809 {
5810 tree t = *tp;
5811 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5812 location_t location;
5813
5814 if (EXPR_HAS_LOCATION (t))
5815 location = EXPR_LOCATION (t);
5816 else
5817 {
5818 location_t *locp = (location_t *) wi->info;
5819 location = *locp;
5820 }
5821
5822 *walk_subtree = TRUE;
5823
5824 if (TREE_CODE (t) == ARRAY_REF)
5825 check_array_ref (location, t, false /*ignore_off_by_one*/);
5826
5827 if (TREE_CODE (t) == MEM_REF
5828 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5829 search_for_addr_array (TREE_OPERAND (t, 0), location);
5830
5831 if (TREE_CODE (t) == ADDR_EXPR)
5832 *walk_subtree = FALSE;
5833
5834 return NULL_TREE;
5835 }
5836
5837 /* Walk over all statements of all reachable BBs and call check_array_bounds
5838 on them. */
5839
5840 static void
5841 check_all_array_refs (void)
5842 {
5843 basic_block bb;
5844 gimple_stmt_iterator si;
5845
5846 FOR_EACH_BB (bb)
5847 {
5848 edge_iterator ei;
5849 edge e;
5850 bool executable = false;
5851
5852 /* Skip blocks that were found to be unreachable. */
5853 FOR_EACH_EDGE (e, ei, bb->preds)
5854 executable |= !!(e->flags & EDGE_EXECUTABLE);
5855 if (!executable)
5856 continue;
5857
5858 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5859 {
5860 gimple stmt = gsi_stmt (si);
5861 struct walk_stmt_info wi;
5862 if (!gimple_has_location (stmt))
5863 continue;
5864
5865 if (is_gimple_call (stmt))
5866 {
5867 size_t i;
5868 size_t n = gimple_call_num_args (stmt);
5869 for (i = 0; i < n; i++)
5870 {
5871 tree arg = gimple_call_arg (stmt, i);
5872 search_for_addr_array (arg, gimple_location (stmt));
5873 }
5874 }
5875 else
5876 {
5877 memset (&wi, 0, sizeof (wi));
5878 wi.info = CONST_CAST (void *, (const void *)
5879 gimple_location_ptr (stmt));
5880
5881 walk_gimple_op (gsi_stmt (si),
5882 check_array_bounds,
5883 &wi);
5884 }
5885 }
5886 }
5887 }
5888
5889 /* Convert range assertion expressions into the implied copies and
5890 copy propagate away the copies. Doing the trivial copy propagation
5891 here avoids the need to run the full copy propagation pass after
5892 VRP.
5893
5894 FIXME, this will eventually lead to copy propagation removing the
5895 names that had useful range information attached to them. For
5896 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5897 then N_i will have the range [3, +INF].
5898
5899 However, by converting the assertion into the implied copy
5900 operation N_i = N_j, we will then copy-propagate N_j into the uses
5901 of N_i and lose the range information. We may want to hold on to
5902 ASSERT_EXPRs a little while longer as the ranges could be used in
5903 things like jump threading.
5904
5905 The problem with keeping ASSERT_EXPRs around is that passes after
5906 VRP need to handle them appropriately.
5907
5908 Another approach would be to make the range information a first
5909 class property of the SSA_NAME so that it can be queried from
5910 any pass. This is made somewhat more complex by the need for
5911 multiple ranges to be associated with one SSA_NAME. */
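/* Concretely (a sketch using the example above), the assertion

       N_i = ASSERT_EXPR <N_j, N_j > 3>
       ... = N_i + 1;

   is rewritten so that the use reads N_j directly:

       ... = N_j + 1;

   and the assertion statement itself is removed.  */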
5912
5913 static void
5914 remove_range_assertions (void)
5915 {
5916 basic_block bb;
5917 gimple_stmt_iterator si;
5918
5919 /* Note that the GSI iterator bump happens at the bottom of the
5920 loop and no bump is necessary if we're removing the statement
5921 referenced by the current GSI. */
5922 FOR_EACH_BB (bb)
5923 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5924 {
5925 gimple stmt = gsi_stmt (si);
5926 gimple use_stmt;
5927
5928 if (is_gimple_assign (stmt)
5929 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5930 {
5931 tree rhs = gimple_assign_rhs1 (stmt);
5932 tree var;
5933 tree cond = fold (ASSERT_EXPR_COND (rhs));
5934 use_operand_p use_p;
5935 imm_use_iterator iter;
5936
5937 gcc_assert (cond != boolean_false_node);
5938
5939 /* Propagate the RHS into every use of the LHS. */
5940 var = ASSERT_EXPR_VAR (rhs);
5941 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5942 gimple_assign_lhs (stmt))
5943 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5944 {
5945 SET_USE (use_p, var);
5946 gcc_assert (TREE_CODE (var) == SSA_NAME);
5947 }
5948
5949 /* And finally, remove the copy, it is not needed. */
5950 gsi_remove (&si, true);
5951 release_defs (stmt);
5952 }
5953 else
5954 gsi_next (&si);
5955 }
5956 }
5957
5958
5959 /* Return true if STMT is interesting for VRP. */
5960
5961 static bool
5962 stmt_interesting_for_vrp (gimple stmt)
5963 {
5964 if (gimple_code (stmt) == GIMPLE_PHI
5965 && is_gimple_reg (gimple_phi_result (stmt))
5966 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5967 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5968 return true;
5969 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5970 {
5971 tree lhs = gimple_get_lhs (stmt);
5972
5973 /* In general, assignments with virtual operands are not useful
5974 for deriving ranges, with the obvious exception of calls to
5975 builtin functions. */
5976 if (lhs && TREE_CODE (lhs) == SSA_NAME
5977 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5978 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5979 && ((is_gimple_call (stmt)
5980 && gimple_call_fndecl (stmt) != NULL_TREE
5981 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
5982 || !gimple_vuse (stmt)))
5983 return true;
5984 }
5985 else if (gimple_code (stmt) == GIMPLE_COND
5986 || gimple_code (stmt) == GIMPLE_SWITCH)
5987 return true;
5988
5989 return false;
5990 }
5991
5992
5993 /* Initialize local data structures for VRP. */
5994
5995 static void
5996 vrp_initialize (void)
5997 {
5998 basic_block bb;
5999
6000 values_propagated = false;
6001 num_vr_values = num_ssa_names;
6002 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6003 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6004
6005 FOR_EACH_BB (bb)
6006 {
6007 gimple_stmt_iterator si;
6008
6009 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6010 {
6011 gimple phi = gsi_stmt (si);
6012 if (!stmt_interesting_for_vrp (phi))
6013 {
6014 tree lhs = PHI_RESULT (phi);
6015 set_value_range_to_varying (get_value_range (lhs));
6016 prop_set_simulate_again (phi, false);
6017 }
6018 else
6019 prop_set_simulate_again (phi, true);
6020 }
6021
6022 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6023 {
6024 gimple stmt = gsi_stmt (si);
6025
6026 /* If the statement is a control statement, we must simulate
6027 it at least once; otherwise its outgoing edges will never
6028 get added. */
6029 if (stmt_ends_bb_p (stmt))
6030 prop_set_simulate_again (stmt, true);
6031 else if (!stmt_interesting_for_vrp (stmt))
6032 {
6033 ssa_op_iter i;
6034 tree def;
6035 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6036 set_value_range_to_varying (get_value_range (def));
6037 prop_set_simulate_again (stmt, false);
6038 }
6039 else
6040 prop_set_simulate_again (stmt, true);
6041 }
6042 }
6043 }
6044
6045 /* Return the constant in NAME's value range if it is a singleton, else NAME. */
6046
6047 static inline tree
6048 vrp_valueize (tree name)
6049 {
6050 if (TREE_CODE (name) == SSA_NAME)
6051 {
6052 value_range_t *vr = get_value_range (name);
6053 if (vr->type == VR_RANGE
6054 && (vr->min == vr->max
6055 || operand_equal_p (vr->min, vr->max, 0)))
6056 return vr->min;
6057 }
6058 return name;
6059 }
6060
6061 /* Visit assignment or call STMT. If it produces an interesting range,
6062 record the SSA name in *OUTPUT_P. */
6063
6064 static enum ssa_prop_result
6065 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
6066 {
6067 tree def, lhs;
6068 ssa_op_iter iter;
6069 enum gimple_code code = gimple_code (stmt);
6070 lhs = gimple_get_lhs (stmt);
6071
6072 /* We only keep track of ranges in integral and pointer types. */
6073 if (TREE_CODE (lhs) == SSA_NAME
6074 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6075 /* It is valid to have NULL MIN/MAX values on a type. See
6076 build_range_type. */
6077 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
6078 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
6079 || POINTER_TYPE_P (TREE_TYPE (lhs))))
6080 {
6081 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6082
6083 /* Try folding the statement to a constant first. */
6084 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
6085 if (tem && !is_overflow_infinity (tem))
6086 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
6087 /* Then dispatch to value-range extracting functions. */
6088 else if (code == GIMPLE_CALL)
6089 extract_range_basic (&new_vr, stmt);
6090 else
6091 extract_range_from_assignment (&new_vr, stmt);
6092
6093 if (update_value_range (lhs, &new_vr))
6094 {
6095 *output_p = lhs;
6096
6097 if (dump_file && (dump_flags & TDF_DETAILS))
6098 {
6099 fprintf (dump_file, "Found new range for ");
6100 print_generic_expr (dump_file, lhs, 0);
6101 fprintf (dump_file, ": ");
6102 dump_value_range (dump_file, &new_vr);
6103 fprintf (dump_file, "\n\n");
6104 }
6105
6106 if (new_vr.type == VR_VARYING)
6107 return SSA_PROP_VARYING;
6108
6109 return SSA_PROP_INTERESTING;
6110 }
6111
6112 return SSA_PROP_NOT_INTERESTING;
6113 }
6114
6115 /* Every other statement produces no useful ranges. */
6116 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6117 set_value_range_to_varying (get_value_range (def));
6118
6119 return SSA_PROP_VARYING;
6120 }
6121
6122 /* Helper that returns the value range of the SSA_NAME with version I,
6123 or, if that range is varying or undefined, a symbolic range
6124 containing only the SSA_NAME itself. */
6125
6126 static inline value_range_t
6127 get_vr_for_comparison (int i)
6128 {
6129 value_range_t vr = *get_value_range (ssa_name (i));
6130
6131 /* If name N_i does not have a valid range, use N_i as its own
6132 range. This allows us to compare against names that may
6133 have N_i in their ranges. */
6134 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6135 {
6136 vr.type = VR_RANGE;
6137 vr.min = ssa_name (i);
6138 vr.max = ssa_name (i);
6139 }
6140
6141 return vr;
6142 }
6143
6144 /* Compare all the value ranges for names equivalent to VAR with VAL
6145 using comparison code COMP. Return the same value returned by
6146 compare_range_with_value, including the setting of
6147 *STRICT_OVERFLOW_P. */
6148
6149 static tree
6150 compare_name_with_value (enum tree_code comp, tree var, tree val,
6151 bool *strict_overflow_p)
6152 {
6153 bitmap_iterator bi;
6154 unsigned i;
6155 bitmap e;
6156 tree retval, t;
6157 int used_strict_overflow;
6158 bool sop;
6159 value_range_t equiv_vr;
6160
6161 /* Get the set of equivalences for VAR. */
6162 e = get_value_range (var)->equiv;
6163
6164 /* Start at -1. Set it to 0 if we do a comparison without relying
6165 on overflow, or 1 if all comparisons rely on overflow. */
6166 used_strict_overflow = -1;
6167
6168 /* Compare VAR's value range with VAL. */
6169 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
6170 sop = false;
6171 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
6172 if (retval)
6173 used_strict_overflow = sop ? 1 : 0;
6174
6175 /* If the equiv set is empty we have done all work we need to do. */
6176 if (e == NULL)
6177 {
6178 if (retval
6179 && used_strict_overflow > 0)
6180 *strict_overflow_p = true;
6181 return retval;
6182 }
6183
6184 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
6185 {
6186 equiv_vr = get_vr_for_comparison (i);
6187 sop = false;
6188 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
6189 if (t)
6190 {
6191 /* If we get different answers from different members
6192 of the equivalence set this check must be in a dead
6193 code region. Folding it to a trap representation
6194 would be correct here. For now just return don't-know. */
6195 if (retval != NULL
6196 && t != retval)
6197 {
6198 retval = NULL_TREE;
6199 break;
6200 }
6201 retval = t;
6202
6203 if (!sop)
6204 used_strict_overflow = 0;
6205 else if (used_strict_overflow < 0)
6206 used_strict_overflow = 1;
6207 }
6208 }
6209
6210 if (retval
6211 && used_strict_overflow > 0)
6212 *strict_overflow_p = true;
6213
6214 return retval;
6215 }
6216
6217
6218 /* Given a comparison code COMP and names N1 and N2, compare all the
6219 ranges equivalent to N1 against all the ranges equivalent to N2
6220 to determine the value of N1 COMP N2. Return the same value
6221 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6222 whether we relied on an overflow infinity in the comparison. */
6223
6224
6225 static tree
6226 compare_names (enum tree_code comp, tree n1, tree n2,
6227 bool *strict_overflow_p)
6228 {
6229 tree t, retval;
6230 bitmap e1, e2;
6231 bitmap_iterator bi1, bi2;
6232 unsigned i1, i2;
6233 int used_strict_overflow;
6234 static bitmap_obstack *s_obstack = NULL;
6235 static bitmap s_e1 = NULL, s_e2 = NULL;
6236
6237 /* Compare the ranges of every name equivalent to N1 against the
6238 ranges of every name equivalent to N2. */
6239 e1 = get_value_range (n1)->equiv;
6240 e2 = get_value_range (n2)->equiv;
6241
6242 /* Use the fake bitmaps if e1 or e2 are not available. */
6243 if (s_obstack == NULL)
6244 {
6245 s_obstack = XNEW (bitmap_obstack);
6246 bitmap_obstack_initialize (s_obstack);
6247 s_e1 = BITMAP_ALLOC (s_obstack);
6248 s_e2 = BITMAP_ALLOC (s_obstack);
6249 }
6250 if (e1 == NULL)
6251 e1 = s_e1;
6252 if (e2 == NULL)
6253 e2 = s_e2;
6254
6255 /* Add N1 and N2 to their own set of equivalences to avoid
6256 duplicating the body of the loop just to check N1 and N2
6257 ranges. */
6258 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
6259 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
6260
6261 /* If the equivalence sets have a common intersection, then the two
6262 names can be compared without checking their ranges. */
6263 if (bitmap_intersect_p (e1, e2))
6264 {
6265 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6266 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6267
6268 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
6269 ? boolean_true_node
6270 : boolean_false_node;
6271 }
6272
6273 /* Start at -1. Set it to 0 if we do a comparison without relying
6274 on overflow, or 1 if all comparisons rely on overflow. */
6275 used_strict_overflow = -1;
6276
6277 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
6278 already added to their own equivalence sets above, so the loops
6279 below also check the ranges of N1 and N2 themselves. */
6280 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6281 {
6282 value_range_t vr1 = get_vr_for_comparison (i1);
6283
6284 t = retval = NULL_TREE;
6285 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6286 {
6287 bool sop = false;
6288
6289 value_range_t vr2 = get_vr_for_comparison (i2);
6290
6291 t = compare_ranges (comp, &vr1, &vr2, &sop);
6292 if (t)
6293 {
6294 /* If we get different answers from different members
6295 of the equivalence set this check must be in a dead
6296 code region. Folding it to a trap representation
6297 would be correct here. For now just return don't-know. */
6298 if (retval != NULL
6299 && t != retval)
6300 {
6301 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6302 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6303 return NULL_TREE;
6304 }
6305 retval = t;
6306
6307 if (!sop)
6308 used_strict_overflow = 0;
6309 else if (used_strict_overflow < 0)
6310 used_strict_overflow = 1;
6311 }
6312 }
6313
6314 if (retval)
6315 {
6316 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6317 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6318 if (used_strict_overflow > 0)
6319 *strict_overflow_p = true;
6320 return retval;
6321 }
6322 }
6323
6324 /* None of the equivalent ranges are useful in computing this
6325 comparison. */
6326 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6327 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6328 return NULL_TREE;
6329 }
6330
6331 /* Helper function for vrp_evaluate_conditional_warnv. */
6332
6333 static tree
6334 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6335 tree op0, tree op1,
6336 bool * strict_overflow_p)
6337 {
6338 value_range_t *vr0, *vr1;
6339
6340 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6341 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6342
6343 if (vr0 && vr1)
6344 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6345 else if (vr0 && vr1 == NULL)
6346 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6347 else if (vr0 == NULL && vr1)
6348 return (compare_range_with_value
6349 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6350 return NULL;
6351 }
6352
6353 /* Helper function for vrp_evaluate_conditional_warnv. */
6354
6355 static tree
6356 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6357 tree op1, bool use_equiv_p,
6358 bool *strict_overflow_p, bool *only_ranges)
6359 {
6360 tree ret;
6361 if (only_ranges)
6362 *only_ranges = true;
6363
6364 /* We only deal with integral and pointer types. */
6365 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6366 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6367 return NULL_TREE;
6368
6369 if (use_equiv_p)
6370 {
6371 if (only_ranges
6372 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6373 (code, op0, op1, strict_overflow_p)))
6374 return ret;
6375 *only_ranges = false;
6376 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6377 return compare_names (code, op0, op1, strict_overflow_p);
6378 else if (TREE_CODE (op0) == SSA_NAME)
6379 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6380 else if (TREE_CODE (op1) == SSA_NAME)
6381 return (compare_name_with_value
6382 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6383 }
6384 else
6385 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6386 strict_overflow_p);
6387 return NULL_TREE;
6388 }
6389
6390 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6391 information. Return NULL if the conditional cannot be evaluated.
6392 The ranges of all the names equivalent with the operands in COND
6393 will be used when trying to compute the value. If the result is
6394 based on undefined signed overflow, issue a warning if
6395 appropriate. */
6396
6397 static tree
6398 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6399 {
6400 bool sop;
6401 tree ret;
6402 bool only_ranges;
6403
6404 /* Some passes and foldings leak constants with overflow flag set
6405 into the IL. Avoid doing wrong things with these and bail out. */
6406 if ((TREE_CODE (op0) == INTEGER_CST
6407 && TREE_OVERFLOW (op0))
6408 || (TREE_CODE (op1) == INTEGER_CST
6409 && TREE_OVERFLOW (op1)))
6410 return NULL_TREE;
6411
6412 sop = false;
6413 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6414 &only_ranges);
6415
6416 if (ret && sop)
6417 {
6418 enum warn_strict_overflow_code wc;
6419 const char* warnmsg;
6420
6421 if (is_gimple_min_invariant (ret))
6422 {
6423 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6424 warnmsg = G_("assuming signed overflow does not occur when "
6425 "simplifying conditional to constant");
6426 }
6427 else
6428 {
6429 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6430 warnmsg = G_("assuming signed overflow does not occur when "
6431 "simplifying conditional");
6432 }
6433
6434 if (issue_strict_overflow_warning (wc))
6435 {
6436 location_t location;
6437
6438 if (!gimple_has_location (stmt))
6439 location = input_location;
6440 else
6441 location = gimple_location (stmt);
6442 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6443 }
6444 }
6445
6446 if (warn_type_limits
6447 && ret && only_ranges
6448 && TREE_CODE_CLASS (code) == tcc_comparison
6449 && TREE_CODE (op0) == SSA_NAME)
6450 {
6451 /* If the comparison is being folded and the operand on the LHS
6452 is being compared against a constant value that is outside of
6453 the natural range of OP0's type, then the predicate will
6454 always fold regardless of the value of OP0. If -Wtype-limits
6455 was specified, emit a warning. */
6456 tree type = TREE_TYPE (op0);
6457 value_range_t *vr0 = get_value_range (op0);
6458
6459 if (vr0->type != VR_VARYING
6460 && INTEGRAL_TYPE_P (type)
6461 && vrp_val_is_min (vr0->min)
6462 && vrp_val_is_max (vr0->max)
6463 && is_gimple_min_invariant (op1))
6464 {
6465 location_t location;
6466
6467 if (!gimple_has_location (stmt))
6468 location = input_location;
6469 else
6470 location = gimple_location (stmt);
6471
6472 warning_at (location, OPT_Wtype_limits,
6473 integer_zerop (ret)
6474 ? G_("comparison always false "
6475 "due to limited range of data type")
6476 : G_("comparison always true "
6477 "due to limited range of data type"));
6478 }
6479 }
6480
6481 return ret;
6482 }
6483
6484
6485 /* Visit conditional statement STMT. If we can determine which edge
6486 will be taken out of STMT's basic block, record it in
6487 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6488 SSA_PROP_VARYING. */
6489
6490 static enum ssa_prop_result
6491 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6492 {
6493 tree val;
6494 bool sop;
6495
6496 *taken_edge_p = NULL;
6497
6498 if (dump_file && (dump_flags & TDF_DETAILS))
6499 {
6500 tree use;
6501 ssa_op_iter i;
6502
6503 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6504 print_gimple_stmt (dump_file, stmt, 0, 0);
6505 fprintf (dump_file, "\nWith known ranges\n");
6506
6507 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6508 {
6509 fprintf (dump_file, "\t");
6510 print_generic_expr (dump_file, use, 0);
6511 fprintf (dump_file, ": ");
6512 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6513 }
6514
6515 fprintf (dump_file, "\n");
6516 }
6517
6518 /* Compute the value of the predicate COND by checking the known
6519 ranges of each of its operands.
6520
6521 Note that we cannot evaluate all the equivalent ranges here
6522 because those ranges may not yet be final and with the current
6523 propagation strategy, we cannot determine when the value ranges
6524 of the names in the equivalence set have changed.
6525
6526 For instance, given the following code fragment
6527
6528 i_5 = PHI <8, i_13>
6529 ...
6530 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6531 if (i_14 == 1)
6532 ...
6533
6534 Assume that on the first visit to i_14, i_5 has the temporary
6535 range [8, 8] because the second argument to the PHI function is
6536 not yet executable. We derive the range ~[0, 0] for i_14 and the
6537 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6538 the first time, since i_14 is equivalent to the range [8, 8], we
6539 determine that the predicate is always false.
6540
6541 On the next round of propagation, i_13 is determined to be
6542 VARYING, which causes i_5 to drop down to VARYING. So, another
6543 visit to i_14 is scheduled. In this second visit, we compute the
6544 exact same range and equivalence set for i_14, namely ~[0, 0] and
6545 { i_5 }. But we did not have the previous range for i_5
6546 registered, so vrp_visit_assignment thinks that the range for
6547 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6548 is not visited again, which stops propagation from visiting
6549 statements in the THEN clause of that if().
6550
6551 To properly fix this we would need to keep the previous range
6552 value for the names in the equivalence set. This way we would've
6553 discovered that from one visit to the other i_5 changed from
6554 range [8, 8] to VR_VARYING.
6555
6556 However, fixing this apparent limitation may not be worth the
6557 additional checking. Testing on several code bases (GCC, DLV,
6558 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6559 4 more predicates folded in SPEC. */
6560 sop = false;
6561
6562 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6563 gimple_cond_lhs (stmt),
6564 gimple_cond_rhs (stmt),
6565 false, &sop, NULL);
6566 if (val)
6567 {
6568 if (!sop)
6569 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6570 else
6571 {
6572 if (dump_file && (dump_flags & TDF_DETAILS))
6573 fprintf (dump_file,
6574 "\nIgnoring predicate evaluation because "
6575 "it assumes that signed overflow is undefined");
6576 val = NULL_TREE;
6577 }
6578 }
6579
6580 if (dump_file && (dump_flags & TDF_DETAILS))
6581 {
6582 fprintf (dump_file, "\nPredicate evaluates to: ");
6583 if (val == NULL_TREE)
6584 fprintf (dump_file, "DON'T KNOW\n");
6585 else
6586 print_generic_stmt (dump_file, val, 0);
6587 }
6588
6589 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6590 }
6591
6592 /* Search the case label vector of switch statement STMT for the index
6593 *IDX of the CASE_LABEL that includes the value VAL. The search is
6594 restricted to the range [START_IDX, n - 1], n being the number of labels.
6595
6596 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6597 returned.
6598
6599 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6600 it is placed in IDX and false is returned.
6601
6602 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6603 returned. */
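/* A minimal sketch of the same binary search over a plain sorted array
   LOWS[0..N-1] of case lows, assuming no CASE_HIGH ranges (the names
   below are hypothetical):

       size_t low = start_idx, high = n;
       while (high != low)
         {
           size_t i = (high + low) / 2;
           if (lows[i] == val)
             {
               *idx = i;          // exact case label found
               return true;
             }
           else if (lows[i] > val)
             high = i;            // VAL is in the lower half
           else
             low = i + 1;         // VAL is in the upper half
         }
       *idx = high;               // first label larger than VAL, or N
       return false;
   */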
6604
6605 static bool
6606 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6607 {
6608 size_t n = gimple_switch_num_labels (stmt);
6609 size_t low, high;
6610
6611 /* Find the case label containing VAL, or the next larger one.
6612 At each iteration we are searching in [low, high - 1]. */
6613
6614 for (low = start_idx, high = n; high != low; )
6615 {
6616 tree t;
6617 int cmp;
6618 /* Note that i != high, so we never ask for n. */
6619 size_t i = (high + low) / 2;
6620 t = gimple_switch_label (stmt, i);
6621
6622 /* Cache the result of comparing CASE_LOW and val. */
6623 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6624
6625 if (cmp == 0)
6626 {
6627 /* Ranges cannot be empty. */
6628 *idx = i;
6629 return true;
6630 }
6631 else if (cmp > 0)
6632 high = i;
6633 else
6634 {
6635 low = i + 1;
6636 if (CASE_HIGH (t) != NULL
6637 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6638 {
6639 *idx = i;
6640 return true;
6641 }
6642 }
6643 }
6644
6645 *idx = high;
6646 return false;
6647 }
6648
6649 /* Search the case label vector of switch statement STMT for the range of CASE_LABELs used
6650 for values between MIN and MAX. The first index is placed in MIN_IDX. The
6651 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6652 then MAX_IDX < MIN_IDX.
6653 Returns true if the default label is not needed. */
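/* For instance (hypothetical switches): with case labels 1, 2 and 3 and
   the range [1, 3], the labels are contiguous, so MIN_IDX/MAX_IDX cover
   all three and the default label is not needed.  With labels 1 and 3
   and the same range, the gap at 2 forces TAKE_DEFAULT.  */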
6654
6655 static bool
6656 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6657 size_t *max_idx)
6658 {
6659 size_t i, j;
6660 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6661 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6662
6663 if (i == j
6664 && min_take_default
6665 && max_take_default)
6666 {
6667 /* Only the default case label reached.
6668 Return an empty range. */
6669 *min_idx = 1;
6670 *max_idx = 0;
6671 return false;
6672 }
6673 else
6674 {
6675 bool take_default = min_take_default || max_take_default;
6676 tree low, high;
6677 size_t k;
6678
6679 if (max_take_default)
6680 j--;
6681
6682 /* If the case label range is contiguous, we do not need
6683 the default case label. Verify that. */
6684 high = CASE_LOW (gimple_switch_label (stmt, i));
6685 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6686 high = CASE_HIGH (gimple_switch_label (stmt, i));
6687 for (k = i + 1; k <= j; ++k)
6688 {
6689 low = CASE_LOW (gimple_switch_label (stmt, k));
6690 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6691 {
6692 take_default = true;
6693 break;
6694 }
6695 high = low;
6696 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6697 high = CASE_HIGH (gimple_switch_label (stmt, k));
6698 }
6699
6700 *min_idx = i;
6701 *max_idx = j;
6702 return !take_default;
6703 }
6704 }
6705
6706 /* Visit switch statement STMT. If we can determine which edge
6707 will be taken out of STMT's basic block, record it in
6708 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6709 SSA_PROP_VARYING. */
6710
6711 static enum ssa_prop_result
6712 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6713 {
6714 tree op, val;
6715 value_range_t *vr;
6716 size_t i = 0, j = 0;
6717 bool take_default;
6718
6719 *taken_edge_p = NULL;
6720 op = gimple_switch_index (stmt);
6721 if (TREE_CODE (op) != SSA_NAME)
6722 return SSA_PROP_VARYING;
6723
6724 vr = get_value_range (op);
6725 if (dump_file && (dump_flags & TDF_DETAILS))
6726 {
6727 fprintf (dump_file, "\nVisiting switch expression with operand ");
6728 print_generic_expr (dump_file, op, 0);
6729 fprintf (dump_file, " with known range ");
6730 dump_value_range (dump_file, vr);
6731 fprintf (dump_file, "\n");
6732 }
6733
6734 if (vr->type != VR_RANGE
6735 || symbolic_range_p (vr))
6736 return SSA_PROP_VARYING;
6737
6738 /* Find the single edge that is taken from the switch expression. */
6739 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6740
6741 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6742 label. */
6743 if (j < i)
6744 {
6745 gcc_assert (take_default);
6746 val = gimple_switch_default_label (stmt);
6747 }
6748 else
6749 {
6750 /* Check if labels with index i to j and maybe the default label
6751 all reach the same destination. */
6752
6753 val = gimple_switch_label (stmt, i);
6754 if (take_default
6755 && CASE_LABEL (gimple_switch_default_label (stmt))
6756 != CASE_LABEL (val))
6757 {
6758 if (dump_file && (dump_flags & TDF_DETAILS))
6759 fprintf (dump_file, " not a single destination for this "
6760 "range\n");
6761 return SSA_PROP_VARYING;
6762 }
6763 for (++i; i <= j; ++i)
6764 {
6765 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6766 {
6767 if (dump_file && (dump_flags & TDF_DETAILS))
6768 fprintf (dump_file, " not a single destination for this "
6769 "range\n");
6770 return SSA_PROP_VARYING;
6771 }
6772 }
6773 }
6774
6775 *taken_edge_p = find_edge (gimple_bb (stmt),
6776 label_to_block (CASE_LABEL (val)));
6777
6778 if (dump_file && (dump_flags & TDF_DETAILS))
6779 {
6780 fprintf (dump_file, " will take edge to ");
6781 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6782 }
6783
6784 return SSA_PROP_INTERESTING;
6785 }
6786
6787
6788 /* Evaluate statement STMT. If the statement produces a useful range,
6789 return SSA_PROP_INTERESTING and record the SSA name with the
6790 interesting range into *OUTPUT_P.
6791
6792 If STMT is a conditional branch and we can determine its truth
6793 value, the taken edge is recorded in *TAKEN_EDGE_P.
6794
6795 If STMT produces a varying value, return SSA_PROP_VARYING. */
6796
6797 static enum ssa_prop_result
6798 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6799 {
6800 tree def;
6801 ssa_op_iter iter;
6802
6803 if (dump_file && (dump_flags & TDF_DETAILS))
6804 {
6805 fprintf (dump_file, "\nVisiting statement:\n");
6806 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6807 fprintf (dump_file, "\n");
6808 }
6809
6810 if (!stmt_interesting_for_vrp (stmt))
6811 gcc_assert (stmt_ends_bb_p (stmt));
6812 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6813 {
6814 /* In general, assignments with virtual operands are not useful
6815 for deriving ranges, with the obvious exception of calls to
6816 builtin functions. */
6817 if ((is_gimple_call (stmt)
6818 && gimple_call_fndecl (stmt) != NULL_TREE
6819 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6820 || !gimple_vuse (stmt))
6821 return vrp_visit_assignment_or_call (stmt, output_p);
6822 }
6823 else if (gimple_code (stmt) == GIMPLE_COND)
6824 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6825 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6826 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6827
6828 /* All other statements produce nothing of interest for VRP, so mark
6829 their outputs varying and prevent further simulation. */
6830 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6831 set_value_range_to_varying (get_value_range (def));
6832
6833 return SSA_PROP_VARYING;
6834 }
6835
6836
6837 /* Meet operation for value ranges. Given two value ranges VR0 and
6838 VR1, store in VR0 a range that contains both VR0 and VR1. This
6839 may not be the smallest possible such range. */
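/* For example, meeting [1, 5] with [8, 10] yields the convex hull
   [1, 10], even though the values 6 and 7 can never occur; this is
   the sense in which the result may not be the smallest range.  */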
6840
6841 static void
6842 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6843 {
6844 if (vr0->type == VR_UNDEFINED)
6845 {
6846 copy_value_range (vr0, vr1);
6847 return;
6848 }
6849
6850 if (vr1->type == VR_UNDEFINED)
6851 {
6852 /* Nothing to do. VR0 already has the resulting range. */
6853 return;
6854 }
6855
6856 if (vr0->type == VR_VARYING)
6857 {
6858 /* Nothing to do. VR0 already has the resulting range. */
6859 return;
6860 }
6861
6862 if (vr1->type == VR_VARYING)
6863 {
6864 set_value_range_to_varying (vr0);
6865 return;
6866 }
6867
6868 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6869 {
6870 int cmp;
6871 tree min, max;
6872
6873 /* Compute the convex hull of the ranges. The lower limit of
6874 the new range is the minimum of the two ranges. If they
6875 cannot be compared, then give up. */
6876 cmp = compare_values (vr0->min, vr1->min);
6877 if (cmp == 0 || cmp == 1)
6878 min = vr1->min;
6879 else if (cmp == -1)
6880 min = vr0->min;
6881 else
6882 goto give_up;
6883
6884 /* Similarly, the upper limit of the new range is the maximum
6885 of the two ranges. If they cannot be compared, then
6886 give up. */
6887 cmp = compare_values (vr0->max, vr1->max);
6888 if (cmp == 0 || cmp == -1)
6889 max = vr1->max;
6890 else if (cmp == 1)
6891 max = vr0->max;
6892 else
6893 goto give_up;
6894
6895 /* Check for useless ranges. */
6896 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6897 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6898 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6899 goto give_up;
6900
6901 /* The resulting set of equivalences is the intersection of
6902 the two sets. */
6903 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6904 bitmap_and_into (vr0->equiv, vr1->equiv);
6905 else if (vr0->equiv && !vr1->equiv)
6906 bitmap_clear (vr0->equiv);
6907
6908 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6909 }
6910 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6911 {
6912 /* Two anti-ranges meet only if their complements intersect.
6913 Only handle the case of identical ranges. */
6914 if (compare_values (vr0->min, vr1->min) == 0
6915 && compare_values (vr0->max, vr1->max) == 0
6916 && compare_values (vr0->min, vr0->max) == 0)
6917 {
6918 /* The resulting set of equivalences is the intersection of
6919 the two sets. */
6920 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6921 bitmap_and_into (vr0->equiv, vr1->equiv);
6922 else if (vr0->equiv && !vr1->equiv)
6923 bitmap_clear (vr0->equiv);
6924 }
6925 else
6926 goto give_up;
6927 }
6928 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6929 {
6930 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6931 only handle the case where the ranges have an empty intersection.
6932 The result of the meet operation is the anti-range. */
6933 if (!symbolic_range_p (vr0)
6934 && !symbolic_range_p (vr1)
6935 && !value_ranges_intersect_p (vr0, vr1))
6936 {
6937 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6938 set. We need to compute the intersection of the two
6939 equivalence sets. */
6940 if (vr1->type == VR_ANTI_RANGE)
6941 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6942
6943 /* The resulting set of equivalences is the intersection of
6944 the two sets. */
6945 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6946 bitmap_and_into (vr0->equiv, vr1->equiv);
6947 else if (vr0->equiv && !vr1->equiv)
6948 bitmap_clear (vr0->equiv);
6949 }
6950 else
6951 goto give_up;
6952 }
6953 else
6954 gcc_unreachable ();
6955
6956 return;
6957
6958 give_up:
6959 /* Failed to find an efficient meet. Before giving up and setting
6960 the result to VARYING, see if we can at least derive a useful
6961 anti-range. FIXME, all this nonsense about distinguishing
6962 anti-ranges from ranges is necessary because of the odd
6963 semantics of range_includes_zero_p and friends. */
6964 if (!symbolic_range_p (vr0)
6965 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6966 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6967 && !symbolic_range_p (vr1)
6968 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6969 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6970 {
6971 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6972
6973 /* Since this meet operation did not result from the meeting of
6974 two equivalent names, VR0 cannot have any equivalences. */
6975 if (vr0->equiv)
6976 bitmap_clear (vr0->equiv);
6977 }
6978 else
6979 set_value_range_to_varying (vr0);
6980 }
6981
6982
6983 /* Visit all arguments for PHI node PHI that flow through executable
6984 edges. If a valid value range can be derived from all the incoming
6985 value ranges, set a new range for the LHS of PHI. */
6986
6987 static enum ssa_prop_result
6988 vrp_visit_phi_node (gimple phi)
6989 {
6990 size_t i;
6991 tree lhs = PHI_RESULT (phi);
6992 value_range_t *lhs_vr = get_value_range (lhs);
6993 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6994 int edges, old_edges;
6995 struct loop *l;
6996
6997 if (dump_file && (dump_flags & TDF_DETAILS))
6998 {
6999 fprintf (dump_file, "\nVisiting PHI node: ");
7000 print_gimple_stmt (dump_file, phi, 0, dump_flags);
7001 }
7002
7003 edges = 0;
7004 for (i = 0; i < gimple_phi_num_args (phi); i++)
7005 {
7006 edge e = gimple_phi_arg_edge (phi, i);
7007
7008 if (dump_file && (dump_flags & TDF_DETAILS))
7009 {
7010 fprintf (dump_file,
7011 "\n Argument #%d (%d -> %d %sexecutable)\n",
7012 (int) i, e->src->index, e->dest->index,
7013 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
7014 }
7015
7016 if (e->flags & EDGE_EXECUTABLE)
7017 {
7018 tree arg = PHI_ARG_DEF (phi, i);
7019 value_range_t vr_arg;
7020
7021 ++edges;
7022
7023 if (TREE_CODE (arg) == SSA_NAME)
7024 {
7025 vr_arg = *(get_value_range (arg));
7026 }
7027 else
7028 {
7029 if (is_overflow_infinity (arg))
7030 {
7031 arg = copy_node (arg);
7032 TREE_OVERFLOW (arg) = 0;
7033 }
7034
7035 vr_arg.type = VR_RANGE;
7036 vr_arg.min = arg;
7037 vr_arg.max = arg;
7038 vr_arg.equiv = NULL;
7039 }
7040
7041 if (dump_file && (dump_flags & TDF_DETAILS))
7042 {
7043 fprintf (dump_file, "\t");
7044 print_generic_expr (dump_file, arg, dump_flags);
7045 fprintf (dump_file, "\n\tValue: ");
7046 dump_value_range (dump_file, &vr_arg);
7047 fprintf (dump_file, "\n");
7048 }
7049
7050 vrp_meet (&vr_result, &vr_arg);
7051
7052 if (vr_result.type == VR_VARYING)
7053 break;
7054 }
7055 }
7056
7057 if (vr_result.type == VR_VARYING)
7058 goto varying;
7059 else if (vr_result.type == VR_UNDEFINED)
7060 goto update_range;
7061
7062 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
7063 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
7064
7065 /* To prevent infinite iterations in the algorithm, derive ranges
7066 when the new value is slightly bigger or smaller than the
7067 previous one. We don't do this if we have seen a new executable
7068 edge; this helps us avoid an overflow infinity for conditionals
7069 which are not in a loop. */
7070 if (edges > 0
7071 && gimple_phi_num_args (phi) > 1
7072 && edges == old_edges)
7073 {
7074 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
7075 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
7076
7077 /* For non VR_RANGE or for pointers fall back to varying if
7078 the range changed. */
7079 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
7080 || POINTER_TYPE_P (TREE_TYPE (lhs)))
7081 && (cmp_min != 0 || cmp_max != 0))
7082 goto varying;
7083
7084 /* If the new minimum is smaller or larger than the previous
7085 one, go all the way to -INF. In the first case, to avoid
7086 iterating millions of times to reach -INF, and in the
7087 other case to avoid infinite bouncing between different
7088 minimums. */
7089 if (cmp_min > 0 || cmp_min < 0)
7090 {
7091 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
7092 || !vrp_var_may_overflow (lhs, phi))
7093 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
7094 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
7095 vr_result.min =
7096 negative_overflow_infinity (TREE_TYPE (vr_result.min));
7097 }
7098
7099 /* Similarly, if the new maximum is smaller or larger than
7100 the previous one, go all the way to +INF. */
7101 if (cmp_max < 0 || cmp_max > 0)
7102 {
7103 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
7104 || !vrp_var_may_overflow (lhs, phi))
7105 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
7106 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
7107 vr_result.max =
7108 positive_overflow_infinity (TREE_TYPE (vr_result.max));
7109 }
7110
7111 /* If we dropped either bound to +-INF then, if this is a loop
7112 PHI node, SCEV may know more about its value range. */
7113 if ((cmp_min > 0 || cmp_min < 0
7114 || cmp_max < 0 || cmp_max > 0)
7115 && current_loops
7116 && (l = loop_containing_stmt (phi))
7117 && l->header == gimple_bb (phi))
7118 adjust_range_with_scev (&vr_result, l, phi, lhs);
7119
7120 /* If we will end up with a (-INF, +INF) range, set it to
7121 VARYING. Same if the previous max value was invalid for
7122 the type and we end up with vr_result.min > vr_result.max. */
7123 if ((vrp_val_is_max (vr_result.max)
7124 && vrp_val_is_min (vr_result.min))
7125 || compare_values (vr_result.min,
7126 vr_result.max) > 0)
7127 goto varying;
7128 }
7129
7130 /* If the new range is different than the previous value, keep
7131 iterating. */
7132 update_range:
7133 if (update_value_range (lhs, &vr_result))
7134 {
7135 if (dump_file && (dump_flags & TDF_DETAILS))
7136 {
7137 fprintf (dump_file, "Found new range for ");
7138 print_generic_expr (dump_file, lhs, 0);
7139 fprintf (dump_file, ": ");
7140 dump_value_range (dump_file, &vr_result);
7141 fprintf (dump_file, "\n\n");
7142 }
7143
7144 return SSA_PROP_INTERESTING;
7145 }
7146
7147 /* Nothing changed, don't add outgoing edges. */
7148 return SSA_PROP_NOT_INTERESTING;
7149
7150 /* No match found. Set the LHS to VARYING. */
7151 varying:
7152 set_value_range_to_varying (lhs_vr);
7153 return SSA_PROP_VARYING;
7154 }
7155
7156 /* Simplify boolean operations if the source is known
7157 to be already a boolean. */
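/* For instance (a sketch, assuming OP0 and OP1 are known to be 0 or 1):

       lhs = (op0 != op1)   becomes   lhs = op0 ^ op1
       lhs = (op0 != 0)     becomes   lhs = op0
       lhs = (op0 == 0)     becomes   lhs = op0 ^ 1

   possibly wrapped in a conversion when LHS has a different type.  */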
7158 static bool
7159 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7160 {
7161 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7162 tree lhs, op0, op1;
7163 bool need_conversion;
7164
7165 /* We handle only !=/== case here. */
7166 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
7167
7168 op0 = gimple_assign_rhs1 (stmt);
7169 if (!op_with_boolean_value_range_p (op0))
7170 return false;
7171
7172 op1 = gimple_assign_rhs2 (stmt);
7173 if (!op_with_boolean_value_range_p (op1))
7174 return false;
7175
7176 /* Reduce number of cases to handle to NE_EXPR. As there is no
7177 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
7178 if (rhs_code == EQ_EXPR)
7179 {
7180 if (TREE_CODE (op1) == INTEGER_CST)
7181 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
7182 else
7183 return false;
7184 }
7185
7186 lhs = gimple_assign_lhs (stmt);
7187 need_conversion
7188 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
7189
7190 /* Make sure not to sign-extend a 1-bit 1 when converting the result. */
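/* (In a signed 1-bit type the value with its only bit set reads as -1,
   so widening such a result would produce -1 rather than the expected
   1.)  */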
7191 if (need_conversion
7192 && !TYPE_UNSIGNED (TREE_TYPE (op0))
7193 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
7194 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
7195 return false;
7196
7197 /* For A != 0 we can substitute A itself. */
7198 if (integer_zerop (op1))
7199 gimple_assign_set_rhs_with_ops (gsi,
7200 need_conversion
7201 ? NOP_EXPR : TREE_CODE (op0),
7202 op0, NULL_TREE);
7203 /* For A != B we substitute A ^ B. Either with conversion. */
7204 else if (need_conversion)
7205 {
7206 gimple newop;
7207 tree tem = create_tmp_reg (TREE_TYPE (op0), NULL);
7208 newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
7209 tem = make_ssa_name (tem, newop);
7210 gimple_assign_set_lhs (newop, tem);
7211 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
7212 update_stmt (newop);
7213 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
7214 }
7215 /* Or without. */
7216 else
7217 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
7218 update_stmt (gsi_stmt (*gsi));
7219
7220 return true;
7221 }
7222
7223 /* Simplify a division or modulo operator to a right shift or
7224 bitwise and if the first operand is unsigned or is greater
7225 than zero and the second operand is an exact power of two. */
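/* For instance, with x_1 unsigned (or known to be non-negative):

       x_2 = x_1 / 16;      becomes   x_2 = x_1 >> 4;
       x_2 = x_1 % 16;      becomes   x_2 = x_1 & 15;

   (illustrative sketch; the shift count and mask are built below with
   tree_log2 and int_const_binop).  */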
7226
7227 static bool
7228 simplify_div_or_mod_using_ranges (gimple stmt)
7229 {
7230 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7231 tree val = NULL;
7232 tree op0 = gimple_assign_rhs1 (stmt);
7233 tree op1 = gimple_assign_rhs2 (stmt);
7234 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
7235
7236 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
7237 {
7238 val = integer_one_node;
7239 }
7240 else
7241 {
7242 bool sop = false;
7243
7244 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
7245
7246 if (val
7247 && sop
7248 && integer_onep (val)
7249 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
7250 {
7251 location_t location;
7252
7253 if (!gimple_has_location (stmt))
7254 location = input_location;
7255 else
7256 location = gimple_location (stmt);
7257 warning_at (location, OPT_Wstrict_overflow,
7258 "assuming signed overflow does not occur when "
7259 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
7260 }
7261 }
7262
7263 if (val && integer_onep (val))
7264 {
7265 tree t;
7266
7267 if (rhs_code == TRUNC_DIV_EXPR)
7268 {
7269 t = build_int_cst (integer_type_node, tree_log2 (op1));
7270 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
7271 gimple_assign_set_rhs1 (stmt, op0);
7272 gimple_assign_set_rhs2 (stmt, t);
7273 }
7274 else
7275 {
7276 t = build_int_cst (TREE_TYPE (op1), 1);
7277 t = int_const_binop (MINUS_EXPR, op1, t);
7278 t = fold_convert (TREE_TYPE (op0), t);
7279
7280 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
7281 gimple_assign_set_rhs1 (stmt, op0);
7282 gimple_assign_set_rhs2 (stmt, t);
7283 }
7284
7285 update_stmt (stmt);
7286 return true;
7287 }
7288
7289 return false;
7290 }
7291
7292 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
7293 ABS_EXPR. If the operand is <= 0, then simplify the
7294 ABS_EXPR into a NEGATE_EXPR. */
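/* Informally:

       with x_1 in [0, 100]:    x_2 = ABS_EXPR <x_1>;   becomes   x_2 = x_1;
       with x_1 in [-100, 0]:   x_2 = ABS_EXPR <x_1>;   becomes   x_2 = -x_1;  */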
7295
7296 static bool
7297 simplify_abs_using_ranges (gimple stmt)
7298 {
7299 tree val = NULL;
7300 tree op = gimple_assign_rhs1 (stmt);
7301 tree type = TREE_TYPE (op);
7302 value_range_t *vr = get_value_range (op);
7303
7304 if (TYPE_UNSIGNED (type))
7305 {
7306 val = integer_zero_node;
7307 }
7308 else if (vr)
7309 {
7310 bool sop = false;
7311
7312 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
7313 if (!val)
7314 {
7315 sop = false;
7316 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
7317 &sop);
7318
7319 if (val)
7320 {
7321 if (integer_zerop (val))
7322 val = integer_one_node;
7323 else if (integer_onep (val))
7324 val = integer_zero_node;
7325 }
7326 }
7327
7328 if (val
7329 && (integer_onep (val) || integer_zerop (val)))
7330 {
7331 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
7332 {
7333 location_t location;
7334
7335 if (!gimple_has_location (stmt))
7336 location = input_location;
7337 else
7338 location = gimple_location (stmt);
7339 warning_at (location, OPT_Wstrict_overflow,
7340 "assuming signed overflow does not occur when "
7341 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
7342 }
7343
7344 gimple_assign_set_rhs1 (stmt, op);
7345 if (integer_onep (val))
7346 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
7347 else
7348 gimple_assign_set_rhs_code (stmt, SSA_NAME);
7349 update_stmt (stmt);
7350 return true;
7351 }
7352 }
7353
7354 return false;
7355 }
7356
7357 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
7358 If all the bits that are being cleared by & are already
7359 known to be zero from VR, or all the bits that are being
7360 set by | are already known to be one from VR, the bit
7361 operation is redundant. */
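/* For example, if x_1 is known to lie in [0, 15], so that only its low
   four bits may be nonzero, then informally

       x_2 = x_1 & 255;     becomes   x_2 = x_1;

   and, dually, x_1 | C becomes C when every possibly-set bit of x_1 is
   already set in C.  */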
7362
7363 static bool
7364 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7365 {
7366 tree op0 = gimple_assign_rhs1 (stmt);
7367 tree op1 = gimple_assign_rhs2 (stmt);
7368 tree op = NULL_TREE;
7369 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7370 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7371 double_int may_be_nonzero0, may_be_nonzero1;
7372 double_int must_be_nonzero0, must_be_nonzero1;
7373 double_int mask;
7374
7375 if (TREE_CODE (op0) == SSA_NAME)
7376 vr0 = *(get_value_range (op0));
7377 else if (is_gimple_min_invariant (op0))
7378 set_value_range_to_value (&vr0, op0, NULL);
7379 else
7380 return false;
7381
7382 if (TREE_CODE (op1) == SSA_NAME)
7383 vr1 = *(get_value_range (op1));
7384 else if (is_gimple_min_invariant (op1))
7385 set_value_range_to_value (&vr1, op1, NULL);
7386 else
7387 return false;
7388
7389 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7390 return false;
7391 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7392 return false;
7393
7394 switch (gimple_assign_rhs_code (stmt))
7395 {
7396 case BIT_AND_EXPR:
7397 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7398 if (double_int_zero_p (mask))
7399 {
7400 op = op0;
7401 break;
7402 }
7403 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7404 if (double_int_zero_p (mask))
7405 {
7406 op = op1;
7407 break;
7408 }
7409 break;
7410 case BIT_IOR_EXPR:
7411 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7412 if (double_int_zero_p (mask))
7413 {
7414 op = op1;
7415 break;
7416 }
7417 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7418 if (double_int_zero_p (mask))
7419 {
7420 op = op0;
7421 break;
7422 }
7423 break;
7424 default:
7425 gcc_unreachable ();
7426 }
7427
7428 if (op == NULL_TREE)
7429 return false;
7430
7431 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7432 update_stmt (gsi_stmt (*gsi));
7433 return true;
7434 }
7435
7436 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7437 a known value range VR.
7438
7439 If there is one and only one value which will satisfy the
7440 conditional, then return that value. Else return NULL. */
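/* For instance, for the comparison x_1 <= 5 with x_1 known to be in
   [5, 10], the only value that can satisfy the condition is 5, so 5 is
   returned (illustrative example).  */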
7441
7442 static tree
7443 test_for_singularity (enum tree_code cond_code, tree op0,
7444 tree op1, value_range_t *vr)
7445 {
7446 tree min = NULL;
7447 tree max = NULL;
7448
7449 /* Extract minimum/maximum values which satisfy the
7450 conditional as it was written. */
7451 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7452 {
7453 /* This should not be negative infinity; there is no overflow
7454 here. */
7455 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7456
7457 max = op1;
7458 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7459 {
7460 tree one = build_int_cst (TREE_TYPE (op0), 1);
7461 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7462 if (EXPR_P (max))
7463 TREE_NO_WARNING (max) = 1;
7464 }
7465 }
7466 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7467 {
7468 /* This should not be positive infinity; there is no overflow
7469 here. */
7470 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7471
7472 min = op1;
7473 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7474 {
7475 tree one = build_int_cst (TREE_TYPE (op0), 1);
7476 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7477 if (EXPR_P (min))
7478 TREE_NO_WARNING (min) = 1;
7479 }
7480 }
7481
7482 /* Now refine the minimum and maximum values using any
7483 value range information we have for op0. */
7484 if (min && max)
7485 {
7486 if (compare_values (vr->min, min) == 1)
7487 min = vr->min;
7488 if (compare_values (vr->max, max) == -1)
7489 max = vr->max;
7490
7491 /* If the new min/max values have converged to a single value,
7492 then there is only one value which can satisfy the condition,
7493 return that value. */
7494 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7495 return min;
7496 }
7497 return NULL;
7498 }
7499
7500 /* Simplify a conditional using a relational operator to an equality
7501 test if the range information indicates only one value can satisfy
7502 the original conditional. */
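/* For example, with x_1 known to be in [5, 10]:

       if (x_1 < 6)    becomes   if (x_1 == 5)
       if (x_1 >= 6)   becomes   if (x_1 != 5)

   (informal sketch of the two rewrites performed below).  */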
7503
7504 static bool
7505 simplify_cond_using_ranges (gimple stmt)
7506 {
7507 tree op0 = gimple_cond_lhs (stmt);
7508 tree op1 = gimple_cond_rhs (stmt);
7509 enum tree_code cond_code = gimple_cond_code (stmt);
7510
7511 if (cond_code != NE_EXPR
7512 && cond_code != EQ_EXPR
7513 && TREE_CODE (op0) == SSA_NAME
7514 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7515 && is_gimple_min_invariant (op1))
7516 {
7517 value_range_t *vr = get_value_range (op0);
7518
7519 /* If we have range information for OP0, then we might be
7520 able to simplify this conditional. */
7521 if (vr->type == VR_RANGE)
7522 {
7523 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7524
7525 if (new_tree)
7526 {
7527 if (dump_file)
7528 {
7529 fprintf (dump_file, "Simplified relational ");
7530 print_gimple_stmt (dump_file, stmt, 0, 0);
7531 fprintf (dump_file, " into ");
7532 }
7533
7534 gimple_cond_set_code (stmt, EQ_EXPR);
7535 gimple_cond_set_lhs (stmt, op0);
7536 gimple_cond_set_rhs (stmt, new_tree);
7537
7538 update_stmt (stmt);
7539
7540 if (dump_file)
7541 {
7542 print_gimple_stmt (dump_file, stmt, 0, 0);
7543 fprintf (dump_file, "\n");
7544 }
7545
7546 return true;
7547 }
7548
7549 /* Try again after inverting the condition. We only deal
7550 with integral types here, so no need to worry about
7551 issues with inverting FP comparisons. */
7552 cond_code = invert_tree_comparison (cond_code, false);
7553 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7554
7555 if (new_tree)
7556 {
7557 if (dump_file)
7558 {
7559 fprintf (dump_file, "Simplified relational ");
7560 print_gimple_stmt (dump_file, stmt, 0, 0);
7561 fprintf (dump_file, " into ");
7562 }
7563
7564 gimple_cond_set_code (stmt, NE_EXPR);
7565 gimple_cond_set_lhs (stmt, op0);
7566 gimple_cond_set_rhs (stmt, new_tree);
7567
7568 update_stmt (stmt);
7569
7570 if (dump_file)
7571 {
7572 print_gimple_stmt (dump_file, stmt, 0, 0);
7573 fprintf (dump_file, "\n");
7574 }
7575
7576 return true;
7577 }
7578 }
7579 }
7580
7581 return false;
7582 }
7583
7584 /* Simplify a switch statement using the value range of the switch
7585 argument. */
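/* For example, if the switch index is known to be in [3, 5], any case
   label outside that range (and possibly the default) can never be
   reached; the corresponding outgoing edges are queued on
   to_remove_edges and a shrunken case label vector is queued on
   to_update_switch_stmts for later application.  */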
7586
7587 static bool
7588 simplify_switch_using_ranges (gimple stmt)
7589 {
7590 tree op = gimple_switch_index (stmt);
7591 value_range_t *vr;
7592 bool take_default;
7593 edge e;
7594 edge_iterator ei;
7595 size_t i = 0, j = 0, n, n2;
7596 tree vec2;
7597 switch_update su;
7598
7599 if (TREE_CODE (op) == SSA_NAME)
7600 {
7601 vr = get_value_range (op);
7602
7603 /* We can only handle integer ranges. */
7604 if (vr->type != VR_RANGE
7605 || symbolic_range_p (vr))
7606 return false;
7607
7608 /* Find case label for min/max of the value range. */
7609 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7610 }
7611 else if (TREE_CODE (op) == INTEGER_CST)
7612 {
7613 take_default = !find_case_label_index (stmt, 1, op, &i);
7614 if (take_default)
7615 {
7616 i = 1;
7617 j = 0;
7618 }
7619 else
7620 {
7621 j = i;
7622 }
7623 }
7624 else
7625 return false;
7626
7627 n = gimple_switch_num_labels (stmt);
7628
7629 /* Bail out if all edges would be taken anyway. */
7630 if (i == 1
7631 && j == n - 1
7632 && take_default)
7633 return false;
7634
7635 /* Build a new vector of taken case labels. */
7636 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7637 n2 = 0;
7638
7639 /* Add the default edge, if necessary. */
7640 if (take_default)
7641 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7642
7643 for (; i <= j; ++i, ++n2)
7644 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7645
7646 /* Mark needed edges. */
7647 for (i = 0; i < n2; ++i)
7648 {
7649 e = find_edge (gimple_bb (stmt),
7650 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7651 e->aux = (void *)-1;
7652 }
7653
7654 /* Queue the unneeded edges for later removal. */
7655 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7656 {
7657 if (e->aux == (void *)-1)
7658 {
7659 e->aux = NULL;
7660 continue;
7661 }
7662
7663 if (dump_file && (dump_flags & TDF_DETAILS))
7664 {
7665 fprintf (dump_file, "removing unreachable case label\n");
7666 }
7667 VEC_safe_push (edge, heap, to_remove_edges, e);
7668 e->flags &= ~EDGE_EXECUTABLE;
7669 }
7670
7671 /* And queue an update for the stmt. */
7672 su.stmt = stmt;
7673 su.vec = vec2;
7674 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7675 return false;
7676 }
7677
7678 /* Simplify an integral conversion from an SSA name in STMT. */
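/* The goal is to remove a redundant intermediate conversion.  For
   instance, given (informal sketch)

       short _2 = (short) c_1;     c_1 of type char, with range [0, 100]
       int   _3 = (int) _2;

   the value range shows that converting c_1 directly gives the same
   result, so the second statement is rewritten to _3 = (int) c_1.  */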
7679
7680 static bool
7681 simplify_conversion_using_ranges (gimple stmt)
7682 {
7683 tree innerop, middleop, finaltype;
7684 gimple def_stmt;
7685 value_range_t *innervr;
7686 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
7687 unsigned inner_prec, middle_prec, final_prec;
7688 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
7689
7690 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7691 if (!INTEGRAL_TYPE_P (finaltype))
7692 return false;
7693 middleop = gimple_assign_rhs1 (stmt);
7694 def_stmt = SSA_NAME_DEF_STMT (middleop);
7695 if (!is_gimple_assign (def_stmt)
7696 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7697 return false;
7698 innerop = gimple_assign_rhs1 (def_stmt);
7699 if (TREE_CODE (innerop) != SSA_NAME)
7700 return false;
7701
7702 /* Get the value-range of the inner operand. */
7703 innervr = get_value_range (innerop);
7704 if (innervr->type != VR_RANGE
7705 || TREE_CODE (innervr->min) != INTEGER_CST
7706 || TREE_CODE (innervr->max) != INTEGER_CST)
7707 return false;
7708
7709 /* Simulate the conversion chain to check whether the result stays the
7710 same if the middle conversion is removed. */
7711 innermin = tree_to_double_int (innervr->min);
7712 innermax = tree_to_double_int (innervr->max);
7713
7714 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
7715 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
7716 final_prec = TYPE_PRECISION (finaltype);
7717
7718 /* If the first conversion is not injective, the second must not
7719 be widening. */
7720 if (double_int_cmp (double_int_sub (innermax, innermin),
7721 double_int_mask (middle_prec), true) > 0
7722 && middle_prec < final_prec)
7723 return false;
7724 /* We also want a medium value so that we can track the effect that
7725 narrowing conversions with sign change have. */
7726 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
7727 if (inner_unsigned_p)
7728 innermed = double_int_rshift (double_int_mask (inner_prec),
7729 1, inner_prec, false);
7730 else
7731 innermed = double_int_zero;
7732 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
7733 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
7734 innermed = innermin;
7735
7736 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
7737 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
7738 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
7739 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
7740
7741 /* Require that the final conversion applied to both the original
7742 and the intermediate range produces the same result. */
7743 final_unsigned_p = TYPE_UNSIGNED (finaltype);
7744 if (!double_int_equal_p (double_int_ext (middlemin,
7745 final_prec, final_unsigned_p),
7746 double_int_ext (innermin,
7747 final_prec, final_unsigned_p))
7748 || !double_int_equal_p (double_int_ext (middlemed,
7749 final_prec, final_unsigned_p),
7750 double_int_ext (innermed,
7751 final_prec, final_unsigned_p))
7752 || !double_int_equal_p (double_int_ext (middlemax,
7753 final_prec, final_unsigned_p),
7754 double_int_ext (innermax,
7755 final_prec, final_unsigned_p)))
7756 return false;
7757
7758 gimple_assign_set_rhs1 (stmt, innerop);
7759 update_stmt (stmt);
7760 return true;
7761 }
7762
7763 /* Return whether the value range *VR fits in an integer type specified
7764 by PRECISION and UNSIGNED_P. */
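/* For example, the range [0, 100] fits in a signed 8-bit type, whereas
   [0, 200] does not, because sign-extending 200 from 8 bits changes its
   value (illustrative example).  */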
7765
7766 static bool
7767 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
7768 {
7769 tree src_type;
7770 unsigned src_precision;
7771 double_int tem;
7772
7773 /* We can only handle integral and pointer types. */
7774 src_type = TREE_TYPE (vr->min);
7775 if (!INTEGRAL_TYPE_P (src_type)
7776 && !POINTER_TYPE_P (src_type))
7777 return false;
7778
7779 /* An extension is always fine, so is an identity transform. */
7780 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
7781 if (src_precision < precision
7782 || (src_precision == precision
7783 && TYPE_UNSIGNED (src_type) == unsigned_p))
7784 return true;
7785
7786 /* Now we can only handle ranges with constant bounds. */
7787 if (vr->type != VR_RANGE
7788 || TREE_CODE (vr->min) != INTEGER_CST
7789 || TREE_CODE (vr->max) != INTEGER_CST)
7790 return false;
7791
7792 /* For precision-preserving sign-changes the MSB of the double-int
7793 has to be clear. */
7794 if (src_precision == precision
7795 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
7796 return false;
7797
7798 /* Then we can perform the conversion on both ends and compare
7799 the result for equality. */
7800 tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
7801 if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
7802 return false;
7803 tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
7804 if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
7805 return false;
7806
7807 return true;
7808 }
7809
7810 /* Simplify a conversion from integral SSA name to float in STMT. */
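/* For instance, given (informal sketch)

       double _2 = (double) u_1;     u_1 unsigned int, range [0, 1000]

   the range shows u_1 also fits a signed int, so the conversion can be
   done through a signed temporary, which is typically cheaper to
   expand:

       int    _3 = (int) u_1;
       double _2 = (double) _3;  */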
7811
7812 static bool
7813 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7814 {
7815 tree rhs1 = gimple_assign_rhs1 (stmt);
7816 value_range_t *vr = get_value_range (rhs1);
7817 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
7818 enum machine_mode mode;
7819 tree tem;
7820 gimple conv;
7821
7822 /* We can only handle constant ranges. */
7823 if (vr->type != VR_RANGE
7824 || TREE_CODE (vr->min) != INTEGER_CST
7825 || TREE_CODE (vr->max) != INTEGER_CST)
7826 return false;
7827
7828 /* First check if we can use a signed type in place of an unsigned. */
7829 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
7830 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
7831 != CODE_FOR_nothing)
7832 && range_fits_type_p (vr, GET_MODE_PRECISION
7833 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
7834 mode = TYPE_MODE (TREE_TYPE (rhs1));
7835 /* If we can do the conversion in the current input mode, do nothing. */
7836 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
7837 TYPE_UNSIGNED (TREE_TYPE (rhs1))))
7838 return false;
7839 /* Otherwise search for a mode we can use, starting from the narrowest
7840 integer mode available. */
7841 else
7842 {
7843 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
7844 do
7845 {
7846 /* If we cannot do a signed conversion to float from mode
7847 or if the value-range does not fit in the signed type
7848 try with a wider mode. */
7849 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
7850 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
7851 break;
7852
7853 mode = GET_MODE_WIDER_MODE (mode);
7854 /* But do not widen the input. Instead leave that to the
7855 optabs expansion code. */
7856 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
7857 return false;
7858 }
7859 while (mode != VOIDmode);
7860 if (mode == VOIDmode)
7861 return false;
7862 }
7863
7864 /* It works; insert a truncation or sign-change before the
7865 float conversion. */
7866 tem = create_tmp_var (build_nonstandard_integer_type
7867 (GET_MODE_PRECISION (mode), 0), NULL);
7868 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
7869 tem = make_ssa_name (tem, conv);
7870 gimple_assign_set_lhs (conv, tem);
7871 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
7872 gimple_assign_set_rhs1 (stmt, tem);
7873 update_stmt (stmt);
7874
7875 return true;
7876 }
7877
7878 /* Simplify STMT using ranges if possible. */
7879
7880 static bool
7881 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7882 {
7883 gimple stmt = gsi_stmt (*gsi);
7884 if (is_gimple_assign (stmt))
7885 {
7886 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7887 tree rhs1 = gimple_assign_rhs1 (stmt);
7888
7889 switch (rhs_code)
7890 {
7891 case EQ_EXPR:
7892 case NE_EXPR:
7893 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
7894 if the RHS is zero or one, and the LHS is known to be a boolean
7895 value. */
7896 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7897 return simplify_truth_ops_using_ranges (gsi, stmt);
7898 break;
7899
7900 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7901 and BIT_AND_EXPR respectively if the first operand is greater
7902 than zero and the second operand is an exact power of two. */
7903 case TRUNC_DIV_EXPR:
7904 case TRUNC_MOD_EXPR:
7905 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7906 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7907 return simplify_div_or_mod_using_ranges (stmt);
7908 break;
7909
7910 /* Transform ABS (X) into X or -X as appropriate. */
7911 case ABS_EXPR:
7912 if (TREE_CODE (rhs1) == SSA_NAME
7913 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7914 return simplify_abs_using_ranges (stmt);
7915 break;
7916
7917 case BIT_AND_EXPR:
7918 case BIT_IOR_EXPR:
7919 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7920 if all the bits being cleared are already cleared or
7921 all the bits being set are already set. */
7922 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7923 return simplify_bit_ops_using_ranges (gsi, stmt);
7924 break;
7925
7926 CASE_CONVERT:
7927 if (TREE_CODE (rhs1) == SSA_NAME
7928 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7929 return simplify_conversion_using_ranges (stmt);
7930 break;
7931
7932 case FLOAT_EXPR:
7933 if (TREE_CODE (rhs1) == SSA_NAME
7934 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7935 return simplify_float_conversion_using_ranges (gsi, stmt);
7936 break;
7937
7938 default:
7939 break;
7940 }
7941 }
7942 else if (gimple_code (stmt) == GIMPLE_COND)
7943 return simplify_cond_using_ranges (stmt);
7944 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7945 return simplify_switch_using_ranges (stmt);
7946
7947 return false;
7948 }
7949
7950 /* If the statement pointed to by SI has a predicate whose value can be
7951 computed using the value range information computed by VRP, compute
7952 its value and return true. Otherwise, return false. */
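/* For example, with x_1 known to be in [20, 30] the predicate in

       if (x_1 > 10)

   is true for every value in the range, so the GIMPLE_COND is turned
   into a trivially true condition via gimple_cond_make_true (informal
   example).  */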
7953
7954 static bool
7955 fold_predicate_in (gimple_stmt_iterator *si)
7956 {
7957 bool assignment_p = false;
7958 tree val;
7959 gimple stmt = gsi_stmt (*si);
7960
7961 if (is_gimple_assign (stmt)
7962 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7963 {
7964 assignment_p = true;
7965 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7966 gimple_assign_rhs1 (stmt),
7967 gimple_assign_rhs2 (stmt),
7968 stmt);
7969 }
7970 else if (gimple_code (stmt) == GIMPLE_COND)
7971 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7972 gimple_cond_lhs (stmt),
7973 gimple_cond_rhs (stmt),
7974 stmt);
7975 else
7976 return false;
7977
7978 if (val)
7979 {
7980 if (assignment_p)
7981 val = fold_convert (gimple_expr_type (stmt), val);
7982
7983 if (dump_file)
7984 {
7985 fprintf (dump_file, "Folding predicate ");
7986 print_gimple_expr (dump_file, stmt, 0, 0);
7987 fprintf (dump_file, " to ");
7988 print_generic_expr (dump_file, val, 0);
7989 fprintf (dump_file, "\n");
7990 }
7991
7992 if (is_gimple_assign (stmt))
7993 gimple_assign_set_rhs_from_tree (si, val);
7994 else
7995 {
7996 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7997 if (integer_zerop (val))
7998 gimple_cond_make_false (stmt);
7999 else if (integer_onep (val))
8000 gimple_cond_make_true (stmt);
8001 else
8002 gcc_unreachable ();
8003 }
8004
8005 return true;
8006 }
8007
8008 return false;
8009 }
8010
8011 /* Callback for substitute_and_fold folding the stmt at *SI. */
8012
8013 static bool
8014 vrp_fold_stmt (gimple_stmt_iterator *si)
8015 {
8016 if (fold_predicate_in (si))
8017 return true;
8018
8019 return simplify_stmt_using_ranges (si);
8020 }
8021
8022 /* Stack of dest,src equivalency pairs that need to be restored after
8023 each attempt to thread a block's incoming edge to an outgoing edge.
8024
8025 A NULL entry is used to mark the end of pairs which need to be
8026 restored. */
8027 static VEC(tree,heap) *stack;
8028
8029 /* A trivial wrapper so that we can present the generic jump threading
8030 code with a simple API for simplifying statements. STMT is the
8031 statement we want to simplify, WITHIN_STMT provides the location
8032 for any overflow warnings. */
8033
8034 static tree
8035 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
8036 {
8037 /* We only use VRP information to simplify conditionals. This is
8038 overly conservative, but it's unclear if doing more would be
8039 worth the compile time cost. */
8040 if (gimple_code (stmt) != GIMPLE_COND)
8041 return NULL;
8042
8043 return vrp_evaluate_conditional (gimple_cond_code (stmt),
8044 gimple_cond_lhs (stmt),
8045 gimple_cond_rhs (stmt), within_stmt);
8046 }
8047
8048 /* Blocks which have more than one predecessor and more than
8049 one successor present jump threading opportunities, i.e.,
8050 when the block is reached from a specific predecessor, we
8051 may be able to determine which of the outgoing edges will
8052 be traversed. When this optimization applies, we are able
8053 to avoid conditionals at runtime and we may expose secondary
8054 optimization opportunities.
8055
8056 This routine is effectively a driver for the generic jump
8057 threading code. It basically just presents the generic code
8058 with edges that may be suitable for jump threading.
8059
8060 Unlike DOM, we do not iterate VRP if jump threading was successful.
8061 While iterating may expose new opportunities for VRP, it is expected
8062 those opportunities would be very limited and the compile time cost
8063 to expose those opportunities would be significant.
8064
8065 As jump threading opportunities are discovered, they are registered
8066 for later realization. */
8067
8068 static void
8069 identify_jump_threads (void)
8070 {
8071 basic_block bb;
8072 gimple dummy;
8073 int i;
8074 edge e;
8075
8076 /* Ugh. When substituting values earlier in this pass we can
8077 wipe the dominance information. So rebuild the dominator
8078 information as we need it within the jump threading code. */
8079 calculate_dominance_info (CDI_DOMINATORS);
8080
8081 /* We do not allow VRP information to be used for jump threading
8082 across a back edge in the CFG. Otherwise it becomes too
8083 difficult to avoid eliminating loop exit tests. Of course
8084 EDGE_DFS_BACK is not accurate at this time so we have to
8085 recompute it. */
8086 mark_dfs_back_edges ();
8087
8088 /* Do not thread across edges we are about to remove. Just marking
8089 them as EDGE_DFS_BACK will do. */
8090 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
8091 e->flags |= EDGE_DFS_BACK;
8092
8093 /* Allocate our unwinder stack to unwind any temporary equivalences
8094 that might be recorded. */
8095 stack = VEC_alloc (tree, heap, 20);
8096
8097 /* To avoid lots of silly node creation, we create a single
8098 conditional and just modify it in-place when attempting to
8099 thread jumps. */
8100 dummy = gimple_build_cond (EQ_EXPR,
8101 integer_zero_node, integer_zero_node,
8102 NULL, NULL);
8103
8104 /* Walk through all the blocks finding those which present a
8105 potential jump threading opportunity. We could set this up
8106 as a dominator walker and record data during the walk, but
8107 I doubt it's worth the effort for the classes of jump
8108 threading opportunities we are trying to identify at this
8109 point in compilation. */
8110 FOR_EACH_BB (bb)
8111 {
8112 gimple last;
8113
8114 /* If the generic jump threading code does not find this block
8115 interesting, then there is nothing to do. */
8116 if (! potentially_threadable_block (bb))
8117 continue;
8118
8119 /* We only care about blocks ending in a conditional jump or a
8120 switch statement; anything else offers no threading
8121 opportunity here. */
8122 last = gsi_stmt (gsi_last_bb (bb));
8123
8124 /* We're basically looking for a switch or any kind of conditional with
8125 integral or pointer type arguments. Note the type of the second
8126 argument will be the same as the first argument, so no need to
8127 check it explicitly. */
8128 if (gimple_code (last) == GIMPLE_SWITCH
8129 || (gimple_code (last) == GIMPLE_COND
8130 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
8131 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
8132 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
8133 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
8134 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
8135 {
8136 edge_iterator ei;
8137
8138 /* We've got a block with multiple predecessors and multiple
8139 successors which also ends in a suitable conditional or
8140 switch statement. For each predecessor, see if we can thread
8141 it to a specific successor. */
8142 FOR_EACH_EDGE (e, ei, bb->preds)
8143 {
8144 /* Do not thread across back edges or abnormal edges
8145 in the CFG. */
8146 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
8147 continue;
8148
8149 thread_across_edge (dummy, e, true, &stack,
8150 simplify_stmt_for_jump_threading);
8151 }
8152 }
8153 }
8154
8155 /* We do not actually update the CFG or SSA graphs at this point as
8156 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
8157 handle ASSERT_EXPRs gracefully. */
8158 }
8159
8160 /* We identified all the jump threading opportunities earlier, but could
8161 not transform the CFG at that time. This routine transforms the
8162 CFG and arranges for the dominator tree to be rebuilt if necessary.
8163
8164 Note the SSA graph update will occur during the normal TODO
8165 processing by the pass manager. */
8166 static void
8167 finalize_jump_threads (void)
8168 {
8169 thread_through_all_blocks (false);
8170 VEC_free (tree, heap, stack);
8171 }
8172
8173
8174 /* Traverse all the blocks folding conditionals with known ranges. */
8175
8176 static void
8177 vrp_finalize (void)
8178 {
8179 size_t i;
8180
8181 values_propagated = true;
8182
8183 if (dump_file)
8184 {
8185 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
8186 dump_all_value_ranges (dump_file);
8187 fprintf (dump_file, "\n");
8188 }
8189
8190 substitute_and_fold (op_with_constant_singleton_value_range,
8191 vrp_fold_stmt, false);
8192
8193 if (warn_array_bounds)
8194 check_all_array_refs ();
8195
8196 /* We must identify jump threading opportunities before we release
8197 the datastructures built by VRP. */
8198 identify_jump_threads ();
8199
8200 /* Free allocated memory. */
8201 for (i = 0; i < num_vr_values; i++)
8202 if (vr_value[i])
8203 {
8204 BITMAP_FREE (vr_value[i]->equiv);
8205 free (vr_value[i]);
8206 }
8207
8208 free (vr_value);
8209 free (vr_phi_edge_counts);
8210
8211 /* So that we can distinguish between VRP data being available
8212 and not available. */
8213 vr_value = NULL;
8214 vr_phi_edge_counts = NULL;
8215 }
8216
8217
8218 /* Main entry point to VRP (Value Range Propagation). This pass is
8219 loosely based on J. R. C. Patterson, ``Accurate Static Branch
8220 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
8221 Programming Language Design and Implementation, pp. 67-78, 1995.
8222 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
8223
8224 This is essentially an SSA-CCP pass modified to deal with ranges
8225 instead of constants.
8226
8227 While propagating ranges, we may find that two or more SSA names
8228 have equivalent, though distinct ranges. For instance,
8229
8230 1 x_9 = p_3->a;
8231 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
8232 3 if (p_4 == q_2)
8233 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
8234 5 endif
8235 6 if (q_2)
8236
8237 In the code above, pointer p_5 has range [q_2, q_2], but from the
8238 code we can also determine that p_5 cannot be NULL and, if q_2 had
8239 a non-varying range, p_5's range should also be compatible with it.
8240
8241 These equivalences are created by two expressions: ASSERT_EXPR and
8242 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
8243 result of another assertion, we can use the fact that p_5 and
8244 p_4 are equivalent when evaluating p_5's range.
8245
8246 Together with value ranges, we also propagate these equivalences
8247 between names so that we can take advantage of information from
8248 multiple ranges when doing final replacement. Note that this
8249 equivalency relation is transitive but not symmetric.
8250
8251 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
8252 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
8253 in contexts where that assertion does not hold (e.g., in line 6).
8254
8255 TODO, the main difference between this pass and Patterson's is that
8256 we do not propagate edge probabilities. We only compute whether
8257 edges can be taken or not. That is, instead of having a spectrum
8258 of jump probabilities between 0 and 1, we only deal with 0, 1 and
8259 DON'T KNOW. In the future, it may be worthwhile to propagate
8260 probabilities to aid branch prediction. */
8261
8262 static unsigned int
8263 execute_vrp (void)
8264 {
8265 int i;
8266 edge e;
8267 switch_update *su;
8268
8269 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
8270 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
8271 scev_initialize ();
8272
8273 insert_range_assertions ();
8274
8275 /* Estimate the number of iterations, but do not use undefined behavior
8276 for this. We can't do this lazily, as other functions may compute
8277 this using undefined behavior. */
8278 free_numbers_of_iterations_estimates ();
8279 estimate_numbers_of_iterations (false);
8280
8281 to_remove_edges = VEC_alloc (edge, heap, 10);
8282 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
8283 threadedge_initialize_values ();
8284
8285 vrp_initialize ();
8286 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
8287 vrp_finalize ();
8288
8289 free_numbers_of_iterations_estimates ();
8290
8291 /* ASSERT_EXPRs must be removed before finalizing jump threads
8292 as finalizing jump threads calls the CFG cleanup code which
8293 does not properly handle ASSERT_EXPRs. */
8294 remove_range_assertions ();
8295
8296 /* If we exposed any new variables, go ahead and put them into
8297 SSA form now, before we handle jump threading. This simplifies
8298 interactions between rewriting of _DECL nodes into SSA form
8299 and rewriting SSA_NAME nodes into SSA form after block
8300 duplication and CFG manipulation. */
8301 update_ssa (TODO_update_ssa);
8302
8303 finalize_jump_threads ();
8304
8305 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
8306 CFG in a broken state and requires a cfg_cleanup run. */
8307 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
8308 remove_edge (e);
8309 /* Update SWITCH_EXPR case label vector. */
8310 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
8311 {
8312 size_t j;
8313 size_t n = TREE_VEC_LENGTH (su->vec);
8314 tree label;
8315 gimple_switch_set_num_labels (su->stmt, n);
8316 for (j = 0; j < n; j++)
8317 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
8318 /* As we may have replaced the default label with a regular one,
8319 make sure to turn it back into a real default label. This
8320 ensures optimal expansion. */
8321 label = gimple_switch_default_label (su->stmt);
8322 CASE_LOW (label) = NULL_TREE;
8323 CASE_HIGH (label) = NULL_TREE;
8324 }
8325
8326 if (VEC_length (edge, to_remove_edges) > 0)
8327 free_dominance_info (CDI_DOMINATORS);
8328
8329 VEC_free (edge, heap, to_remove_edges);
8330 VEC_free (switch_update, heap, to_update_switch_stmts);
8331 threadedge_finalize_values ();
8332
8333 scev_finalize ();
8334 loop_optimizer_finalize ();
8335 return 0;
8336 }
8337
8338 static bool
8339 gate_vrp (void)
8340 {
8341 return flag_tree_vrp != 0;
8342 }
8343
8344 struct gimple_opt_pass pass_vrp =
8345 {
8346 {
8347 GIMPLE_PASS,
8348 "vrp", /* name */
8349 gate_vrp, /* gate */
8350 execute_vrp, /* execute */
8351 NULL, /* sub */
8352 NULL, /* next */
8353 0, /* static_pass_number */
8354 TV_TREE_VRP, /* tv_id */
8355 PROP_ssa, /* properties_required */
8356 0, /* properties_provided */
8357 0, /* properties_destroyed */
8358 0, /* todo_flags_start */
8359 TODO_cleanup_cfg
8360 | TODO_update_ssa
8361 | TODO_verify_ssa
8362 | TODO_verify_flow
8363 | TODO_ggc_collect /* todo_flags_finish */
8364 }
8365 };