re PR tree-optimization/51721 (-Warray-bounds false positives and inconsistencies)
[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
43 #include "expr.h"
44 #include "optabs.h"
45
46
47 /* Type of value ranges. See value_range_d for a description of these
48 types. */
49 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
50
51 /* Range of values that can be associated with an SSA_NAME after VRP
52 has executed. */
53 struct value_range_d
54 {
55 /* Lattice value represented by this range. */
56 enum value_range_type type;
57
58 /* Minimum and maximum values represented by this range. These
59 values should be interpreted as follows:
60
61 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
62 be NULL.
63
64 - If TYPE == VR_RANGE then MIN holds the minimum value and
65 MAX holds the maximum value of the range [MIN, MAX].
66
 67        - If TYPE == VR_ANTI_RANGE the variable is known to NOT
68 take any values in the range [MIN, MAX]. */
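       /* For example, a pointer known to be non-NULL is represented by
	  the anti-range ~[0, 0].  */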
69 tree min;
70 tree max;
71
72 /* Set of SSA names whose value ranges are equivalent to this one.
73 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
74 bitmap equiv;
75 };
76
77 typedef struct value_range_d value_range_t;
78
79 /* Set of SSA names found live during the RPO traversal of the function
80 for still active basic-blocks. */
81 static sbitmap *live;
82
83 /* Return true if the SSA name NAME is live on the edge E. */
84
85 static bool
86 live_on_edge (edge e, tree name)
87 {
88 return (live[e->dest->index]
89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
90 }
91
92 /* Local functions. */
93 static int compare_values (tree val1, tree val2);
94 static int compare_values_warnv (tree val1, tree val2, bool *);
95 static void vrp_meet (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 tree, tree, bool, bool *,
98 bool *);
99
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
104 SSA name. */
105 struct assert_locus_d
106 {
107 /* Basic block where the assertion would be inserted. */
108 basic_block bb;
109
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 edge e;
113
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si;
116
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code;
119
120 /* Value being compared against. */
121 tree val;
122
123 /* Expression to compare. */
124 tree expr;
125
126 /* Next node in the linked list. */
127 struct assert_locus_d *next;
128 };
129
130 typedef struct assert_locus_d *assert_locus_t;
131
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for;
135
136 /* Array of location lists describing where to insert assertions.  ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t *asserts_for;
140
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146
147 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
148 number of executable edges we saw the last time we visited the
149 node. */
150 static int *vr_phi_edge_counts;
151
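/* A switch statement together with the new case label vector that
   should replace its labels once propagation finishes.  */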
152 typedef struct {
153 gimple stmt;
154 tree vec;
155 } switch_update;
156
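/* Edges to remove and switch statements to update as a result of value
   range propagation; both are processed after propagation finishes.  */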
157 static VEC (edge, heap) *to_remove_edges;
158 DEF_VEC_O(switch_update);
159 DEF_VEC_ALLOC_O(switch_update, heap);
160 static VEC (switch_update, heap) *to_update_switch_stmts;
161
162
163 /* Return the maximum value for TYPE. */
164
165 static inline tree
166 vrp_val_max (const_tree type)
167 {
168 if (!INTEGRAL_TYPE_P (type))
169 return NULL_TREE;
170
171 return TYPE_MAX_VALUE (type);
172 }
173
174 /* Return the minimum value for TYPE. */
175
176 static inline tree
177 vrp_val_min (const_tree type)
178 {
179 if (!INTEGRAL_TYPE_P (type))
180 return NULL_TREE;
181
182 return TYPE_MIN_VALUE (type);
183 }
184
185 /* Return whether VAL is equal to the maximum value of its type. This
186 will be true for a positive overflow infinity. We can't do a
187 simple equality comparison with TYPE_MAX_VALUE because C typedefs
188    and Ada subtypes can produce types whose TYPE_MAX_VALUE is not the
189    same tree node as the integer constant with the same value in the type.  */
190
191 static inline bool
192 vrp_val_is_max (const_tree val)
193 {
194 tree type_max = vrp_val_max (TREE_TYPE (val));
195 return (val == type_max
196 || (type_max != NULL_TREE
197 && operand_equal_p (val, type_max, 0)));
198 }
199
200 /* Return whether VAL is equal to the minimum value of its type. This
201 will be true for a negative overflow infinity. */
202
203 static inline bool
204 vrp_val_is_min (const_tree val)
205 {
206 tree type_min = vrp_val_min (TREE_TYPE (val));
207 return (val == type_min
208 || (type_min != NULL_TREE
209 && operand_equal_p (val, type_min, 0)));
210 }
211
212
213 /* Return whether TYPE should use an overflow infinity distinct from
214 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
215 represent a signed overflow during VRP computations. An infinity
216 is distinct from a half-range, which will go from some number to
217 TYPE_{MIN,MAX}_VALUE. */
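/* Concretely, an overflow infinity is a copy of TYPE_MIN_VALUE or
   TYPE_MAX_VALUE with TREE_OVERFLOW set on it (see
   make_overflow_infinity below).  */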
218
219 static inline bool
220 needs_overflow_infinity (const_tree type)
221 {
222 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
223 }
224
225 /* Return whether TYPE can support our overflow infinity
226 representation: we use the TREE_OVERFLOW flag, which only exists
227 for constants. If TYPE doesn't support this, we don't optimize
228 cases which would require signed overflow--we drop them to
229 VARYING. */
230
231 static inline bool
232 supports_overflow_infinity (const_tree type)
233 {
234 tree min = vrp_val_min (type), max = vrp_val_max (type);
235 #ifdef ENABLE_CHECKING
236 gcc_assert (needs_overflow_infinity (type));
237 #endif
238 return (min != NULL_TREE
239 && CONSTANT_CLASS_P (min)
240 && max != NULL_TREE
241 && CONSTANT_CLASS_P (max));
242 }
243
244 /* VAL is the maximum or minimum value of a type. Return a
245 corresponding overflow infinity. */
246
247 static inline tree
248 make_overflow_infinity (tree val)
249 {
250 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
251 val = copy_node (val);
252 TREE_OVERFLOW (val) = 1;
253 return val;
254 }
255
256 /* Return a negative overflow infinity for TYPE. */
257
258 static inline tree
259 negative_overflow_infinity (tree type)
260 {
261 gcc_checking_assert (supports_overflow_infinity (type));
262 return make_overflow_infinity (vrp_val_min (type));
263 }
264
265 /* Return a positive overflow infinity for TYPE. */
266
267 static inline tree
268 positive_overflow_infinity (tree type)
269 {
270 gcc_checking_assert (supports_overflow_infinity (type));
271 return make_overflow_infinity (vrp_val_max (type));
272 }
273
274 /* Return whether VAL is a negative overflow infinity. */
275
276 static inline bool
277 is_negative_overflow_infinity (const_tree val)
278 {
279 return (needs_overflow_infinity (TREE_TYPE (val))
280 && CONSTANT_CLASS_P (val)
281 && TREE_OVERFLOW (val)
282 && vrp_val_is_min (val));
283 }
284
285 /* Return whether VAL is a positive overflow infinity. */
286
287 static inline bool
288 is_positive_overflow_infinity (const_tree val)
289 {
290 return (needs_overflow_infinity (TREE_TYPE (val))
291 && CONSTANT_CLASS_P (val)
292 && TREE_OVERFLOW (val)
293 && vrp_val_is_max (val));
294 }
295
296 /* Return whether VAL is a positive or negative overflow infinity. */
297
298 static inline bool
299 is_overflow_infinity (const_tree val)
300 {
301 return (needs_overflow_infinity (TREE_TYPE (val))
302 && CONSTANT_CLASS_P (val)
303 && TREE_OVERFLOW (val)
304 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
305 }
306
307 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
308
309 static inline bool
310 stmt_overflow_infinity (gimple stmt)
311 {
312 if (is_gimple_assign (stmt)
313 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
314 GIMPLE_SINGLE_RHS)
315 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
316 return false;
317 }
318
319 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
320 the same value with TREE_OVERFLOW clear. This can be used to avoid
321 confusing a regular value with an overflow value. */
322
323 static inline tree
324 avoid_overflow_infinity (tree val)
325 {
326 if (!is_overflow_infinity (val))
327 return val;
328
329 if (vrp_val_is_max (val))
330 return vrp_val_max (TREE_TYPE (val));
331 else
332 {
333 gcc_checking_assert (vrp_val_is_min (val));
334 return vrp_val_min (TREE_TYPE (val));
335 }
336 }
337
338
339 /* Return true if ARG is marked with the nonnull attribute in the
340 current function signature. */
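/* For example, when compiling the body of

     void __attribute__ ((nonnull (2)))
     foo (int *p, int *q) { ... }

   this returns true for Q (argument position 2) but false for P.  */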
341
342 static bool
343 nonnull_arg_p (const_tree arg)
344 {
345 tree t, attrs, fntype;
346 unsigned HOST_WIDE_INT arg_num;
347
348 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
349
350   /* The static chain decl is always non-null.  */
351 if (arg == cfun->static_chain_decl)
352 return true;
353
354 fntype = TREE_TYPE (current_function_decl);
355 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
356
357 /* If "nonnull" wasn't specified, we know nothing about the argument. */
358 if (attrs == NULL_TREE)
359 return false;
360
361 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
362 if (TREE_VALUE (attrs) == NULL_TREE)
363 return true;
364
365 /* Get the position number for ARG in the function signature. */
366 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
367 t;
368 t = DECL_CHAIN (t), arg_num++)
369 {
370 if (t == arg)
371 break;
372 }
373
374 gcc_assert (t == arg);
375
376 /* Now see if ARG_NUM is mentioned in the nonnull list. */
377 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
378 {
379 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
380 return true;
381 }
382
383 return false;
384 }
385
386
387 /* Set value range VR to VR_VARYING. */
388
389 static inline void
390 set_value_range_to_varying (value_range_t *vr)
391 {
392 vr->type = VR_VARYING;
393 vr->min = vr->max = NULL_TREE;
394 if (vr->equiv)
395 bitmap_clear (vr->equiv);
396 }
397
398
399 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
400
401 static void
402 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
403 tree max, bitmap equiv)
404 {
405 #if defined ENABLE_CHECKING
406 /* Check the validity of the range. */
407 if (t == VR_RANGE || t == VR_ANTI_RANGE)
408 {
409 int cmp;
410
411 gcc_assert (min && max);
412
413 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
414 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
415
416 cmp = compare_values (min, max);
417 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
418
419 if (needs_overflow_infinity (TREE_TYPE (min)))
420 gcc_assert (!is_overflow_infinity (min)
421 || !is_overflow_infinity (max));
422 }
423
424 if (t == VR_UNDEFINED || t == VR_VARYING)
425 gcc_assert (min == NULL_TREE && max == NULL_TREE);
426
427 if (t == VR_UNDEFINED || t == VR_VARYING)
428 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
429 #endif
430
431 vr->type = t;
432 vr->min = min;
433 vr->max = max;
434
435 /* Since updating the equivalence set involves deep copying the
436 bitmaps, only do it if absolutely necessary. */
437 if (vr->equiv == NULL
438 && equiv != NULL)
439 vr->equiv = BITMAP_ALLOC (NULL);
440
441 if (equiv != vr->equiv)
442 {
443 if (equiv && !bitmap_empty_p (equiv))
444 bitmap_copy (vr->equiv, equiv);
445 else
446 bitmap_clear (vr->equiv);
447 }
448 }
449
450
451 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
452 This means adjusting T, MIN and MAX representing the case of a
453 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
454    as the anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
455 In corner cases where MAX+1 or MIN-1 wraps this will fall back
456 to varying.
457 This routine exists to ease canonicalization in the case where we
458 extract ranges from var + CST op limit. */
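/* For example, with an 8-bit unsigned type the wrapping range
   [250, 5] is canonicalized to the anti-range ~[6, 249], and the
   anti-range ~[0, 250] becomes the range [251, 255].  */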
459
460 static void
461 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
462 tree min, tree max, bitmap equiv)
463 {
464 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
465 if ((t != VR_RANGE
466 && t != VR_ANTI_RANGE)
467 || TREE_CODE (min) != INTEGER_CST
468 || TREE_CODE (max) != INTEGER_CST)
469 {
470 set_value_range (vr, t, min, max, equiv);
471 return;
472 }
473
474   /* MIN and MAX are in the wrong order; to swap them we flip the VR
475      type and replace the bounds with MAX + 1 and MIN - 1.  */
476 if (tree_int_cst_lt (max, min))
477 {
478 tree one = build_int_cst (TREE_TYPE (min), 1);
479 tree tmp = int_const_binop (PLUS_EXPR, max, one);
480 max = int_const_binop (MINUS_EXPR, min, one);
481 min = tmp;
482
483 /* There's one corner case, if we had [C+1, C] before we now have
484 that again. But this represents an empty value range, so drop
485 to varying in this case. */
486 if (tree_int_cst_lt (max, min))
487 {
488 set_value_range_to_varying (vr);
489 return;
490 }
491
492 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
493 }
494
495 /* Anti-ranges that can be represented as ranges should be so. */
496 if (t == VR_ANTI_RANGE)
497 {
498 bool is_min = vrp_val_is_min (min);
499 bool is_max = vrp_val_is_max (max);
500
501 if (is_min && is_max)
502 {
503 /* We cannot deal with empty ranges, drop to varying. */
504 set_value_range_to_varying (vr);
505 return;
506 }
507 else if (is_min
508 /* As a special exception preserve non-null ranges. */
509 && !(TYPE_UNSIGNED (TREE_TYPE (min))
510 && integer_zerop (max)))
511 {
512 tree one = build_int_cst (TREE_TYPE (max), 1);
513 min = int_const_binop (PLUS_EXPR, max, one);
514 max = vrp_val_max (TREE_TYPE (max));
515 t = VR_RANGE;
516 }
517 else if (is_max)
518 {
519 tree one = build_int_cst (TREE_TYPE (min), 1);
520 max = int_const_binop (MINUS_EXPR, min, one);
521 min = vrp_val_min (TREE_TYPE (min));
522 t = VR_RANGE;
523 }
524 }
525
526 set_value_range (vr, t, min, max, equiv);
527 }
528
529 /* Copy value range FROM into value range TO. */
530
531 static inline void
532 copy_value_range (value_range_t *to, value_range_t *from)
533 {
534 set_value_range (to, from->type, from->min, from->max, from->equiv);
535 }
536
537 /* Set value range VR to a single value. This function is only called
538 with values we get from statements, and exists to clear the
539 TREE_OVERFLOW flag so that we don't think we have an overflow
540 infinity when we shouldn't. */
541
542 static inline void
543 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
544 {
545 gcc_assert (is_gimple_min_invariant (val));
546 val = avoid_overflow_infinity (val);
547 set_value_range (vr, VR_RANGE, val, val, equiv);
548 }
549
550 /* Set value range VR to a non-negative range of type TYPE.
551 OVERFLOW_INFINITY indicates whether to use an overflow infinity
552 rather than TYPE_MAX_VALUE; this should be true if we determine
553 that the range is nonnegative based on the assumption that signed
554 overflow does not occur. */
555
556 static inline void
557 set_value_range_to_nonnegative (value_range_t *vr, tree type,
558 bool overflow_infinity)
559 {
560 tree zero;
561
562 if (overflow_infinity && !supports_overflow_infinity (type))
563 {
564 set_value_range_to_varying (vr);
565 return;
566 }
567
568 zero = build_int_cst (type, 0);
569 set_value_range (vr, VR_RANGE, zero,
570 (overflow_infinity
571 ? positive_overflow_infinity (type)
572 : TYPE_MAX_VALUE (type)),
573 vr->equiv);
574 }
575
576 /* Set value range VR to a non-NULL range of type TYPE. */
577
578 static inline void
579 set_value_range_to_nonnull (value_range_t *vr, tree type)
580 {
581 tree zero = build_int_cst (type, 0);
582 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
583 }
584
585
586 /* Set value range VR to a NULL range of type TYPE. */
587
588 static inline void
589 set_value_range_to_null (value_range_t *vr, tree type)
590 {
591 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
592 }
593
594
595 /* Set value range VR to a range of a truthvalue of type TYPE. */
596
597 static inline void
598 set_value_range_to_truthvalue (value_range_t *vr, tree type)
599 {
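  /* A 1-bit type has only two values, so a [0, 1] range would be no
     more precise than VARYING (and might not even be representable
     if the type is signed).  */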
600 if (TYPE_PRECISION (type) == 1)
601 set_value_range_to_varying (vr);
602 else
603 set_value_range (vr, VR_RANGE,
604 build_int_cst (type, 0), build_int_cst (type, 1),
605 vr->equiv);
606 }
607
608
609 /* Set value range VR to VR_UNDEFINED. */
610
611 static inline void
612 set_value_range_to_undefined (value_range_t *vr)
613 {
614 vr->type = VR_UNDEFINED;
615 vr->min = vr->max = NULL_TREE;
616 if (vr->equiv)
617 bitmap_clear (vr->equiv);
618 }
619
620
621 /* If abs (min) < abs (max), set VR to [-max, max]; if
622 abs (min) >= abs (max), set VR to [-min, min]. */
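/* For example, [-3, 7] yields [-7, 7] and [-9, 5] yields [-9, 9].  */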
623
624 static void
625 abs_extent_range (value_range_t *vr, tree min, tree max)
626 {
627 int cmp;
628
629 gcc_assert (TREE_CODE (min) == INTEGER_CST);
630 gcc_assert (TREE_CODE (max) == INTEGER_CST);
631 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
632 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
633 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
634 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
635 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
636 {
637 set_value_range_to_varying (vr);
638 return;
639 }
640 cmp = compare_values (min, max);
641 if (cmp == -1)
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
643 else if (cmp == 0 || cmp == 1)
644 {
645 max = min;
646 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
647 }
648 else
649 {
650 set_value_range_to_varying (vr);
651 return;
652 }
653 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
654 }
655
656
657 /* Return value range information for VAR.
658
659    If we have no value ranges recorded (i.e., VRP is not running), then
660 return NULL. Otherwise create an empty range if none existed for VAR. */
661
662 static value_range_t *
663 get_value_range (const_tree var)
664 {
665 static const struct value_range_d vr_const_varying
666 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
667 value_range_t *vr;
668 tree sym;
669 unsigned ver = SSA_NAME_VERSION (var);
670
671 /* If we have no recorded ranges, then return NULL. */
672 if (! vr_value)
673 return NULL;
674
675 /* If we query the range for a new SSA name return an unmodifiable VARYING.
676 We should get here at most from the substitute-and-fold stage which
677 will never try to change values. */
678 if (ver >= num_vr_values)
679 return CONST_CAST (value_range_t *, &vr_const_varying);
680
681 vr = vr_value[ver];
682 if (vr)
683 return vr;
684
685 /* After propagation finished do not allocate new value-ranges. */
686 if (values_propagated)
687 return CONST_CAST (value_range_t *, &vr_const_varying);
688
689 /* Create a default value range. */
690 vr_value[ver] = vr = XCNEW (value_range_t);
691
692 /* Defer allocating the equivalence set. */
693 vr->equiv = NULL;
694
695 /* If VAR is a default definition of a parameter, the variable can
696 take any value in VAR's type. */
697 sym = SSA_NAME_VAR (var);
698 if (SSA_NAME_IS_DEFAULT_DEF (var)
699 && TREE_CODE (sym) == PARM_DECL)
700 {
701 /* Try to use the "nonnull" attribute to create ~[0, 0]
702 anti-ranges for pointers. Note that this is only valid with
703 default definitions of PARM_DECLs. */
704 if (POINTER_TYPE_P (TREE_TYPE (sym))
705 && nonnull_arg_p (sym))
706 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
707 else
708 set_value_range_to_varying (vr);
709 }
710
711 return vr;
712 }
713
714 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
715
716 static inline bool
717 vrp_operand_equal_p (const_tree val1, const_tree val2)
718 {
719 if (val1 == val2)
720 return true;
721 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
722 return false;
723 if (is_overflow_infinity (val1))
724 return is_overflow_infinity (val2);
725 return true;
726 }
727
728 /* Return true, if the bitmaps B1 and B2 are equal. */
729
730 static inline bool
731 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
732 {
733 return (b1 == b2
734 || ((!b1 || bitmap_empty_p (b1))
735 && (!b2 || bitmap_empty_p (b2)))
736 || (b1 && b2
737 && bitmap_equal_p (b1, b2)));
738 }
739
740 /* Update the value range and equivalence set for variable VAR to
741 NEW_VR. Return true if NEW_VR is different from VAR's previous
742 value.
743
744 NOTE: This function assumes that NEW_VR is a temporary value range
745 object created for the sole purpose of updating VAR's range. The
746 storage used by the equivalence set from NEW_VR will be freed by
747 this function. Do not call update_value_range when NEW_VR
748 is the range object associated with another SSA name. */
749
750 static inline bool
751 update_value_range (const_tree var, value_range_t *new_vr)
752 {
753 value_range_t *old_vr;
754 bool is_new;
755
756 /* Update the value range, if necessary. */
757 old_vr = get_value_range (var);
758 is_new = old_vr->type != new_vr->type
759 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
760 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
761 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
762
763 if (is_new)
764 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
765 new_vr->equiv);
766
767 BITMAP_FREE (new_vr->equiv);
768
769 return is_new;
770 }
771
772
773 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
774 point where equivalence processing can be turned on/off. */
775
776 static void
777 add_equivalence (bitmap *equiv, const_tree var)
778 {
779 unsigned ver = SSA_NAME_VERSION (var);
780 value_range_t *vr = vr_value[ver];
781
782 if (*equiv == NULL)
783 *equiv = BITMAP_ALLOC (NULL);
784 bitmap_set_bit (*equiv, ver);
785 if (vr && vr->equiv)
786 bitmap_ior_into (*equiv, vr->equiv);
787 }
788
789
790 /* Return true if VR is ~[0, 0]. */
791
792 static inline bool
793 range_is_nonnull (value_range_t *vr)
794 {
795 return vr->type == VR_ANTI_RANGE
796 && integer_zerop (vr->min)
797 && integer_zerop (vr->max);
798 }
799
800
801 /* Return true if VR is [0, 0]. */
802
803 static inline bool
804 range_is_null (value_range_t *vr)
805 {
806 return vr->type == VR_RANGE
807 && integer_zerop (vr->min)
808 && integer_zerop (vr->max);
809 }
810
811 /* Return true if the max and min of VR are INTEGER_CST.  It is not
812    necessarily a singleton.  */
813
814 static inline bool
815 range_int_cst_p (value_range_t *vr)
816 {
817 return (vr->type == VR_RANGE
818 && TREE_CODE (vr->max) == INTEGER_CST
819 && TREE_CODE (vr->min) == INTEGER_CST
820 && !TREE_OVERFLOW (vr->max)
821 && !TREE_OVERFLOW (vr->min));
822 }
823
824 /* Return true if VR is an INTEGER_CST singleton.  */
825
826 static inline bool
827 range_int_cst_singleton_p (value_range_t *vr)
828 {
829 return (range_int_cst_p (vr)
830 && tree_int_cst_equal (vr->min, vr->max));
831 }
832
833 /* Return true if value range VR involves at least one symbol. */
834
835 static inline bool
836 symbolic_range_p (value_range_t *vr)
837 {
838 return (!is_gimple_min_invariant (vr->min)
839 || !is_gimple_min_invariant (vr->max));
840 }
841
842 /* Return true if value range VR uses an overflow infinity. */
843
844 static inline bool
845 overflow_infinity_range_p (value_range_t *vr)
846 {
847 return (vr->type == VR_RANGE
848 && (is_overflow_infinity (vr->min)
849 || is_overflow_infinity (vr->max)));
850 }
851
852 /* Return false if we cannot make a valid comparison based on VR;
853 this will be the case if it uses an overflow infinity and overflow
854 is not undefined (i.e., -fno-strict-overflow is in effect).
855 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
856 uses an overflow infinity. */
857
858 static bool
859 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
860 {
861 gcc_assert (vr->type == VR_RANGE);
862 if (is_overflow_infinity (vr->min))
863 {
864 *strict_overflow_p = true;
865 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
866 return false;
867 }
868 if (is_overflow_infinity (vr->max))
869 {
870 *strict_overflow_p = true;
871 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
872 return false;
873 }
874 return true;
875 }
876
877
878 /* Return true if the result of assignment STMT is known to be non-negative.
879 If the return value is based on the assumption that signed overflow is
880 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
881 *STRICT_OVERFLOW_P.*/
882
883 static bool
884 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
885 {
886 enum tree_code code = gimple_assign_rhs_code (stmt);
887 switch (get_gimple_rhs_class (code))
888 {
889 case GIMPLE_UNARY_RHS:
890 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
891 gimple_expr_type (stmt),
892 gimple_assign_rhs1 (stmt),
893 strict_overflow_p);
894 case GIMPLE_BINARY_RHS:
895 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
896 gimple_expr_type (stmt),
897 gimple_assign_rhs1 (stmt),
898 gimple_assign_rhs2 (stmt),
899 strict_overflow_p);
900 case GIMPLE_TERNARY_RHS:
901 return false;
902 case GIMPLE_SINGLE_RHS:
903 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
904 strict_overflow_p);
905 case GIMPLE_INVALID_RHS:
906 gcc_unreachable ();
907 default:
908 gcc_unreachable ();
909 }
910 }
911
912 /* Return true if the return value of call STMT is known to be non-negative.
913 If the return value is based on the assumption that signed overflow is
914 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
915 *STRICT_OVERFLOW_P.*/
916
917 static bool
918 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
919 {
920 tree arg0 = gimple_call_num_args (stmt) > 0 ?
921 gimple_call_arg (stmt, 0) : NULL_TREE;
922 tree arg1 = gimple_call_num_args (stmt) > 1 ?
923 gimple_call_arg (stmt, 1) : NULL_TREE;
924
925 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
926 gimple_call_fndecl (stmt),
927 arg0,
928 arg1,
929 strict_overflow_p);
930 }
931
932 /* Return true if STMT is known to compute a non-negative value.
933 If the return value is based on the assumption that signed overflow is
934 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
935 *STRICT_OVERFLOW_P.*/
936
937 static bool
938 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
939 {
940 switch (gimple_code (stmt))
941 {
942 case GIMPLE_ASSIGN:
943 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
944 case GIMPLE_CALL:
945 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
946 default:
947 gcc_unreachable ();
948 }
949 }
950
951 /* Return true if the result of assignment STMT is known to be non-zero.
952 If the return value is based on the assumption that signed overflow is
953 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
954 *STRICT_OVERFLOW_P.*/
955
956 static bool
957 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
958 {
959 enum tree_code code = gimple_assign_rhs_code (stmt);
960 switch (get_gimple_rhs_class (code))
961 {
962 case GIMPLE_UNARY_RHS:
963 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
964 gimple_expr_type (stmt),
965 gimple_assign_rhs1 (stmt),
966 strict_overflow_p);
967 case GIMPLE_BINARY_RHS:
968 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
969 gimple_expr_type (stmt),
970 gimple_assign_rhs1 (stmt),
971 gimple_assign_rhs2 (stmt),
972 strict_overflow_p);
973 case GIMPLE_TERNARY_RHS:
974 return false;
975 case GIMPLE_SINGLE_RHS:
976 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
977 strict_overflow_p);
978 case GIMPLE_INVALID_RHS:
979 gcc_unreachable ();
980 default:
981 gcc_unreachable ();
982 }
983 }
984
985 /* Return true if STMT is known to compute a non-zero value.
986 If the return value is based on the assumption that signed overflow is
987 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
988 *STRICT_OVERFLOW_P.*/
989
990 static bool
991 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
992 {
993 switch (gimple_code (stmt))
994 {
995 case GIMPLE_ASSIGN:
996 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
997 case GIMPLE_CALL:
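      /* The only calls we assume compute a non-zero value are
	 alloca calls.  */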
998 return gimple_alloca_call_p (stmt);
999 default:
1000 gcc_unreachable ();
1001 }
1002 }
1003
1004 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1005 obtained so far. */
1006
1007 static bool
1008 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1009 {
1010 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1011 return true;
1012
1013 /* If we have an expression of the form &X->a, then the expression
1014 is nonnull if X is nonnull. */
1015 if (is_gimple_assign (stmt)
1016 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1017 {
1018 tree expr = gimple_assign_rhs1 (stmt);
1019 tree base = get_base_address (TREE_OPERAND (expr, 0));
1020
1021 if (base != NULL_TREE
1022 && TREE_CODE (base) == MEM_REF
1023 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1024 {
1025 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1026 if (range_is_nonnull (vr))
1027 return true;
1028 }
1029 }
1030
1031 return false;
1032 }
1033
1034 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1035 a gimple invariant, or SSA_NAME +- CST. */
1036
1037 static bool
1038 valid_value_p (tree expr)
1039 {
1040 if (TREE_CODE (expr) == SSA_NAME)
1041 return true;
1042
1043 if (TREE_CODE (expr) == PLUS_EXPR
1044 || TREE_CODE (expr) == MINUS_EXPR)
1045 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1046 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1047
1048 return is_gimple_min_invariant (expr);
1049 }
1050
1051 /* Return
1052 1 if VAL < VAL2
1053 0 if !(VAL < VAL2)
1054 -2 if those are incomparable. */
1055 static inline int
1056 operand_less_p (tree val, tree val2)
1057 {
1058 /* LT is folded faster than GE and others. Inline the common case. */
1059 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1060 {
1061 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1062 return INT_CST_LT_UNSIGNED (val, val2);
1063 else
1064 {
1065 if (INT_CST_LT (val, val2))
1066 return 1;
1067 }
1068 }
1069 else
1070 {
1071 tree tcmp;
1072
1073 fold_defer_overflow_warnings ();
1074
1075 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1076
1077 fold_undefer_and_ignore_overflow_warnings ();
1078
1079 if (!tcmp
1080 || TREE_CODE (tcmp) != INTEGER_CST)
1081 return -2;
1082
1083 if (!integer_zerop (tcmp))
1084 return 1;
1085 }
1086
1087 /* val >= val2, not considering overflow infinity. */
1088 if (is_negative_overflow_infinity (val))
1089 return is_negative_overflow_infinity (val2) ? 0 : 1;
1090 else if (is_positive_overflow_infinity (val2))
1091 return is_positive_overflow_infinity (val) ? 0 : 1;
1092
1093 return 0;
1094 }
1095
1096 /* Compare two values VAL1 and VAL2. Return
1097
1098 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1099 -1 if VAL1 < VAL2,
1100 0 if VAL1 == VAL2,
1101 +1 if VAL1 > VAL2, and
1102 +2 if VAL1 != VAL2
1103
1104 This is similar to tree_int_cst_compare but supports pointer values
1105 and values that cannot be compared at compile time.
1106
1107 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1108 true if the return value is only valid if we assume that signed
1109 overflow is undefined. */
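/* For example, when signed overflow is undefined for the type,
   comparing n_1 + 4 with n_1 + 10 returns -1 and sets
   *STRICT_OVERFLOW_P; with wrapping overflow the two are
   incomparable and -2 is returned.  */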
1110
1111 static int
1112 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1113 {
1114 if (val1 == val2)
1115 return 0;
1116
1117 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1118 both integers. */
1119 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1120 == POINTER_TYPE_P (TREE_TYPE (val2)));
1121 /* Convert the two values into the same type. This is needed because
1122 sizetype causes sign extension even for unsigned types. */
1123 val2 = fold_convert (TREE_TYPE (val1), val2);
1124 STRIP_USELESS_TYPE_CONVERSION (val2);
1125
1126 if ((TREE_CODE (val1) == SSA_NAME
1127 || TREE_CODE (val1) == PLUS_EXPR
1128 || TREE_CODE (val1) == MINUS_EXPR)
1129 && (TREE_CODE (val2) == SSA_NAME
1130 || TREE_CODE (val2) == PLUS_EXPR
1131 || TREE_CODE (val2) == MINUS_EXPR))
1132 {
1133 tree n1, c1, n2, c2;
1134 enum tree_code code1, code2;
1135
1136 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1137 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1138 same name, return -2. */
1139 if (TREE_CODE (val1) == SSA_NAME)
1140 {
1141 code1 = SSA_NAME;
1142 n1 = val1;
1143 c1 = NULL_TREE;
1144 }
1145 else
1146 {
1147 code1 = TREE_CODE (val1);
1148 n1 = TREE_OPERAND (val1, 0);
1149 c1 = TREE_OPERAND (val1, 1);
1150 if (tree_int_cst_sgn (c1) == -1)
1151 {
1152 if (is_negative_overflow_infinity (c1))
1153 return -2;
1154 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1155 if (!c1)
1156 return -2;
1157 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1158 }
1159 }
1160
1161 if (TREE_CODE (val2) == SSA_NAME)
1162 {
1163 code2 = SSA_NAME;
1164 n2 = val2;
1165 c2 = NULL_TREE;
1166 }
1167 else
1168 {
1169 code2 = TREE_CODE (val2);
1170 n2 = TREE_OPERAND (val2, 0);
1171 c2 = TREE_OPERAND (val2, 1);
1172 if (tree_int_cst_sgn (c2) == -1)
1173 {
1174 if (is_negative_overflow_infinity (c2))
1175 return -2;
1176 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1177 if (!c2)
1178 return -2;
1179 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1180 }
1181 }
1182
1183 /* Both values must use the same name. */
1184 if (n1 != n2)
1185 return -2;
1186
1187 if (code1 == SSA_NAME
1188 && code2 == SSA_NAME)
1189 /* NAME == NAME */
1190 return 0;
1191
1192 /* If overflow is defined we cannot simplify more. */
1193 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1194 return -2;
1195
1196 if (strict_overflow_p != NULL
1197 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1198 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1199 *strict_overflow_p = true;
1200
1201 if (code1 == SSA_NAME)
1202 {
1203 if (code2 == PLUS_EXPR)
1204 /* NAME < NAME + CST */
1205 return -1;
1206 else if (code2 == MINUS_EXPR)
1207 /* NAME > NAME - CST */
1208 return 1;
1209 }
1210 else if (code1 == PLUS_EXPR)
1211 {
1212 if (code2 == SSA_NAME)
1213 /* NAME + CST > NAME */
1214 return 1;
1215 else if (code2 == PLUS_EXPR)
1216 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1217 return compare_values_warnv (c1, c2, strict_overflow_p);
1218 else if (code2 == MINUS_EXPR)
1219 /* NAME + CST1 > NAME - CST2 */
1220 return 1;
1221 }
1222 else if (code1 == MINUS_EXPR)
1223 {
1224 if (code2 == SSA_NAME)
1225 /* NAME - CST < NAME */
1226 return -1;
1227 else if (code2 == PLUS_EXPR)
1228 /* NAME - CST1 < NAME + CST2 */
1229 return -1;
1230 else if (code2 == MINUS_EXPR)
1231 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1232 C1 and C2 are swapped in the call to compare_values. */
1233 return compare_values_warnv (c2, c1, strict_overflow_p);
1234 }
1235
1236 gcc_unreachable ();
1237 }
1238
1239 /* We cannot compare non-constants. */
1240 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1241 return -2;
1242
1243 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1244 {
1245 /* We cannot compare overflowed values, except for overflow
1246 infinities. */
1247 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1248 {
1249 if (strict_overflow_p != NULL)
1250 *strict_overflow_p = true;
1251 if (is_negative_overflow_infinity (val1))
1252 return is_negative_overflow_infinity (val2) ? 0 : -1;
1253 else if (is_negative_overflow_infinity (val2))
1254 return 1;
1255 else if (is_positive_overflow_infinity (val1))
1256 return is_positive_overflow_infinity (val2) ? 0 : 1;
1257 else if (is_positive_overflow_infinity (val2))
1258 return -1;
1259 return -2;
1260 }
1261
1262 return tree_int_cst_compare (val1, val2);
1263 }
1264 else
1265 {
1266 tree t;
1267
1268 /* First see if VAL1 and VAL2 are not the same. */
1269 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1270 return 0;
1271
1272 /* If VAL1 is a lower address than VAL2, return -1. */
1273 if (operand_less_p (val1, val2) == 1)
1274 return -1;
1275
1276 /* If VAL1 is a higher address than VAL2, return +1. */
1277 if (operand_less_p (val2, val1) == 1)
1278 return 1;
1279
1280 /* If VAL1 is different than VAL2, return +2.
1281 For integer constants we either have already returned -1 or 1
1282 or they are equivalent. We still might succeed in proving
1283 something about non-trivial operands. */
1284 if (TREE_CODE (val1) != INTEGER_CST
1285 || TREE_CODE (val2) != INTEGER_CST)
1286 {
1287 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1288 if (t && integer_onep (t))
1289 return 2;
1290 }
1291
1292 return -2;
1293 }
1294 }
1295
1296 /* Compare values like compare_values_warnv, but treat comparisons of
1297 nonconstants which rely on undefined overflow as incomparable. */
1298
1299 static int
1300 compare_values (tree val1, tree val2)
1301 {
1302 bool sop;
1303 int ret;
1304
1305 sop = false;
1306 ret = compare_values_warnv (val1, val2, &sop);
1307 if (sop
1308 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1309 ret = -2;
1310 return ret;
1311 }
1312
1313
1314 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1315 0 if VAL is not inside VR,
1316 -2 if we cannot tell either way.
1317
1318    FIXME, the current semantics of this function are a bit quirky
1319 when taken in the context of VRP. In here we do not care
1320 about VR's type. If VR is the anti-range ~[3, 5] the call
1321 value_inside_range (4, VR) will return 1.
1322
1323 This is counter-intuitive in a strict sense, but the callers
1324 currently expect this. They are calling the function
1325 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1326 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1327 themselves.
1328
1329 This also applies to value_ranges_intersect_p and
1330 range_includes_zero_p. The semantics of VR_RANGE and
1331 VR_ANTI_RANGE should be encoded here, but that also means
1332 adapting the users of these functions to the new semantics.
1333
1334 Benchmark compile/20001226-1.c compilation time after changing this
1335 function. */
1336
1337 static inline int
1338 value_inside_range (tree val, value_range_t * vr)
1339 {
1340 int cmp1, cmp2;
1341
1342 cmp1 = operand_less_p (val, vr->min);
1343 if (cmp1 == -2)
1344 return -2;
1345 if (cmp1 == 1)
1346 return 0;
1347
1348 cmp2 = operand_less_p (vr->max, val);
1349 if (cmp2 == -2)
1350 return -2;
1351
1352 return !cmp2;
1353 }
1354
1355
1356 /* Return true if value ranges VR0 and VR1 have a non-empty
1357 intersection.
1358
1359 Benchmark compile/20001226-1.c compilation time after changing this
1360 function.
1361 */
1362
1363 static inline bool
1364 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1365 {
1366 /* The value ranges do not intersect if the maximum of the first range is
1367 less than the minimum of the second range or vice versa.
1368 When those relations are unknown, we can't do any better. */
1369 if (operand_less_p (vr0->max, vr1->min) != 0)
1370 return false;
1371 if (operand_less_p (vr1->max, vr0->min) != 0)
1372 return false;
1373 return true;
1374 }
1375
1376
1377 /* Return true if VR includes the value zero, false otherwise. FIXME,
1378 currently this will return false for an anti-range like ~[-4, 3].
1379 This will be wrong when the semantics of value_inside_range are
1380 modified (currently the users of this function expect these
1381 semantics). */
1382
1383 static inline bool
1384 range_includes_zero_p (value_range_t *vr)
1385 {
1386 tree zero;
1387
1388 gcc_assert (vr->type != VR_UNDEFINED
1389 && vr->type != VR_VARYING
1390 && !symbolic_range_p (vr));
1391
1392 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1393 return (value_inside_range (zero, vr) == 1);
1394 }
1395
1396 /* Return true if *VR is known to contain only nonnegative values.  */
1397
1398 static inline bool
1399 value_range_nonnegative_p (value_range_t *vr)
1400 {
1401 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1402 which would return a useful value should be encoded as a
1403 VR_RANGE. */
1404 if (vr->type == VR_RANGE)
1405 {
1406 int result = compare_values (vr->min, integer_zero_node);
1407 return (result == 0 || result == 1);
1408 }
1409
1410 return false;
1411 }
1412
1413 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1414 false otherwise or if no value range information is available. */
1415
1416 bool
1417 ssa_name_nonnegative_p (const_tree t)
1418 {
1419 value_range_t *vr = get_value_range (t);
1420
1421 if (INTEGRAL_TYPE_P (t)
1422 && TYPE_UNSIGNED (t))
1423 return true;
1424
1425 if (!vr)
1426 return false;
1427
1428 return value_range_nonnegative_p (vr);
1429 }
1430
1431 /* If *VR has a value range that is a single constant value return that,
1432 otherwise return NULL_TREE. */
1433
1434 static tree
1435 value_range_constant_singleton (value_range_t *vr)
1436 {
1437 if (vr->type == VR_RANGE
1438 && operand_equal_p (vr->min, vr->max, 0)
1439 && is_gimple_min_invariant (vr->min))
1440 return vr->min;
1441
1442 return NULL_TREE;
1443 }
1444
1445 /* If OP has a value range with a single constant value return that,
1446 otherwise return NULL_TREE. This returns OP itself if OP is a
1447 constant. */
1448
1449 static tree
1450 op_with_constant_singleton_value_range (tree op)
1451 {
1452 if (is_gimple_min_invariant (op))
1453 return op;
1454
1455 if (TREE_CODE (op) != SSA_NAME)
1456 return NULL_TREE;
1457
1458 return value_range_constant_singleton (get_value_range (op));
1459 }
1460
1461 /* Return true if op is in a boolean [0, 1] value-range. */
1462
1463 static bool
1464 op_with_boolean_value_range_p (tree op)
1465 {
1466 value_range_t *vr;
1467
1468 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1469 return true;
1470
1471 if (integer_zerop (op)
1472 || integer_onep (op))
1473 return true;
1474
1475 if (TREE_CODE (op) != SSA_NAME)
1476 return false;
1477
1478 vr = get_value_range (op);
1479 return (vr->type == VR_RANGE
1480 && integer_zerop (vr->min)
1481 && integer_onep (vr->max));
1482 }
1483
1484 /* Extract value range information from an ASSERT_EXPR EXPR and store
1485 it in *VR_P. */
1486
1487 static void
1488 extract_range_from_assert (value_range_t *vr_p, tree expr)
1489 {
1490 tree var, cond, limit, min, max, type;
1491 value_range_t *var_vr, *limit_vr;
1492 enum tree_code cond_code;
1493
1494 var = ASSERT_EXPR_VAR (expr);
1495 cond = ASSERT_EXPR_COND (expr);
1496
1497 gcc_assert (COMPARISON_CLASS_P (cond));
1498
1499 /* Find VAR in the ASSERT_EXPR conditional. */
1500 if (var == TREE_OPERAND (cond, 0)
1501 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1502 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1503 {
1504 /* If the predicate is of the form VAR COMP LIMIT, then we just
1505 take LIMIT from the RHS and use the same comparison code. */
1506 cond_code = TREE_CODE (cond);
1507 limit = TREE_OPERAND (cond, 1);
1508 cond = TREE_OPERAND (cond, 0);
1509 }
1510 else
1511 {
1512 /* If the predicate is of the form LIMIT COMP VAR, then we need
1513 to flip around the comparison code to create the proper range
1514 for VAR. */
1515 cond_code = swap_tree_comparison (TREE_CODE (cond));
1516 limit = TREE_OPERAND (cond, 0);
1517 cond = TREE_OPERAND (cond, 1);
1518 }
1519
1520 limit = avoid_overflow_infinity (limit);
1521
1522 type = TREE_TYPE (var);
1523 gcc_assert (limit != var);
1524
1525 /* For pointer arithmetic, we only keep track of pointer equality
1526 and inequality. */
1527 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1528 {
1529 set_value_range_to_varying (vr_p);
1530 return;
1531 }
1532
1533 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1534 try to use LIMIT's range to avoid creating symbolic ranges
1535 unnecessarily. */
1536 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1537
1538 /* LIMIT's range is only interesting if it has any useful information. */
1539 if (limit_vr
1540 && (limit_vr->type == VR_UNDEFINED
1541 || limit_vr->type == VR_VARYING
1542 || symbolic_range_p (limit_vr)))
1543 limit_vr = NULL;
1544
1545   /* Initially, the new range has the same set of equivalences as
1546 VAR's range. This will be revised before returning the final
1547 value. Since assertions may be chained via mutually exclusive
1548 predicates, we will need to trim the set of equivalences before
1549 we are done. */
1550 gcc_assert (vr_p->equiv == NULL);
1551 add_equivalence (&vr_p->equiv, var);
1552
1553 /* Extract a new range based on the asserted comparison for VAR and
1554 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1555 will only use it for equality comparisons (EQ_EXPR). For any
1556 other kind of assertion, we cannot derive a range from LIMIT's
1557 anti-range that can be used to describe the new range. For
1558 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1559 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1560 no single range for x_2 that could describe LE_EXPR, so we might
1561 as well build the range [b_4, +INF] for it.
1562 One special case we handle is extracting a range from a
1563 range test encoded as (unsigned)var + CST <= limit. */
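  /* For instance, the test (unsigned) x_1 + 10 <= 19 asserts that
     x_1 is in the signed range [-10, 9].  */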
1564 if (TREE_CODE (cond) == NOP_EXPR
1565 || TREE_CODE (cond) == PLUS_EXPR)
1566 {
1567 if (TREE_CODE (cond) == PLUS_EXPR)
1568 {
1569 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1570 TREE_OPERAND (cond, 1));
1571 max = int_const_binop (PLUS_EXPR, limit, min);
1572 cond = TREE_OPERAND (cond, 0);
1573 }
1574 else
1575 {
1576 min = build_int_cst (TREE_TYPE (var), 0);
1577 max = limit;
1578 }
1579
1580 /* Make sure to not set TREE_OVERFLOW on the final type
1581 conversion. We are willingly interpreting large positive
1582 	 unsigned values as negative signed values here.  */
1583 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1584 0, false);
1585 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1586 0, false);
1587
1588 /* We can transform a max, min range to an anti-range or
1589 vice-versa. Use set_and_canonicalize_value_range which does
1590 this for us. */
1591 if (cond_code == LE_EXPR)
1592 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1593 min, max, vr_p->equiv);
1594 else if (cond_code == GT_EXPR)
1595 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1596 min, max, vr_p->equiv);
1597 else
1598 gcc_unreachable ();
1599 }
1600 else if (cond_code == EQ_EXPR)
1601 {
1602 enum value_range_type range_type;
1603
1604 if (limit_vr)
1605 {
1606 range_type = limit_vr->type;
1607 min = limit_vr->min;
1608 max = limit_vr->max;
1609 }
1610 else
1611 {
1612 range_type = VR_RANGE;
1613 min = limit;
1614 max = limit;
1615 }
1616
1617 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1618
1619 /* When asserting the equality VAR == LIMIT and LIMIT is another
1620 SSA name, the new range will also inherit the equivalence set
1621 from LIMIT. */
1622 if (TREE_CODE (limit) == SSA_NAME)
1623 add_equivalence (&vr_p->equiv, limit);
1624 }
1625 else if (cond_code == NE_EXPR)
1626 {
1627 /* As described above, when LIMIT's range is an anti-range and
1628 this assertion is an inequality (NE_EXPR), then we cannot
1629 derive anything from the anti-range. For instance, if
1630 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1631 not imply that VAR's range is [0, 0]. So, in the case of
1632 anti-ranges, we just assert the inequality using LIMIT and
1633 not its anti-range.
1634
1635 If LIMIT_VR is a range, we can only use it to build a new
1636 anti-range if LIMIT_VR is a single-valued range. For
1637 instance, if LIMIT_VR is [0, 1], the predicate
1638 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1639 Rather, it means that for value 0 VAR should be ~[0, 0]
1640 and for value 1, VAR should be ~[1, 1]. We cannot
1641 represent these ranges.
1642
1643 The only situation in which we can build a valid
1644 anti-range is when LIMIT_VR is a single-valued range
1645 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1646 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1647 if (limit_vr
1648 && limit_vr->type == VR_RANGE
1649 && compare_values (limit_vr->min, limit_vr->max) == 0)
1650 {
1651 min = limit_vr->min;
1652 max = limit_vr->max;
1653 }
1654 else
1655 {
1656 /* In any other case, we cannot use LIMIT's range to build a
1657 valid anti-range. */
1658 min = max = limit;
1659 }
1660
1661 /* If MIN and MAX cover the whole range for their type, then
1662 just use the original LIMIT. */
1663 if (INTEGRAL_TYPE_P (type)
1664 && vrp_val_is_min (min)
1665 && vrp_val_is_max (max))
1666 min = max = limit;
1667
1668 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1669 }
1670 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1671 {
1672 min = TYPE_MIN_VALUE (type);
1673
1674 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1675 max = limit;
1676 else
1677 {
1678 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1679 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1680 LT_EXPR. */
1681 max = limit_vr->max;
1682 }
1683
1684 /* If the maximum value forces us to be out of bounds, simply punt.
1685 It would be pointless to try and do anything more since this
1686 all should be optimized away above us. */
1687 if ((cond_code == LT_EXPR
1688 && compare_values (max, min) == 0)
1689 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1690 set_value_range_to_varying (vr_p);
1691 else
1692 {
1693 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1694 if (cond_code == LT_EXPR)
1695 {
1696 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1697 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1698 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1699 build_int_cst (TREE_TYPE (max), -1));
1700 else
1701 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1702 build_int_cst (TREE_TYPE (max), 1));
1703 if (EXPR_P (max))
1704 TREE_NO_WARNING (max) = 1;
1705 }
1706
1707 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1708 }
1709 }
1710 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1711 {
1712 max = TYPE_MAX_VALUE (type);
1713
1714 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1715 min = limit;
1716 else
1717 {
1718 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1719 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1720 GT_EXPR. */
1721 min = limit_vr->min;
1722 }
1723
1724 /* If the minimum value forces us to be out of bounds, simply punt.
1725 It would be pointless to try and do anything more since this
1726 all should be optimized away above us. */
1727 if ((cond_code == GT_EXPR
1728 && compare_values (min, max) == 0)
1729 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1730 set_value_range_to_varying (vr_p);
1731 else
1732 {
1733 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1734 if (cond_code == GT_EXPR)
1735 {
1736 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1737 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1738 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1739 build_int_cst (TREE_TYPE (min), -1));
1740 else
1741 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1742 build_int_cst (TREE_TYPE (min), 1));
1743 if (EXPR_P (min))
1744 TREE_NO_WARNING (min) = 1;
1745 }
1746
1747 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1748 }
1749 }
1750 else
1751 gcc_unreachable ();
1752
1753 /* If VAR already had a known range, it may happen that the new
1754 range we have computed and VAR's range are not compatible. For
1755 instance,
1756
1757 if (p_5 == NULL)
1758 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1759 x_7 = p_6->fld;
1760 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1761
1762 While the above comes from a faulty program, it will cause an ICE
1763 later because p_8 and p_6 will have incompatible ranges and at
1764 the same time will be considered equivalent. A similar situation
1765 would arise from
1766
1767 if (i_5 > 10)
1768 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1769 if (i_5 < 5)
1770 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1771
1772 Again i_6 and i_7 will have incompatible ranges. It would be
1773 pointless to try and do anything with i_7's range because
1774 anything dominated by 'if (i_5 < 5)' will be optimized away.
1775    Note, due to the way in which simulation proceeds, the statement
1776    i_7 = ASSERT_EXPR <...> would never be visited because the
1777 conditional 'if (i_5 < 5)' always evaluates to false. However,
1778 this extra check does not hurt and may protect against future
1779 changes to VRP that may get into a situation similar to the
1780 NULL pointer dereference example.
1781
1782 Note that these compatibility tests are only needed when dealing
1783 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1784 are both anti-ranges, they will always be compatible, because two
1785 anti-ranges will always have a non-empty intersection. */
1786
1787 var_vr = get_value_range (var);
1788
1789 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1790 ranges or anti-ranges. */
1791 if (vr_p->type == VR_VARYING
1792 || vr_p->type == VR_UNDEFINED
1793 || var_vr->type == VR_VARYING
1794 || var_vr->type == VR_UNDEFINED
1795 || symbolic_range_p (vr_p)
1796 || symbolic_range_p (var_vr))
1797 return;
1798
1799 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1800 {
1801 /* If the two ranges have a non-empty intersection, we can
1802 refine the resulting range. Since the assert expression
1803 creates an equivalency and at the same time it asserts a
1804 predicate, we can take the intersection of the two ranges to
1805 get better precision. */
1806 if (value_ranges_intersect_p (var_vr, vr_p))
1807 {
1808 /* Use the larger of the two minimums. */
1809 if (compare_values (vr_p->min, var_vr->min) == -1)
1810 min = var_vr->min;
1811 else
1812 min = vr_p->min;
1813
1814 /* Use the smaller of the two maximums. */
1815 if (compare_values (vr_p->max, var_vr->max) == 1)
1816 max = var_vr->max;
1817 else
1818 max = vr_p->max;
1819
1820 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1821 }
1822 else
1823 {
1824 /* The two ranges do not intersect, set the new range to
1825 VARYING, because we will not be able to do anything
1826 meaningful with it. */
1827 set_value_range_to_varying (vr_p);
1828 }
1829 }
1830 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1831 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1832 {
1833 /* A range and an anti-range will cancel each other only if
1834 their ends are the same. For instance, in the example above,
1835 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1836 so VR_P should be set to VR_VARYING. */
1837 if (compare_values (var_vr->min, vr_p->min) == 0
1838 && compare_values (var_vr->max, vr_p->max) == 0)
1839 set_value_range_to_varying (vr_p);
1840 else
1841 {
1842 tree min, max, anti_min, anti_max, real_min, real_max;
1843 int cmp;
1844
1845 /* We want to compute the logical AND of the two ranges;
1846 there are three cases to consider.
1847
1848
1849 1. The VR_ANTI_RANGE range is completely within the
1850 VR_RANGE and the endpoints of the ranges are
1851 different. In that case the resulting range
1852 should be whichever range is more precise.
1853 Typically that will be the VR_RANGE.
1854
1855 2. The VR_ANTI_RANGE is completely disjoint from
1856 the VR_RANGE. In this case the resulting range
1857 should be the VR_RANGE.
1858
1859 3. There is some overlap between the VR_ANTI_RANGE
1860 and the VR_RANGE.
1861
1862 3a. If the high limit of the VR_ANTI_RANGE resides
1863 within the VR_RANGE, then the result is a new
1864 VR_RANGE starting at the high limit of the
1865 VR_ANTI_RANGE + 1 and extending to the
1866 high limit of the original VR_RANGE.
1867
1868 3b. If the low limit of the VR_ANTI_RANGE resides
1869 within the VR_RANGE, then the result is a new
1870 VR_RANGE starting at the low limit of the original
1871 VR_RANGE and extending to the low limit of the
1872 VR_ANTI_RANGE - 1. */
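/* A worked illustration (added here, not part of the original comment):
   with a real range [0, 10],
     - an anti-range ~[3, 5] inside it leaves [0, 10] (case 1),
     - an anti-range ~[20, 30] is disjoint and leaves [0, 10] (case 2),
     - an anti-range ~[-5, 3] overlaps the low end and gives [4, 10]
       (case 3a),
     - an anti-range ~[7, 15] overlaps the high end and gives [0, 6]
       (case 3b).  */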
1873 if (vr_p->type == VR_ANTI_RANGE)
1874 {
1875 anti_min = vr_p->min;
1876 anti_max = vr_p->max;
1877 real_min = var_vr->min;
1878 real_max = var_vr->max;
1879 }
1880 else
1881 {
1882 anti_min = var_vr->min;
1883 anti_max = var_vr->max;
1884 real_min = vr_p->min;
1885 real_max = vr_p->max;
1886 }
1887
1888
1889 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1890 not including any endpoints. */
1891 if (compare_values (anti_max, real_max) == -1
1892 && compare_values (anti_min, real_min) == 1)
1893 {
1894 /* If the range covers the whole valid range of
1895 the type, keep the anti-range. */
1896 if (!vrp_val_is_min (real_min)
1897 || !vrp_val_is_max (real_max))
1898 set_value_range (vr_p, VR_RANGE, real_min,
1899 real_max, vr_p->equiv);
1900 }
1901 /* Case 2, VR_ANTI_RANGE completely disjoint from
1902 VR_RANGE. */
1903 else if (compare_values (anti_min, real_max) == 1
1904 || compare_values (anti_max, real_min) == -1)
1905 {
1906 set_value_range (vr_p, VR_RANGE, real_min,
1907 real_max, vr_p->equiv);
1908 }
1909 /* Case 3a, the anti-range extends into the low
1910 part of the real range. Thus creating a new
1911 low bound for the real range. */
1912 else if (((cmp = compare_values (anti_max, real_min)) == 1
1913 || cmp == 0)
1914 && compare_values (anti_max, real_max) == -1)
1915 {
1916 gcc_assert (!is_positive_overflow_infinity (anti_max));
1917 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1918 && vrp_val_is_max (anti_max))
1919 {
1920 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1921 {
1922 set_value_range_to_varying (vr_p);
1923 return;
1924 }
1925 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1926 }
1927 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1928 {
1929 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1930 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1931 min = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1932 anti_max,
1933 build_int_cst (TREE_TYPE (var_vr->min),
1934 -1));
1935 else
1936 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1937 anti_max,
1938 build_int_cst (TREE_TYPE (var_vr->min),
1939 1));
1940 }
1941 else
1942 min = fold_build_pointer_plus_hwi (anti_max, 1);
1943 max = real_max;
1944 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1945 }
1946 /* Case 3b, the anti-range extends into the high
1947 part of the real range. Thus creating a new
1948 high bound for the real range. */
1949 else if (compare_values (anti_min, real_min) == 1
1950 && ((cmp = compare_values (anti_min, real_max)) == -1
1951 || cmp == 0))
1952 {
1953 gcc_assert (!is_negative_overflow_infinity (anti_min));
1954 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1955 && vrp_val_is_min (anti_min))
1956 {
1957 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1958 {
1959 set_value_range_to_varying (vr_p);
1960 return;
1961 }
1962 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1963 }
1964 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1965 {
1966 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1967 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1968 max = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1969 anti_min,
1970 build_int_cst (TREE_TYPE (var_vr->min),
1971 -1));
1972 else
1973 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1974 anti_min,
1975 build_int_cst (TREE_TYPE (var_vr->min),
1976 1));
1977 }
1978 else
1979 max = fold_build_pointer_plus_hwi (anti_min, -1);
1980 min = real_min;
1981 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1982 }
1983 }
1984 }
1985 }
1986
1987
1988 /* Extract range information from SSA name VAR and store it in VR. If
1989 VAR has an interesting range, use it. Otherwise, create the
1990 range [VAR, VAR] and return it. This is useful in situations where
1991 we may have conditionals testing values of VARYING names. For
1992 instance,
1993
1994 x_3 = y_5;
1995 if (x_3 > y_5)
1996 ...
1997
1998 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1999 always false. */
2000
2001 static void
2002 extract_range_from_ssa_name (value_range_t *vr, tree var)
2003 {
2004 value_range_t *var_vr = get_value_range (var);
2005
2006 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
2007 copy_value_range (vr, var_vr);
2008 else
2009 set_value_range (vr, VR_RANGE, var, var, NULL);
2010
2011 add_equivalence (&vr->equiv, var);
2012 }
2013
2014
2015 /* Wrapper around int_const_binop. If the operation overflows and we
2016 are not using wrapping arithmetic, then adjust the result to be
2017 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
2018 NULL_TREE if we need to use an overflow infinity representation but
2019 the type does not support it. */
2020
2021 static tree
2022 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
2023 {
2024 tree res;
2025
2026 res = int_const_binop (code, val1, val2);
2027
2028 /* If we are using unsigned arithmetic, operate symbolically
2029 on -INF and +INF as int_const_binop only handles signed overflow. */
2030 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
2031 {
2032 int checkz = compare_values (res, val1);
2033 bool overflow = false;
2034
2035 /* Ensure that res = val1 [+*] val2 >= val1
2036 or that res = val1 - val2 <= val1. */
2037 if ((code == PLUS_EXPR
2038 && !(checkz == 1 || checkz == 0))
2039 || (code == MINUS_EXPR
2040 && !(checkz == 0 || checkz == -1)))
2041 {
2042 overflow = true;
2043 }
2044 /* Checking for multiplication overflow is done by dividing the
2045 output of the multiplication by the first input of the
2046 multiplication. If the result of that division operation is
2047 not equal to the second input of the multiplication, then the
2048 multiplication overflowed. */
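/* For instance (an added illustration, not in the original comment):
   with an 8-bit unsigned type, 200 * 2 wraps to 144; dividing back,
   144 / 200 == 0, which differs from the second operand 2, so the
   multiplication is flagged as having overflowed.  */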
2049 else if (code == MULT_EXPR && !integer_zerop (val1))
2050 {
2051 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
2052 res,
2053 val1);
2054 int check = compare_values (tmp, val2);
2055
2056 if (check != 0)
2057 overflow = true;
2058 }
2059
2060 if (overflow)
2061 {
2062 res = copy_node (res);
2063 TREE_OVERFLOW (res) = 1;
2064 }
2065
2066 }
2067 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
2068 /* If the signed operation wraps then int_const_binop has done
2069 everything we want. */
2070 ;
2071 else if ((TREE_OVERFLOW (res)
2072 && !TREE_OVERFLOW (val1)
2073 && !TREE_OVERFLOW (val2))
2074 || is_overflow_infinity (val1)
2075 || is_overflow_infinity (val2))
2076 {
2077 /* If the operation overflowed but neither VAL1 nor VAL2 are
2078 overflown, return -INF or +INF depending on the operation
2079 and the combination of signs of the operands. */
2080 int sgn1 = tree_int_cst_sgn (val1);
2081 int sgn2 = tree_int_cst_sgn (val2);
2082
2083 if (needs_overflow_infinity (TREE_TYPE (res))
2084 && !supports_overflow_infinity (TREE_TYPE (res)))
2085 return NULL_TREE;
2086
2087 /* We have to punt on adding infinities of different signs,
2088 since we can't tell what the sign of the result should be.
2089 Likewise for subtracting infinities of the same sign. */
2090 if (((code == PLUS_EXPR && sgn1 != sgn2)
2091 || (code == MINUS_EXPR && sgn1 == sgn2))
2092 && is_overflow_infinity (val1)
2093 && is_overflow_infinity (val2))
2094 return NULL_TREE;
2095
2096 /* Don't try to handle division or shifting of infinities. */
2097 if ((code == TRUNC_DIV_EXPR
2098 || code == FLOOR_DIV_EXPR
2099 || code == CEIL_DIV_EXPR
2100 || code == EXACT_DIV_EXPR
2101 || code == ROUND_DIV_EXPR
2102 || code == RSHIFT_EXPR)
2103 && (is_overflow_infinity (val1)
2104 || is_overflow_infinity (val2)))
2105 return NULL_TREE;
2106
2107 /* Notice that we only need to handle the restricted set of
2108 operations handled by extract_range_from_binary_expr.
2109 Among them, only multiplication, addition and subtraction
2110 can yield overflow without overflown operands because we
2111 are working with integral types only... except in the
2112 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2113 for division too. */
2114
2115 /* For multiplication, the sign of the overflow is given
2116 by the comparison of the signs of the operands. */
2117 if ((code == MULT_EXPR && sgn1 == sgn2)
2118 /* For addition, the operands must be of the same sign
2119 to yield an overflow. Its sign is therefore that
2120 of one of the operands, for example the first. For
2121 infinite operands X + -INF is negative, not positive. */
2122 || (code == PLUS_EXPR
2123 && (sgn1 >= 0
2124 ? !is_negative_overflow_infinity (val2)
2125 : is_positive_overflow_infinity (val2)))
2126 /* For subtraction, non-infinite operands must be of
2127 different signs to yield an overflow. Its sign is
2128 therefore that of the first operand or the opposite of
2129 that of the second operand. A first operand of 0 counts
2130 as positive here, for the corner case 0 - (-INF), which
2131 overflows, but must yield +INF. For infinite operands 0
2132 - INF is negative, not positive. */
2133 || (code == MINUS_EXPR
2134 && (sgn1 >= 0
2135 ? !is_positive_overflow_infinity (val2)
2136 : is_negative_overflow_infinity (val2)))
2137 /* We only get in here with positive shift count, so the
2138 overflow direction is the same as the sign of val1.
2139 Actually rshift does not overflow at all, but we only
2140 handle the case of shifting overflowed -INF and +INF. */
2141 || (code == RSHIFT_EXPR
2142 && sgn1 >= 0)
2143 /* For division, the only case is -INF / -1 = +INF. */
2144 || code == TRUNC_DIV_EXPR
2145 || code == FLOOR_DIV_EXPR
2146 || code == CEIL_DIV_EXPR
2147 || code == EXACT_DIV_EXPR
2148 || code == ROUND_DIV_EXPR)
2149 return (needs_overflow_infinity (TREE_TYPE (res))
2150 ? positive_overflow_infinity (TREE_TYPE (res))
2151 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2152 else
2153 return (needs_overflow_infinity (TREE_TYPE (res))
2154 ? negative_overflow_infinity (TREE_TYPE (res))
2155 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2156 }
2157
2158 return res;
2159 }
2160
2161
2162 /* For range VR compute two double_int bitmasks. In the *MAY_BE_NONZERO
2163 bitmask, if some bit is unset, it means that for all numbers in the range
2164 the bit is 0; otherwise it might be 0 or 1. In the *MUST_BE_NONZERO
2165 bitmask, if some bit is set, it means that for all numbers in the range
2166 the bit is 1; otherwise it might be 0 or 1. */
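/* A small worked example (added for illustration): for the range
   [4, 7] every value has bit 2 set and no bit above bit 2 set, while
   bits 0 and 1 vary, so *MUST_BE_NONZERO is 4 (binary 100) and
   *MAY_BE_NONZERO is 7 (binary 111).  */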
2167
2168 static bool
2169 zero_nonzero_bits_from_vr (value_range_t *vr,
2170 double_int *may_be_nonzero,
2171 double_int *must_be_nonzero)
2172 {
2173 *may_be_nonzero = double_int_minus_one;
2174 *must_be_nonzero = double_int_zero;
2175 if (!range_int_cst_p (vr))
2176 return false;
2177
2178 if (range_int_cst_singleton_p (vr))
2179 {
2180 *may_be_nonzero = tree_to_double_int (vr->min);
2181 *must_be_nonzero = *may_be_nonzero;
2182 }
2183 else if (tree_int_cst_sgn (vr->min) >= 0
2184 || tree_int_cst_sgn (vr->max) < 0)
2185 {
2186 double_int dmin = tree_to_double_int (vr->min);
2187 double_int dmax = tree_to_double_int (vr->max);
2188 double_int xor_mask = double_int_xor (dmin, dmax);
2189 *may_be_nonzero = double_int_ior (dmin, dmax);
2190 *must_be_nonzero = double_int_and (dmin, dmax);
2191 if (xor_mask.high != 0)
2192 {
2193 unsigned HOST_WIDE_INT mask
2194 = ((unsigned HOST_WIDE_INT) 1
2195 << floor_log2 (xor_mask.high)) - 1;
2196 may_be_nonzero->low = ALL_ONES;
2197 may_be_nonzero->high |= mask;
2198 must_be_nonzero->low = 0;
2199 must_be_nonzero->high &= ~mask;
2200 }
2201 else if (xor_mask.low != 0)
2202 {
2203 unsigned HOST_WIDE_INT mask
2204 = ((unsigned HOST_WIDE_INT) 1
2205 << floor_log2 (xor_mask.low)) - 1;
2206 may_be_nonzero->low |= mask;
2207 must_be_nonzero->low &= ~mask;
2208 }
2209 }
2210
2211 return true;
2212 }
2213
2214 /* Helper to extract a value-range *VR for a multiplicative operation
2215 *VR0 CODE *VR1. */
2216
2217 static void
2218 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2219 enum tree_code code,
2220 value_range_t *vr0, value_range_t *vr1)
2221 {
2222 enum value_range_type type;
2223 tree val[4];
2224 size_t i;
2225 tree min, max;
2226 bool sop;
2227 int cmp;
2228
2229 /* Multiplications, divisions and shifts are a bit tricky to handle,
2230 depending on the mix of signs we have in the two ranges, we
2231 need to operate on different values to get the minimum and
2232 maximum values for the new range. One approach is to figure
2233 out all the variations of range combinations and do the
2234 operations.
2235
2236 However, this involves several calls to compare_values and it
2237 is pretty convoluted. It's simpler to do the 4 operations
2238 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2239 MAX1) and then figure the smallest and largest values to form
2240 the new range. */
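/* For example (added illustration): [-3, 5] * [2, 4] yields the cross
   products -6, -12, 10 and 20, from which the new range [-12, 20] is
   formed.  */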
2241 gcc_assert (code == MULT_EXPR
2242 || code == TRUNC_DIV_EXPR
2243 || code == FLOOR_DIV_EXPR
2244 || code == CEIL_DIV_EXPR
2245 || code == EXACT_DIV_EXPR
2246 || code == ROUND_DIV_EXPR
2247 || code == RSHIFT_EXPR);
2248 gcc_assert ((vr0->type == VR_RANGE
2249 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2250 && vr0->type == vr1->type);
2251
2252 type = vr0->type;
2253
2254 /* Compute the 4 cross operations. */
2255 sop = false;
2256 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2257 if (val[0] == NULL_TREE)
2258 sop = true;
2259
2260 if (vr1->max == vr1->min)
2261 val[1] = NULL_TREE;
2262 else
2263 {
2264 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2265 if (val[1] == NULL_TREE)
2266 sop = true;
2267 }
2268
2269 if (vr0->max == vr0->min)
2270 val[2] = NULL_TREE;
2271 else
2272 {
2273 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2274 if (val[2] == NULL_TREE)
2275 sop = true;
2276 }
2277
2278 if (vr0->min == vr0->max || vr1->min == vr1->max)
2279 val[3] = NULL_TREE;
2280 else
2281 {
2282 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2283 if (val[3] == NULL_TREE)
2284 sop = true;
2285 }
2286
2287 if (sop)
2288 {
2289 set_value_range_to_varying (vr);
2290 return;
2291 }
2292
2293 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2294 of VAL[i]. */
2295 min = val[0];
2296 max = val[0];
2297 for (i = 1; i < 4; i++)
2298 {
2299 if (!is_gimple_min_invariant (min)
2300 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2301 || !is_gimple_min_invariant (max)
2302 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2303 break;
2304
2305 if (val[i])
2306 {
2307 if (!is_gimple_min_invariant (val[i])
2308 || (TREE_OVERFLOW (val[i])
2309 && !is_overflow_infinity (val[i])))
2310 {
2311 /* If we found an overflowed value, set MIN and MAX
2312 to it so that we set the resulting range to
2313 VARYING. */
2314 min = max = val[i];
2315 break;
2316 }
2317
2318 if (compare_values (val[i], min) == -1)
2319 min = val[i];
2320
2321 if (compare_values (val[i], max) == 1)
2322 max = val[i];
2323 }
2324 }
2325
2326 /* If either MIN or MAX overflowed, then set the resulting range to
2327 VARYING. But we do accept an overflow infinity
2328 representation. */
2329 if (min == NULL_TREE
2330 || !is_gimple_min_invariant (min)
2331 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2332 || max == NULL_TREE
2333 || !is_gimple_min_invariant (max)
2334 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2335 {
2336 set_value_range_to_varying (vr);
2337 return;
2338 }
2339
2340 /* We punt if:
2341 1) [-INF, +INF]
2342 2) [-INF, +-INF(OVF)]
2343 3) [+-INF(OVF), +INF]
2344 4) [+-INF(OVF), +-INF(OVF)]
2345 We learn nothing when we have INF and INF(OVF) on both sides.
2346 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2347 overflow. */
2348 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2349 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2350 {
2351 set_value_range_to_varying (vr);
2352 return;
2353 }
2354
2355 cmp = compare_values (min, max);
2356 if (cmp == -2 || cmp == 1)
2357 {
2358 /* If the new range has its limits swapped around (MIN > MAX),
2359 then the operation caused one of them to wrap around, mark
2360 the new range VARYING. */
2361 set_value_range_to_varying (vr);
2362 }
2363 else
2364 set_value_range (vr, type, min, max, NULL);
2365 }
2366
2367 /* Extract range information from a binary operation CODE based on
2368 the ranges of each of its operands, *VR0 and *VR1 with resulting
2369 type EXPR_TYPE. The resulting range is stored in *VR. */
2370
2371 static void
2372 extract_range_from_binary_expr_1 (value_range_t *vr,
2373 enum tree_code code, tree expr_type,
2374 value_range_t *vr0_, value_range_t *vr1_)
2375 {
2376 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2377 enum value_range_type type;
2378 tree min = NULL_TREE, max = NULL_TREE;
2379 int cmp;
2380
2381 if (!INTEGRAL_TYPE_P (expr_type)
2382 && !POINTER_TYPE_P (expr_type))
2383 {
2384 set_value_range_to_varying (vr);
2385 return;
2386 }
2387
2388 /* Not all binary expressions can be applied to ranges in a
2389 meaningful way. Handle only arithmetic operations. */
2390 if (code != PLUS_EXPR
2391 && code != MINUS_EXPR
2392 && code != POINTER_PLUS_EXPR
2393 && code != MULT_EXPR
2394 && code != TRUNC_DIV_EXPR
2395 && code != FLOOR_DIV_EXPR
2396 && code != CEIL_DIV_EXPR
2397 && code != EXACT_DIV_EXPR
2398 && code != ROUND_DIV_EXPR
2399 && code != TRUNC_MOD_EXPR
2400 && code != RSHIFT_EXPR
2401 && code != MIN_EXPR
2402 && code != MAX_EXPR
2403 && code != BIT_AND_EXPR
2404 && code != BIT_IOR_EXPR
2405 && code != BIT_XOR_EXPR)
2406 {
2407 set_value_range_to_varying (vr);
2408 return;
2409 }
2410
2411 /* If both ranges are UNDEFINED, so is the result. */
2412 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2413 {
2414 set_value_range_to_undefined (vr);
2415 return;
2416 }
2417 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2418 code. At some point we may want to special-case operations that
2419 have UNDEFINED result for all or some value-ranges of the non-UNDEFINED
2420 operand. */
2421 else if (vr0.type == VR_UNDEFINED)
2422 set_value_range_to_varying (&vr0);
2423 else if (vr1.type == VR_UNDEFINED)
2424 set_value_range_to_varying (&vr1);
2425
2426 /* The type of the resulting value range defaults to VR0.TYPE. */
2427 type = vr0.type;
2428
2429 /* Refuse to operate on VARYING ranges, ranges of different kinds
2430 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2431 because we may be able to derive a useful range even if one of
2432 the operands is VR_VARYING or symbolic range. Similarly for
2433 divisions. TODO, we may be able to derive anti-ranges in
2434 some cases. */
2435 if (code != BIT_AND_EXPR
2436 && code != BIT_IOR_EXPR
2437 && code != TRUNC_DIV_EXPR
2438 && code != FLOOR_DIV_EXPR
2439 && code != CEIL_DIV_EXPR
2440 && code != EXACT_DIV_EXPR
2441 && code != ROUND_DIV_EXPR
2442 && code != TRUNC_MOD_EXPR
2443 && (vr0.type == VR_VARYING
2444 || vr1.type == VR_VARYING
2445 || vr0.type != vr1.type
2446 || symbolic_range_p (&vr0)
2447 || symbolic_range_p (&vr1)))
2448 {
2449 set_value_range_to_varying (vr);
2450 return;
2451 }
2452
2453 /* Now evaluate the expression to determine the new range. */
2454 if (POINTER_TYPE_P (expr_type))
2455 {
2456 if (code == MIN_EXPR || code == MAX_EXPR)
2457 {
2458 /* For MIN/MAX expressions with pointers, we only care about
2459 nullness: if both are non-null, then the result is non-null.
2460 If both are null, then the result is null. Otherwise the result
2461 is varying. */
2462 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2463 set_value_range_to_nonnull (vr, expr_type);
2464 else if (range_is_null (&vr0) && range_is_null (&vr1))
2465 set_value_range_to_null (vr, expr_type);
2466 else
2467 set_value_range_to_varying (vr);
2468 }
2469 else if (code == POINTER_PLUS_EXPR)
2470 {
2471 /* For pointer types, we are really only interested in asserting
2472 whether the expression evaluates to non-NULL. */
2473 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2474 set_value_range_to_nonnull (vr, expr_type);
2475 else if (range_is_null (&vr0) && range_is_null (&vr1))
2476 set_value_range_to_null (vr, expr_type);
2477 else
2478 set_value_range_to_varying (vr);
2479 }
2480 else if (code == BIT_AND_EXPR)
2481 {
2482 /* For pointer types, we are really only interested in asserting
2483 whether the expression evaluates to non-NULL. */
2484 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2485 set_value_range_to_nonnull (vr, expr_type);
2486 else if (range_is_null (&vr0) || range_is_null (&vr1))
2487 set_value_range_to_null (vr, expr_type);
2488 else
2489 set_value_range_to_varying (vr);
2490 }
2491 else
2492 set_value_range_to_varying (vr);
2493
2494 return;
2495 }
2496
2497 /* For integer ranges, apply the operation to each end of the
2498 range and see what we end up with. */
2499 if (code == PLUS_EXPR)
2500 {
2501 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2502 VR_VARYING. It would take more effort to compute a precise
2503 range for such a case. For example, if we have op0 == 1 and
2504 op1 == -1 with their ranges both being ~[0,0], we would have
2505 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2506 Note that we are guaranteed to have vr0.type == vr1.type at
2507 this point. */
2508 if (vr0.type == VR_ANTI_RANGE)
2509 {
2510 set_value_range_to_varying (vr);
2511 return;
2512 }
2513
2514 /* For operations that make the resulting range directly
2515 proportional to the original ranges, apply the operation to
2516 the same end of each range. */
2517 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2518 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2519
2520 /* If both additions overflowed the range kind is still correct.
2521 This happens regularly with subtracting something in unsigned
2522 arithmetic.
2523 ??? See PR30318 for all the cases we do not handle. */
2524 if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2525 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2526 {
2527 min = build_int_cst_wide (TREE_TYPE (min),
2528 TREE_INT_CST_LOW (min),
2529 TREE_INT_CST_HIGH (min));
2530 max = build_int_cst_wide (TREE_TYPE (max),
2531 TREE_INT_CST_LOW (max),
2532 TREE_INT_CST_HIGH (max));
2533 }
2534 }
2535 else if (code == MIN_EXPR
2536 || code == MAX_EXPR)
2537 {
2538 if (vr0.type == VR_ANTI_RANGE)
2539 {
2540 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2541 the resulting VR_ANTI_RANGE is the same: the intersection
2542 of the two ranges. */
2543 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2544 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2545 }
2546 else
2547 {
2548 /* For operations that make the resulting range directly
2549 proportional to the original ranges, apply the operation to
2550 the same end of each range. */
2551 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2552 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2553 }
2554 }
2555 else if (code == MULT_EXPR)
2556 {
2557 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2558 drop to VR_VARYING. It would take more effort to compute a
2559 precise range for such a case. For example, if we have
2560 op0 == 65536 and op1 == 65536 with their ranges both being
2561 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2562 we cannot claim that the product is in ~[0,0]. Note that we
2563 are guaranteed to have vr0.type == vr1.type at this
2564 point. */
2565 if (vr0.type == VR_ANTI_RANGE
2566 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2567 {
2568 set_value_range_to_varying (vr);
2569 return;
2570 }
2571
2572 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2573 return;
2574 }
2575 else if (code == RSHIFT_EXPR)
2576 {
2577 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2578 then drop to VR_VARYING. Outside of this range we get undefined
2579 behavior from the shift operation. We cannot even trust
2580 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2581 shifts, and the operation at the tree level may be widened. */
2582 if (vr1.type != VR_RANGE
2583 || !value_range_nonnegative_p (&vr1)
2584 || TREE_CODE (vr1.max) != INTEGER_CST
2585 || compare_tree_int (vr1.max, TYPE_PRECISION (expr_type) - 1) == 1)
2586 {
2587 set_value_range_to_varying (vr);
2588 return;
2589 }
2590
2591 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2592 return;
2593 }
2594 else if (code == TRUNC_DIV_EXPR
2595 || code == FLOOR_DIV_EXPR
2596 || code == CEIL_DIV_EXPR
2597 || code == EXACT_DIV_EXPR
2598 || code == ROUND_DIV_EXPR)
2599 {
2600 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2601 {
2602 /* For division, if op1 has VR_RANGE but op0 does not, something
2603 can be deduced just from that range. Say [min, max] / [4, max]
2604 gives the range [min / 4, max / 4]. */
2605 if (vr1.type == VR_RANGE
2606 && !symbolic_range_p (&vr1)
2607 && !range_includes_zero_p (&vr1))
2608 {
2609 vr0.type = type = VR_RANGE;
2610 vr0.min = vrp_val_min (expr_type);
2611 vr0.max = vrp_val_max (expr_type);
2612 }
2613 else
2614 {
2615 set_value_range_to_varying (vr);
2616 return;
2617 }
2618 }
2619
2620 /* For divisions, if flag_non_call_exceptions is true, we must
2621 not eliminate a division by zero. */
2622 if (cfun->can_throw_non_call_exceptions
2623 && (vr1.type != VR_RANGE
2624 || symbolic_range_p (&vr1)
2625 || range_includes_zero_p (&vr1)))
2626 {
2627 set_value_range_to_varying (vr);
2628 return;
2629 }
2630
2631 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2632 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2633 include 0. */
2634 if (vr0.type == VR_RANGE
2635 && (vr1.type != VR_RANGE
2636 || symbolic_range_p (&vr1)
2637 || range_includes_zero_p (&vr1)))
2638 {
2639 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2640 int cmp;
2641
2642 min = NULL_TREE;
2643 max = NULL_TREE;
2644 if (TYPE_UNSIGNED (expr_type)
2645 || value_range_nonnegative_p (&vr1))
2646 {
2647 /* For unsigned division or when divisor is known
2648 to be non-negative, the range has to cover
2649 all numbers from 0 to max for positive max
2650 and all numbers from min to 0 for negative min. */
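/* For instance (added illustration): a dividend range [4, 20] with a
   non-negative but otherwise unknown divisor yields a quotient range
   of [0, 20].  */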
2651 cmp = compare_values (vr0.max, zero);
2652 if (cmp == -1)
2653 max = zero;
2654 else if (cmp == 0 || cmp == 1)
2655 max = vr0.max;
2656 else
2657 type = VR_VARYING;
2658 cmp = compare_values (vr0.min, zero);
2659 if (cmp == 1)
2660 min = zero;
2661 else if (cmp == 0 || cmp == -1)
2662 min = vr0.min;
2663 else
2664 type = VR_VARYING;
2665 }
2666 else
2667 {
2668 /* Otherwise the range is -max .. max or min .. -min
2669 depending on which bound is bigger in absolute value,
2670 as the division can change the sign. */
2671 abs_extent_range (vr, vr0.min, vr0.max);
2672 return;
2673 }
2674 if (type == VR_VARYING)
2675 {
2676 set_value_range_to_varying (vr);
2677 return;
2678 }
2679 }
2680 else
2681 {
2682 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2683 return;
2684 }
2685 }
2686 else if (code == TRUNC_MOD_EXPR)
2687 {
2688 if (vr1.type != VR_RANGE
2689 || symbolic_range_p (&vr1)
2690 || range_includes_zero_p (&vr1)
2691 || vrp_val_is_min (vr1.min))
2692 {
2693 set_value_range_to_varying (vr);
2694 return;
2695 }
2696 type = VR_RANGE;
2697 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
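/* For example (added illustration): vr1 = [3, 7] gives
   max = 7 - 1 = 6, so the result is [0, 6] for a non-negative
   dividend and [-6, 6] otherwise.  */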
2698 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2699 if (tree_int_cst_lt (max, vr1.max))
2700 max = vr1.max;
2701 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2702 /* If the dividend is non-negative the modulus will be
2703 non-negative as well. */
2704 if (TYPE_UNSIGNED (expr_type)
2705 || value_range_nonnegative_p (&vr0))
2706 min = build_int_cst (TREE_TYPE (max), 0);
2707 else
2708 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2709 }
2710 else if (code == MINUS_EXPR)
2711 {
2712 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2713 VR_VARYING. It would take more effort to compute a precise
2714 range for such a case. For example, if we have op0 == 1 and
2715 op1 == 1 with their ranges both being ~[0,0], we would have
2716 op0 - op1 == 0, so we cannot claim that the difference is in
2717 ~[0,0]. Note that we are guaranteed to have
2718 vr0.type == vr1.type at this point. */
2719 if (vr0.type == VR_ANTI_RANGE)
2720 {
2721 set_value_range_to_varying (vr);
2722 return;
2723 }
2724
2725 /* For MINUS_EXPR, apply the operation to the opposite ends of
2726 each range. */
2727 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2728 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2729 }
2730 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2731 {
2732 bool int_cst_range0, int_cst_range1;
2733 double_int may_be_nonzero0, may_be_nonzero1;
2734 double_int must_be_nonzero0, must_be_nonzero1;
2735
2736 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2737 &must_be_nonzero0);
2738 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2739 &must_be_nonzero1);
2740
2741 type = VR_RANGE;
2742 if (code == BIT_AND_EXPR)
2743 {
2744 double_int dmax;
2745 min = double_int_to_tree (expr_type,
2746 double_int_and (must_be_nonzero0,
2747 must_be_nonzero1));
2748 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
2749 /* If both input ranges contain only negative values we can
2750 truncate the result range maximum to the minimum of the
2751 input range maxima. */
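/* E.g. (added illustration): for the signed ranges [-8, -2] and
   [-16, -4] the bitwise AND result is bounded above by
   min (-2, -4) = -4.  */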
2752 if (int_cst_range0 && int_cst_range1
2753 && tree_int_cst_sgn (vr0.max) < 0
2754 && tree_int_cst_sgn (vr1.max) < 0)
2755 {
2756 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2757 TYPE_UNSIGNED (expr_type));
2758 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2759 TYPE_UNSIGNED (expr_type));
2760 }
2761 /* If either input range contains only non-negative values
2762 we can truncate the result range maximum to the respective
2763 maximum of the input range. */
2764 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2765 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2766 TYPE_UNSIGNED (expr_type));
2767 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2768 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2769 TYPE_UNSIGNED (expr_type));
2770 max = double_int_to_tree (expr_type, dmax);
2771 }
2772 else if (code == BIT_IOR_EXPR)
2773 {
2774 double_int dmin;
2775 max = double_int_to_tree (expr_type,
2776 double_int_ior (may_be_nonzero0,
2777 may_be_nonzero1));
2778 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
2779 /* If the input ranges contain only positive values we can
2780 truncate the minimum of the result range to the maximum
2781 of the input range minima. */
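/* E.g. (added illustration): for the ranges [4, 9] and [2, 6] the
   bitwise IOR result is bounded below by max (4, 2) = 4.  */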
2782 if (int_cst_range0 && int_cst_range1
2783 && tree_int_cst_sgn (vr0.min) >= 0
2784 && tree_int_cst_sgn (vr1.min) >= 0)
2785 {
2786 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2787 TYPE_UNSIGNED (expr_type));
2788 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2789 TYPE_UNSIGNED (expr_type));
2790 }
2791 /* If either input range contains only negative values
2792 we can truncate the minimum of the result range to the
2793 respective input range minimum. */
2794 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2795 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2796 TYPE_UNSIGNED (expr_type));
2797 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2798 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2799 TYPE_UNSIGNED (expr_type));
2800 min = double_int_to_tree (expr_type, dmin);
2801 }
2802 else if (code == BIT_XOR_EXPR)
2803 {
2804 double_int result_zero_bits, result_one_bits;
2805 result_zero_bits
2806 = double_int_ior (double_int_and (must_be_nonzero0,
2807 must_be_nonzero1),
2808 double_int_not
2809 (double_int_ior (may_be_nonzero0,
2810 may_be_nonzero1)));
2811 result_one_bits
2812 = double_int_ior (double_int_and
2813 (must_be_nonzero0,
2814 double_int_not (may_be_nonzero1)),
2815 double_int_and
2816 (must_be_nonzero1,
2817 double_int_not (may_be_nonzero0)));
2818 max = double_int_to_tree (expr_type,
2819 double_int_not (result_zero_bits));
2820 min = double_int_to_tree (expr_type, result_one_bits);
2821 /* If the range has all positive or all negative values the
2822 result is better than VARYING. */
2823 if (tree_int_cst_sgn (min) < 0
2824 || tree_int_cst_sgn (max) >= 0)
2825 ;
2826 else
2827 max = min = NULL_TREE;
2828 }
2829 }
2830 else
2831 gcc_unreachable ();
2832
2833 /* If either MIN or MAX overflowed, then set the resulting range to
2834 VARYING. But we do accept an overflow infinity
2835 representation. */
2836 if (min == NULL_TREE
2837 || !is_gimple_min_invariant (min)
2838 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2839 || max == NULL_TREE
2840 || !is_gimple_min_invariant (max)
2841 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2842 {
2843 set_value_range_to_varying (vr);
2844 return;
2845 }
2846
2847 /* We punt if:
2848 1) [-INF, +INF]
2849 2) [-INF, +-INF(OVF)]
2850 3) [+-INF(OVF), +INF]
2851 4) [+-INF(OVF), +-INF(OVF)]
2852 We learn nothing when we have INF and INF(OVF) on both sides.
2853 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2854 overflow. */
2855 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2856 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2857 {
2858 set_value_range_to_varying (vr);
2859 return;
2860 }
2861
2862 cmp = compare_values (min, max);
2863 if (cmp == -2 || cmp == 1)
2864 {
2865 /* If the new range has its limits swapped around (MIN > MAX),
2866 then the operation caused one of them to wrap around, mark
2867 the new range VARYING. */
2868 set_value_range_to_varying (vr);
2869 }
2870 else
2871 set_value_range (vr, type, min, max, NULL);
2872 }
2873
2874 /* Extract range information from a binary expression OP0 CODE OP1 based on
2875 the ranges of each of its operands with resulting type EXPR_TYPE.
2876 The resulting range is stored in *VR. */
2877
2878 static void
2879 extract_range_from_binary_expr (value_range_t *vr,
2880 enum tree_code code,
2881 tree expr_type, tree op0, tree op1)
2882 {
2883 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2884 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2885
2886 /* Get value ranges for each operand. For constant operands, create
2887 a new value range with the operand to simplify processing. */
2888 if (TREE_CODE (op0) == SSA_NAME)
2889 vr0 = *(get_value_range (op0));
2890 else if (is_gimple_min_invariant (op0))
2891 set_value_range_to_value (&vr0, op0, NULL);
2892 else
2893 set_value_range_to_varying (&vr0);
2894
2895 if (TREE_CODE (op1) == SSA_NAME)
2896 vr1 = *(get_value_range (op1));
2897 else if (is_gimple_min_invariant (op1))
2898 set_value_range_to_value (&vr1, op1, NULL);
2899 else
2900 set_value_range_to_varying (&vr1);
2901
2902 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
2903 }
2904
2905 /* Extract range information from a unary operation CODE based on
2906 the range of its operand *VR0 with type OP0_TYPE and resulting type TYPE.
2907 The resulting range is stored in *VR. */
2908
2909 static void
2910 extract_range_from_unary_expr_1 (value_range_t *vr,
2911 enum tree_code code, tree type,
2912 value_range_t *vr0_, tree op0_type)
2913 {
2914 value_range_t vr0 = *vr0_;
2915
2916 /* VRP only operates on integral and pointer types. */
2917 if (!(INTEGRAL_TYPE_P (op0_type)
2918 || POINTER_TYPE_P (op0_type))
2919 || !(INTEGRAL_TYPE_P (type)
2920 || POINTER_TYPE_P (type)))
2921 {
2922 set_value_range_to_varying (vr);
2923 return;
2924 }
2925
2926 /* If VR0 is UNDEFINED, so is the result. */
2927 if (vr0.type == VR_UNDEFINED)
2928 {
2929 set_value_range_to_undefined (vr);
2930 return;
2931 }
2932
2933 if (CONVERT_EXPR_CODE_P (code))
2934 {
2935 tree inner_type = op0_type;
2936 tree outer_type = type;
2937
2938 /* If the expression evaluates to a pointer, we are only interested in
2939 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2940 if (POINTER_TYPE_P (type))
2941 {
2942 if (range_is_nonnull (&vr0))
2943 set_value_range_to_nonnull (vr, type);
2944 else if (range_is_null (&vr0))
2945 set_value_range_to_null (vr, type);
2946 else
2947 set_value_range_to_varying (vr);
2948 return;
2949 }
2950
2951 /* If VR0 is varying and we increase the type precision, assume
2952 a full range for the following transformation. */
2953 if (vr0.type == VR_VARYING
2954 && INTEGRAL_TYPE_P (inner_type)
2955 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2956 {
2957 vr0.type = VR_RANGE;
2958 vr0.min = TYPE_MIN_VALUE (inner_type);
2959 vr0.max = TYPE_MAX_VALUE (inner_type);
2960 }
2961
2962 /* If VR0 is a constant range or anti-range and the conversion is
2963 not truncating we can convert the min and max values and
2964 canonicalize the resulting range. Otherwise we can do the
2965 conversion if the size of the range is less than what the
2966 precision of the target type can represent and the range is
2967 not an anti-range. */
2968 if ((vr0.type == VR_RANGE
2969 || vr0.type == VR_ANTI_RANGE)
2970 && TREE_CODE (vr0.min) == INTEGER_CST
2971 && TREE_CODE (vr0.max) == INTEGER_CST
2972 && (!is_overflow_infinity (vr0.min)
2973 || (vr0.type == VR_RANGE
2974 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2975 && needs_overflow_infinity (outer_type)
2976 && supports_overflow_infinity (outer_type)))
2977 && (!is_overflow_infinity (vr0.max)
2978 || (vr0.type == VR_RANGE
2979 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2980 && needs_overflow_infinity (outer_type)
2981 && supports_overflow_infinity (outer_type)))
2982 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2983 || (vr0.type == VR_RANGE
2984 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2985 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2986 size_int (TYPE_PRECISION (outer_type)))))))
2987 {
2988 tree new_min, new_max;
2989 if (is_overflow_infinity (vr0.min))
2990 new_min = negative_overflow_infinity (outer_type);
2991 else
2992 new_min = force_fit_type_double (outer_type,
2993 tree_to_double_int (vr0.min),
2994 0, false);
2995 if (is_overflow_infinity (vr0.max))
2996 new_max = positive_overflow_infinity (outer_type);
2997 else
2998 new_max = force_fit_type_double (outer_type,
2999 tree_to_double_int (vr0.max),
3000 0, false);
3001 set_and_canonicalize_value_range (vr, vr0.type,
3002 new_min, new_max, NULL);
3003 return;
3004 }
3005
3006 set_value_range_to_varying (vr);
3007 return;
3008 }
3009 else if (code == NEGATE_EXPR)
3010 {
3011 /* -X is simply 0 - X, so re-use existing code that also handles
3012 anti-ranges fine. */
3013 value_range_t zero = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3014 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3015 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3016 return;
3017 }
3018 else if (code == ABS_EXPR)
3019 {
3020 tree min, max;
3021 int cmp;
3022
3023 /* Pass through vr0 in the easy cases. */
3024 if (TYPE_UNSIGNED (type)
3025 || value_range_nonnegative_p (&vr0))
3026 {
3027 copy_value_range (vr, &vr0);
3028 return;
3029 }
3030
3031 /* For the remaining varying or symbolic ranges we can't do anything
3032 useful. */
3033 if (vr0.type == VR_VARYING
3034 || symbolic_range_p (&vr0))
3035 {
3036 set_value_range_to_varying (vr);
3037 return;
3038 }
3039
3040 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3041 useful range. */
3042 if (!TYPE_OVERFLOW_UNDEFINED (type)
3043 && ((vr0.type == VR_RANGE
3044 && vrp_val_is_min (vr0.min))
3045 || (vr0.type == VR_ANTI_RANGE
3046 && !vrp_val_is_min (vr0.min))))
3047 {
3048 set_value_range_to_varying (vr);
3049 return;
3050 }
3051
3052 /* ABS_EXPR may flip the range around, if the original range
3053 included negative values. */
3054 if (is_overflow_infinity (vr0.min))
3055 min = positive_overflow_infinity (type);
3056 else if (!vrp_val_is_min (vr0.min))
3057 min = fold_unary_to_constant (code, type, vr0.min);
3058 else if (!needs_overflow_infinity (type))
3059 min = TYPE_MAX_VALUE (type);
3060 else if (supports_overflow_infinity (type))
3061 min = positive_overflow_infinity (type);
3062 else
3063 {
3064 set_value_range_to_varying (vr);
3065 return;
3066 }
3067
3068 if (is_overflow_infinity (vr0.max))
3069 max = positive_overflow_infinity (type);
3070 else if (!vrp_val_is_min (vr0.max))
3071 max = fold_unary_to_constant (code, type, vr0.max);
3072 else if (!needs_overflow_infinity (type))
3073 max = TYPE_MAX_VALUE (type);
3074 else if (supports_overflow_infinity (type)
3075 /* We shouldn't generate [+INF, +INF] as set_value_range
3076 doesn't like this and ICEs. */
3077 && !is_positive_overflow_infinity (min))
3078 max = positive_overflow_infinity (type);
3079 else
3080 {
3081 set_value_range_to_varying (vr);
3082 return;
3083 }
3084
3085 cmp = compare_values (min, max);
3086
3087 /* If a VR_ANTI_RANGE contains zero, then we have
3088 ~[-INF, min(MIN, MAX)]. */
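/* For instance (added illustration): ABS of ~[-2, 3] describes an
   operand whose absolute value is at least 3, which is represented
   as the anti-range ~[-INF, 2].  */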
3089 if (vr0.type == VR_ANTI_RANGE)
3090 {
3091 if (range_includes_zero_p (&vr0))
3092 {
3093 /* Take the lower of the two values. */
3094 if (cmp != 1)
3095 max = min;
3096
3097 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3098 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3099 flag_wrapv is set and the original anti-range doesn't include
3100 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3101 if (TYPE_OVERFLOW_WRAPS (type))
3102 {
3103 tree type_min_value = TYPE_MIN_VALUE (type);
3104
3105 min = (vr0.min != type_min_value
3106 ? int_const_binop (PLUS_EXPR, type_min_value,
3107 integer_one_node)
3108 : type_min_value);
3109 }
3110 else
3111 {
3112 if (overflow_infinity_range_p (&vr0))
3113 min = negative_overflow_infinity (type);
3114 else
3115 min = TYPE_MIN_VALUE (type);
3116 }
3117 }
3118 else
3119 {
3120 /* All else has failed, so create the range [0, INF], even for
3121 flag_wrapv since TYPE_MIN_VALUE is in the original
3122 anti-range. */
3123 vr0.type = VR_RANGE;
3124 min = build_int_cst (type, 0);
3125 if (needs_overflow_infinity (type))
3126 {
3127 if (supports_overflow_infinity (type))
3128 max = positive_overflow_infinity (type);
3129 else
3130 {
3131 set_value_range_to_varying (vr);
3132 return;
3133 }
3134 }
3135 else
3136 max = TYPE_MAX_VALUE (type);
3137 }
3138 }
3139
3140 /* If the range contains zero then we know that the minimum value in the
3141 range will be zero. */
3142 else if (range_includes_zero_p (&vr0))
3143 {
3144 if (cmp == 1)
3145 max = min;
3146 min = build_int_cst (type, 0);
3147 }
3148 else
3149 {
3150 /* If the range was reversed, swap MIN and MAX. */
3151 if (cmp == 1)
3152 {
3153 tree t = min;
3154 min = max;
3155 max = t;
3156 }
3157 }
3158
3159 cmp = compare_values (min, max);
3160 if (cmp == -2 || cmp == 1)
3161 {
3162 /* If the new range has its limits swapped around (MIN > MAX),
3163 then the operation caused one of them to wrap around, mark
3164 the new range VARYING. */
3165 set_value_range_to_varying (vr);
3166 }
3167 else
3168 set_value_range (vr, vr0.type, min, max, NULL);
3169 return;
3170 }
3171 else if (code == BIT_NOT_EXPR)
3172 {
3173 /* ~X is simply -1 - X, so re-use existing code that also handles
3174 anti-ranges fine. */
3175 value_range_t minusone = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3176 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3177 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3178 type, &minusone, &vr0);
3179 return;
3180 }
3181 else if (code == PAREN_EXPR)
3182 {
3183 copy_value_range (vr, &vr0);
3184 return;
3185 }
3186
3187 /* For unhandled operations fall back to varying. */
3188 set_value_range_to_varying (vr);
3189 return;
3190 }
3191
3192
3193 /* Extract range information from a unary expression CODE OP0 based on
3194 the range of its operand with resulting type TYPE.
3195 The resulting range is stored in *VR. */
3196
3197 static void
3198 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3199 tree type, tree op0)
3200 {
3201 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3202
3203 /* Get value ranges for the operand. For constant operands, create
3204 a new value range with the operand to simplify processing. */
3205 if (TREE_CODE (op0) == SSA_NAME)
3206 vr0 = *(get_value_range (op0));
3207 else if (is_gimple_min_invariant (op0))
3208 set_value_range_to_value (&vr0, op0, NULL);
3209 else
3210 set_value_range_to_varying (&vr0);
3211
3212 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3213 }
3214
3215
3216 /* Extract range information from a conditional expression STMT based on
3217 the ranges of each of its operands and the expression code. */
3218
3219 static void
3220 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3221 {
3222 tree op0, op1;
3223 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3224 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3225
3226 /* Get value ranges for each operand. For constant operands, create
3227 a new value range with the operand to simplify processing. */
3228 op0 = gimple_assign_rhs2 (stmt);
3229 if (TREE_CODE (op0) == SSA_NAME)
3230 vr0 = *(get_value_range (op0));
3231 else if (is_gimple_min_invariant (op0))
3232 set_value_range_to_value (&vr0, op0, NULL);
3233 else
3234 set_value_range_to_varying (&vr0);
3235
3236 op1 = gimple_assign_rhs3 (stmt);
3237 if (TREE_CODE (op1) == SSA_NAME)
3238 vr1 = *(get_value_range (op1));
3239 else if (is_gimple_min_invariant (op1))
3240 set_value_range_to_value (&vr1, op1, NULL);
3241 else
3242 set_value_range_to_varying (&vr1);
3243
3244 /* The resulting value range is the union of the operand ranges. */
3245 vrp_meet (&vr0, &vr1);
3246 copy_value_range (vr, &vr0);
3247 }
3248
3249
3250 /* Extract range information from a comparison expression EXPR based
3251 on the range of its operand and the expression code. */
3252
3253 static void
3254 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3255 tree type, tree op0, tree op1)
3256 {
3257 bool sop = false;
3258 tree val;
3259
3260 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3261 NULL);
3262
3263 /* A disadvantage of using a special infinity as an overflow
3264 representation is that we lose the ability to record overflow
3265 when we don't have an infinity. So we have to ignore a result
3266 which relies on overflow. */
3267
3268 if (val && !is_overflow_infinity (val) && !sop)
3269 {
3270 /* Since this expression was found on the RHS of an assignment,
3271 its type may be different from _Bool. Convert VAL to EXPR's
3272 type. */
3273 val = fold_convert (type, val);
3274 if (is_gimple_min_invariant (val))
3275 set_value_range_to_value (vr, val, vr->equiv);
3276 else
3277 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3278 }
3279 else
3280 /* The result of a comparison is always true or false. */
3281 set_value_range_to_truthvalue (vr, type);
3282 }
3283
3284 /* Try to derive a nonnegative or nonzero range out of STMT relying
3285 primarily on generic routines in fold in conjunction with range data.
3286 Store the result in *VR. */
3287
3288 static void
3289 extract_range_basic (value_range_t *vr, gimple stmt)
3290 {
3291 bool sop = false;
3292 tree type = gimple_expr_type (stmt);
3293
3294 if (INTEGRAL_TYPE_P (type)
3295 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3296 set_value_range_to_nonnegative (vr, type,
3297 sop || stmt_overflow_infinity (stmt));
3298 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3299 && !sop)
3300 set_value_range_to_nonnull (vr, type);
3301 else
3302 set_value_range_to_varying (vr);
3303 }
3304
3305
3306 /* Try to compute a useful range out of assignment STMT and store it
3307 in *VR. */
3308
3309 static void
3310 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3311 {
3312 enum tree_code code = gimple_assign_rhs_code (stmt);
3313
3314 if (code == ASSERT_EXPR)
3315 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3316 else if (code == SSA_NAME)
3317 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3318 else if (TREE_CODE_CLASS (code) == tcc_binary)
3319 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3320 gimple_expr_type (stmt),
3321 gimple_assign_rhs1 (stmt),
3322 gimple_assign_rhs2 (stmt));
3323 else if (TREE_CODE_CLASS (code) == tcc_unary)
3324 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3325 gimple_expr_type (stmt),
3326 gimple_assign_rhs1 (stmt));
3327 else if (code == COND_EXPR)
3328 extract_range_from_cond_expr (vr, stmt);
3329 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3330 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3331 gimple_expr_type (stmt),
3332 gimple_assign_rhs1 (stmt),
3333 gimple_assign_rhs2 (stmt));
3334 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3335 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3336 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3337 else
3338 set_value_range_to_varying (vr);
3339
3340 if (vr->type == VR_VARYING)
3341 extract_range_basic (vr, stmt);
3342 }
3343
3344 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3345 would be profitable to adjust VR using scalar evolution information
3346 for VAR. If so, update VR with the new limits. */
3347
3348 static void
3349 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3350 gimple stmt, tree var)
3351 {
3352 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3353 enum ev_direction dir;
3354
3355 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3356 better opportunities than a regular range, but I'm not sure. */
3357 if (vr->type == VR_ANTI_RANGE)
3358 return;
3359
3360 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3361
3362 /* Like in PR19590, scev can return a constant function. */
3363 if (is_gimple_min_invariant (chrec))
3364 {
3365 set_value_range_to_value (vr, chrec, vr->equiv);
3366 return;
3367 }
3368
3369 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3370 return;
3371
3372 init = initial_condition_in_loop_num (chrec, loop->num);
3373 tem = op_with_constant_singleton_value_range (init);
3374 if (tem)
3375 init = tem;
3376 step = evolution_part_in_loop_num (chrec, loop->num);
3377 tem = op_with_constant_singleton_value_range (step);
3378 if (tem)
3379 step = tem;
3380
3381 /* If STEP is symbolic, we can't know whether INIT will be the
3382 minimum or maximum value in the range. Also, unless INIT is
3383 a simple expression, compare_values and possibly other functions
3384 in tree-vrp won't be able to handle it. */
3385 if (step == NULL_TREE
3386 || !is_gimple_min_invariant (step)
3387 || !valid_value_p (init))
3388 return;
3389
3390 dir = scev_direction (chrec);
3391 if (/* Do not adjust ranges if we do not know whether the iv increases
3392 or decreases, ... */
3393 dir == EV_DIR_UNKNOWN
3394 /* ... or if it may wrap. */
3395 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3396 true))
3397 return;
3398
3399 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3400 negative_overflow_infinity and positive_overflow_infinity,
3401 because we have concluded that the loop probably does not
3402 wrap. */
3403
3404 type = TREE_TYPE (var);
3405 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3406 tmin = lower_bound_in_type (type, type);
3407 else
3408 tmin = TYPE_MIN_VALUE (type);
3409 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3410 tmax = upper_bound_in_type (type, type);
3411 else
3412 tmax = TYPE_MAX_VALUE (type);
3413
3414 /* Try to use estimated number of iterations for the loop to constrain the
3415 final value in the evolution. */
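/* For example (added illustration): for an induction variable
   {0, +, 4} in a loop with at most 10 estimated iterations, the
   final value is bounded by 0 + 4 * 10 = 40, so the range can be
   tightened to [0, 40].  */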
3416 if (TREE_CODE (step) == INTEGER_CST
3417 && is_gimple_val (init)
3418 && (TREE_CODE (init) != SSA_NAME
3419 || get_value_range (init)->type == VR_RANGE))
3420 {
3421 double_int nit;
3422
3423 if (estimated_loop_iterations (loop, true, &nit))
3424 {
3425 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3426 double_int dtmp;
3427 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3428 int overflow = 0;
3429
3430 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3431 unsigned_p, &overflow);
3432 /* If the multiplication overflowed we can't do a meaningful
3433 adjustment. Likewise if the result doesn't fit in the type
3434 of the induction variable. For a signed type we have to
3435 check whether the result has the expected signedness, which
3436 is that of the step, as the number of iterations is unsigned. */
3437 if (!overflow
3438 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3439 && (unsigned_p
3440 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3441 {
3442 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3443 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3444 TREE_TYPE (init), init, tem);
3445 /* Likewise if the addition did. */
3446 if (maxvr.type == VR_RANGE)
3447 {
3448 tmin = maxvr.min;
3449 tmax = maxvr.max;
3450 }
3451 }
3452 }
3453 }
3454
3455 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3456 {
3457 min = tmin;
3458 max = tmax;
3459
3460 /* For VARYING or UNDEFINED ranges, just about anything we get
3461 from scalar evolutions should be better. */
3462
3463 if (dir == EV_DIR_DECREASES)
3464 max = init;
3465 else
3466 min = init;
3467
3468 /* If we would create an invalid range, then just assume we
3469 know absolutely nothing. This may be over-conservative,
3470 but it's clearly safe, and should happen only in unreachable
3471 parts of code, or for invalid programs. */
3472 if (compare_values (min, max) == 1)
3473 return;
3474
3475 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3476 }
3477 else if (vr->type == VR_RANGE)
3478 {
3479 min = vr->min;
3480 max = vr->max;
3481
3482 if (dir == EV_DIR_DECREASES)
3483 {
3484 /* INIT is the maximum value. If INIT is lower than VR->MAX
3485 but no smaller than VR->MIN, set VR->MAX to INIT. */
3486 if (compare_values (init, max) == -1)
3487 max = init;
3488
3489 /* According to the loop information, the variable does not
3490 overflow. If we think it does, probably because of an
3491 overflow due to arithmetic on a different INF value,
3492 reset now. */
3493 if (is_negative_overflow_infinity (min)
3494 || compare_values (min, tmin) == -1)
3495 min = tmin;
3496
3497 }
3498 else
3499 {
3500 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3501 if (compare_values (init, min) == 1)
3502 min = init;
3503
3504 if (is_positive_overflow_infinity (max)
3505 || compare_values (tmax, max) == -1)
3506 max = tmax;
3507 }
3508
3509 /* If we just created an invalid range with the minimum
3510 greater than the maximum, we fail conservatively.
3511 This should happen only in unreachable
3512 parts of code, or for invalid programs. */
3513 if (compare_values (min, max) == 1)
3514 return;
3515
3516 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3517 }
3518 }
3519
3520 /* Return true if VAR may overflow at STMT. This checks any available
3521 loop information to see if we can determine that VAR does not
3522 overflow. */
3523
3524 static bool
3525 vrp_var_may_overflow (tree var, gimple stmt)
3526 {
3527 struct loop *l;
3528 tree chrec, init, step;
3529
3530 if (current_loops == NULL)
3531 return true;
3532
3533 l = loop_containing_stmt (stmt);
3534 if (l == NULL
3535 || !loop_outer (l))
3536 return true;
3537
3538 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3539 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3540 return true;
3541
3542 init = initial_condition_in_loop_num (chrec, l->num);
3543 step = evolution_part_in_loop_num (chrec, l->num);
3544
3545 if (step == NULL_TREE
3546 || !is_gimple_min_invariant (step)
3547 || !valid_value_p (init))
3548 return true;
3549
3550 /* If we get here, we know something useful about VAR based on the
3551 loop information. If it wraps, it may overflow. */
3552
3553 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3554 true))
3555 return true;
3556
3557 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3558 {
3559 print_generic_expr (dump_file, var, 0);
3560 fprintf (dump_file, ": loop information indicates does not overflow\n");
3561 }
3562
3563 return false;
3564 }
3565
3566
3567 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3568
3569 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3570 all the values in the ranges.
3571
3572 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3573
3574 - Return NULL_TREE if it is not always possible to determine the
3575 value of the comparison.
3576
3577 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3578 overflow infinity was used in the test. */
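/* For instance (illustrative values): [1, 3] < [5, 9] always compares
   true, so boolean_true_node is returned, whereas [1, 6] < [5, 9]
   cannot be decided at compile time and yields NULL_TREE.  */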
3579
3580
3581 static tree
3582 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3583 bool *strict_overflow_p)
3584 {
3585 /* VARYING or UNDEFINED ranges cannot be compared. */
3586 if (vr0->type == VR_VARYING
3587 || vr0->type == VR_UNDEFINED
3588 || vr1->type == VR_VARYING
3589 || vr1->type == VR_UNDEFINED)
3590 return NULL_TREE;
3591
3592 /* Anti-ranges need to be handled separately. */
3593 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3594 {
3595 /* If both are anti-ranges, then we cannot compute any
3596 comparison. */
3597 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3598 return NULL_TREE;
3599
3600 /* These comparisons are never statically computable. */
3601 if (comp == GT_EXPR
3602 || comp == GE_EXPR
3603 || comp == LT_EXPR
3604 || comp == LE_EXPR)
3605 return NULL_TREE;
3606
3607 /* Equality can be computed only between a range and an
3608 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3609 if (vr0->type == VR_RANGE)
3610 {
3611 /* To simplify processing, make VR0 the anti-range. */
3612 value_range_t *tmp = vr0;
3613 vr0 = vr1;
3614 vr1 = tmp;
3615 }
3616
3617 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3618
3619 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3620 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3621 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3622
3623 return NULL_TREE;
3624 }
3625
3626 if (!usable_range_p (vr0, strict_overflow_p)
3627 || !usable_range_p (vr1, strict_overflow_p))
3628 return NULL_TREE;
3629
3630 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3631 operands around and change the comparison code. */
3632 if (comp == GT_EXPR || comp == GE_EXPR)
3633 {
3634 value_range_t *tmp;
3635 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3636 tmp = vr0;
3637 vr0 = vr1;
3638 vr1 = tmp;
3639 }
3640
3641 if (comp == EQ_EXPR)
3642 {
3643 /* Equality may only be computed if both ranges represent
3644 exactly one value. */
3645 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3646 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3647 {
3648 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3649 strict_overflow_p);
3650 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3651 strict_overflow_p);
3652 if (cmp_min == 0 && cmp_max == 0)
3653 return boolean_true_node;
3654 else if (cmp_min != -2 && cmp_max != -2)
3655 return boolean_false_node;
3656 }
3657 	  /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1.  */
3658 else if (compare_values_warnv (vr0->min, vr1->max,
3659 strict_overflow_p) == 1
3660 || compare_values_warnv (vr1->min, vr0->max,
3661 strict_overflow_p) == 1)
3662 return boolean_false_node;
3663
3664 return NULL_TREE;
3665 }
3666 else if (comp == NE_EXPR)
3667 {
3668 int cmp1, cmp2;
3669
3670 /* If VR0 is completely to the left or completely to the right
3671 of VR1, they are always different. Notice that we need to
3672 make sure that both comparisons yield similar results to
3673 avoid comparing values that cannot be compared at
3674 compile-time. */
3675 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3676 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3677 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3678 return boolean_true_node;
3679
3680 /* If VR0 and VR1 represent a single value and are identical,
3681 return false. */
3682 else if (compare_values_warnv (vr0->min, vr0->max,
3683 strict_overflow_p) == 0
3684 && compare_values_warnv (vr1->min, vr1->max,
3685 strict_overflow_p) == 0
3686 && compare_values_warnv (vr0->min, vr1->min,
3687 strict_overflow_p) == 0
3688 && compare_values_warnv (vr0->max, vr1->max,
3689 strict_overflow_p) == 0)
3690 return boolean_false_node;
3691
3692 /* Otherwise, they may or may not be different. */
3693 else
3694 return NULL_TREE;
3695 }
3696 else if (comp == LT_EXPR || comp == LE_EXPR)
3697 {
3698 int tst;
3699
3700 /* If VR0 is to the left of VR1, return true. */
3701 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3702 if ((comp == LT_EXPR && tst == -1)
3703 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3704 {
3705 if (overflow_infinity_range_p (vr0)
3706 || overflow_infinity_range_p (vr1))
3707 *strict_overflow_p = true;
3708 return boolean_true_node;
3709 }
3710
3711 /* If VR0 is to the right of VR1, return false. */
3712 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3713 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3714 || (comp == LE_EXPR && tst == 1))
3715 {
3716 if (overflow_infinity_range_p (vr0)
3717 || overflow_infinity_range_p (vr1))
3718 *strict_overflow_p = true;
3719 return boolean_false_node;
3720 }
3721
3722 /* Otherwise, we don't know. */
3723 return NULL_TREE;
3724 }
3725
3726 gcc_unreachable ();
3727 }
3728
3729
3730 /* Given a value range VR, a value VAL and a comparison code COMP, return
3731 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3732 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3733 always returns false. Return NULL_TREE if it is not always
3734 possible to determine the value of the comparison. Also set
3735 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3736 infinity was used in the test. */
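/* For instance (illustrative values): for VR = [2, 10], VR < 15 is
   always true and VR > 20 is always false, whereas VR < 5 cannot be
   decided at compile time and yields NULL_TREE.  */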
3737
3738 static tree
3739 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3740 bool *strict_overflow_p)
3741 {
3742 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3743 return NULL_TREE;
3744
3745 /* Anti-ranges need to be handled separately. */
3746 if (vr->type == VR_ANTI_RANGE)
3747 {
3748 /* For anti-ranges, the only predicates that we can compute at
3749 compile time are equality and inequality. */
3750 if (comp == GT_EXPR
3751 || comp == GE_EXPR
3752 || comp == LT_EXPR
3753 || comp == LE_EXPR)
3754 return NULL_TREE;
3755
3756 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3757 if (value_inside_range (val, vr) == 1)
3758 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3759
3760 return NULL_TREE;
3761 }
3762
3763 if (!usable_range_p (vr, strict_overflow_p))
3764 return NULL_TREE;
3765
3766 if (comp == EQ_EXPR)
3767 {
3768 /* EQ_EXPR may only be computed if VR represents exactly
3769 one value. */
3770 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3771 {
3772 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3773 if (cmp == 0)
3774 return boolean_true_node;
3775 else if (cmp == -1 || cmp == 1 || cmp == 2)
3776 return boolean_false_node;
3777 }
3778 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3779 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3780 return boolean_false_node;
3781
3782 return NULL_TREE;
3783 }
3784 else if (comp == NE_EXPR)
3785 {
3786 /* If VAL is not inside VR, then they are always different. */
3787 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3788 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3789 return boolean_true_node;
3790
3791 /* If VR represents exactly one value equal to VAL, then return
3792 false. */
3793 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3794 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3795 return boolean_false_node;
3796
3797 /* Otherwise, they may or may not be different. */
3798 return NULL_TREE;
3799 }
3800 else if (comp == LT_EXPR || comp == LE_EXPR)
3801 {
3802 int tst;
3803
3804 /* If VR is to the left of VAL, return true. */
3805 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3806 if ((comp == LT_EXPR && tst == -1)
3807 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3808 {
3809 if (overflow_infinity_range_p (vr))
3810 *strict_overflow_p = true;
3811 return boolean_true_node;
3812 }
3813
3814 /* If VR is to the right of VAL, return false. */
3815 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3816 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3817 || (comp == LE_EXPR && tst == 1))
3818 {
3819 if (overflow_infinity_range_p (vr))
3820 *strict_overflow_p = true;
3821 return boolean_false_node;
3822 }
3823
3824 /* Otherwise, we don't know. */
3825 return NULL_TREE;
3826 }
3827 else if (comp == GT_EXPR || comp == GE_EXPR)
3828 {
3829 int tst;
3830
3831 /* If VR is to the right of VAL, return true. */
3832 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3833 if ((comp == GT_EXPR && tst == 1)
3834 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3835 {
3836 if (overflow_infinity_range_p (vr))
3837 *strict_overflow_p = true;
3838 return boolean_true_node;
3839 }
3840
3841 /* If VR is to the left of VAL, return false. */
3842 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3843 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3844 || (comp == GE_EXPR && tst == -1))
3845 {
3846 if (overflow_infinity_range_p (vr))
3847 *strict_overflow_p = true;
3848 return boolean_false_node;
3849 }
3850
3851 /* Otherwise, we don't know. */
3852 return NULL_TREE;
3853 }
3854
3855 gcc_unreachable ();
3856 }
3857
3858
3859 /* Debugging dumps. */
3860
3861 void dump_value_range (FILE *, value_range_t *);
3862 void debug_value_range (value_range_t *);
3863 void dump_all_value_ranges (FILE *);
3864 void debug_all_value_ranges (void);
3865 void dump_vr_equiv (FILE *, bitmap);
3866 void debug_vr_equiv (bitmap);
3867
3868
3869 /* Dump value range VR to FILE. */
3870
3871 void
3872 dump_value_range (FILE *file, value_range_t *vr)
3873 {
3874 if (vr == NULL)
3875 fprintf (file, "[]");
3876 else if (vr->type == VR_UNDEFINED)
3877 fprintf (file, "UNDEFINED");
3878 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3879 {
3880 tree type = TREE_TYPE (vr->min);
3881
3882 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3883
3884 if (is_negative_overflow_infinity (vr->min))
3885 fprintf (file, "-INF(OVF)");
3886 else if (INTEGRAL_TYPE_P (type)
3887 && !TYPE_UNSIGNED (type)
3888 && vrp_val_is_min (vr->min))
3889 fprintf (file, "-INF");
3890 else
3891 print_generic_expr (file, vr->min, 0);
3892
3893 fprintf (file, ", ");
3894
3895 if (is_positive_overflow_infinity (vr->max))
3896 fprintf (file, "+INF(OVF)");
3897 else if (INTEGRAL_TYPE_P (type)
3898 && vrp_val_is_max (vr->max))
3899 fprintf (file, "+INF");
3900 else
3901 print_generic_expr (file, vr->max, 0);
3902
3903 fprintf (file, "]");
3904
3905 if (vr->equiv)
3906 {
3907 bitmap_iterator bi;
3908 unsigned i, c = 0;
3909
3910 fprintf (file, " EQUIVALENCES: { ");
3911
3912 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3913 {
3914 print_generic_expr (file, ssa_name (i), 0);
3915 fprintf (file, " ");
3916 c++;
3917 }
3918
3919 fprintf (file, "} (%u elements)", c);
3920 }
3921 }
3922 else if (vr->type == VR_VARYING)
3923 fprintf (file, "VARYING");
3924 else
3925 fprintf (file, "INVALID RANGE");
3926 }
3927
3928
3929 /* Dump value range VR to stderr. */
3930
3931 DEBUG_FUNCTION void
3932 debug_value_range (value_range_t *vr)
3933 {
3934 dump_value_range (stderr, vr);
3935 fprintf (stderr, "\n");
3936 }
3937
3938
3939 /* Dump value ranges of all SSA_NAMEs to FILE. */
3940
3941 void
3942 dump_all_value_ranges (FILE *file)
3943 {
3944 size_t i;
3945
3946 for (i = 0; i < num_vr_values; i++)
3947 {
3948 if (vr_value[i])
3949 {
3950 print_generic_expr (file, ssa_name (i), 0);
3951 fprintf (file, ": ");
3952 dump_value_range (file, vr_value[i]);
3953 fprintf (file, "\n");
3954 }
3955 }
3956
3957 fprintf (file, "\n");
3958 }
3959
3960
3961 /* Dump all value ranges to stderr. */
3962
3963 DEBUG_FUNCTION void
3964 debug_all_value_ranges (void)
3965 {
3966 dump_all_value_ranges (stderr);
3967 }
3968
3969
3970 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3971 create a new SSA name N and return the assertion assignment
3972    'N = ASSERT_EXPR <V, V OP W>'.  */
3973
3974 static gimple
3975 build_assert_expr_for (tree cond, tree v)
3976 {
3977 tree n;
3978 gimple assertion;
3979
3980 gcc_assert (TREE_CODE (v) == SSA_NAME);
3981 n = duplicate_ssa_name (v, NULL);
3982
3983 if (COMPARISON_CLASS_P (cond))
3984 {
3985 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3986 assertion = gimple_build_assign (n, a);
3987 }
3988 else if (TREE_CODE (cond) == SSA_NAME)
3989 {
3990 /* Given V, build the assignment N = true. */
3991 gcc_assert (v == cond);
3992 assertion = gimple_build_assign (n, boolean_true_node);
3993 }
3994 else
3995 gcc_unreachable ();
3996
3997 SSA_NAME_DEF_STMT (n) = assertion;
3998
3999 	  /* The new ASSERT_EXPR creates a new SSA name that replaces the
4000 operand of the ASSERT_EXPR. Register the new name and the old one
4001 in the replacement table so that we can fix the SSA web after
4002 adding all the ASSERT_EXPRs. */
4003 register_new_name_mapping (n, v);
4004
4005 return assertion;
4006 }
4007
4008
4009 /* Return true if STMT is a GIMPLE_COND predicate that compares floating
4010    point values.  */
4011
4012 static inline bool
4013 fp_predicate (gimple stmt)
4014 {
4015 GIMPLE_CHECK (stmt, GIMPLE_COND);
4016
4017 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4018 }
4019
4020
4021 /* If the range of values taken by OP can be inferred after STMT executes,
4022 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4023 describes the inferred range. Return true if a range could be
4024 inferred. */
4025
4026 static bool
4027 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4028 {
4029 *val_p = NULL_TREE;
4030 *comp_code_p = ERROR_MARK;
4031
4032 /* Do not attempt to infer anything in names that flow through
4033 abnormal edges. */
4034 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4035 return false;
4036
4037 /* Similarly, don't infer anything from statements that may throw
4038 exceptions. */
4039 if (stmt_could_throw_p (stmt))
4040 return false;
4041
4042 /* If STMT is the last statement of a basic block with no
4043 successors, there is no point inferring anything about any of its
4044 operands. We would not be able to find a proper insertion point
4045 for the assertion, anyway. */
4046 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4047 return false;
4048
4049 /* We can only assume that a pointer dereference will yield
4050 non-NULL if -fdelete-null-pointer-checks is enabled. */
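	  /* For instance, after a store such as "*p_1 = x_2;" (hypothetical
	     names), this records the assertion p_1 != 0, i.e. *COMP_CODE_P
	     becomes NE_EXPR and *VAL_P a zero of the pointer type.  */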
4051 if (flag_delete_null_pointer_checks
4052 && POINTER_TYPE_P (TREE_TYPE (op))
4053 && gimple_code (stmt) != GIMPLE_ASM)
4054 {
4055 unsigned num_uses, num_loads, num_stores;
4056
4057 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4058 if (num_loads + num_stores > 0)
4059 {
4060 *val_p = build_int_cst (TREE_TYPE (op), 0);
4061 *comp_code_p = NE_EXPR;
4062 return true;
4063 }
4064 }
4065
4066 return false;
4067 }
4068
4069
4070 void dump_asserts_for (FILE *, tree);
4071 void debug_asserts_for (tree);
4072 void dump_all_asserts (FILE *);
4073 void debug_all_asserts (void);
4074
4075 /* Dump all the registered assertions for NAME to FILE. */
4076
4077 void
4078 dump_asserts_for (FILE *file, tree name)
4079 {
4080 assert_locus_t loc;
4081
4082 fprintf (file, "Assertions to be inserted for ");
4083 print_generic_expr (file, name, 0);
4084 fprintf (file, "\n");
4085
4086 loc = asserts_for[SSA_NAME_VERSION (name)];
4087 while (loc)
4088 {
4089 fprintf (file, "\t");
4090 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4091 fprintf (file, "\n\tBB #%d", loc->bb->index);
4092 if (loc->e)
4093 {
4094 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4095 loc->e->dest->index);
4096 dump_edge_info (file, loc->e, 0);
4097 }
4098 fprintf (file, "\n\tPREDICATE: ");
4099 print_generic_expr (file, name, 0);
4100 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4101 print_generic_expr (file, loc->val, 0);
4102 fprintf (file, "\n\n");
4103 loc = loc->next;
4104 }
4105
4106 fprintf (file, "\n");
4107 }
4108
4109
4110 /* Dump all the registered assertions for NAME to stderr. */
4111
4112 DEBUG_FUNCTION void
4113 debug_asserts_for (tree name)
4114 {
4115 dump_asserts_for (stderr, name);
4116 }
4117
4118
4119 /* Dump all the registered assertions for all the names to FILE. */
4120
4121 void
4122 dump_all_asserts (FILE *file)
4123 {
4124 unsigned i;
4125 bitmap_iterator bi;
4126
4127 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4128 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4129 dump_asserts_for (file, ssa_name (i));
4130 fprintf (file, "\n");
4131 }
4132
4133
4134 /* Dump all the registered assertions for all the names to stderr. */
4135
4136 DEBUG_FUNCTION void
4137 debug_all_asserts (void)
4138 {
4139 dump_all_asserts (stderr);
4140 }
4141
4142
4143 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4144 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4145 E->DEST, then register this location as a possible insertion point
4146 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4147
4148 BB, E and SI provide the exact insertion point for the new
4149 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4150 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4151 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4152 must not be NULL. */
4153
4154 static void
4155 register_new_assert_for (tree name, tree expr,
4156 enum tree_code comp_code,
4157 tree val,
4158 basic_block bb,
4159 edge e,
4160 gimple_stmt_iterator si)
4161 {
4162 assert_locus_t n, loc, last_loc;
4163 basic_block dest_bb;
4164
4165 gcc_checking_assert (bb == NULL || e == NULL);
4166
4167 if (e == NULL)
4168 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4169 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4170
4171 /* Never build an assert comparing against an integer constant with
4172 TREE_OVERFLOW set. This confuses our undefined overflow warning
4173 machinery. */
4174 if (TREE_CODE (val) == INTEGER_CST
4175 && TREE_OVERFLOW (val))
4176 val = build_int_cst_wide (TREE_TYPE (val),
4177 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4178
4179 /* The new assertion A will be inserted at BB or E. We need to
4180 determine if the new location is dominated by a previously
4181 registered location for A. If we are doing an edge insertion,
4182 assume that A will be inserted at E->DEST. Note that this is not
4183 necessarily true.
4184
4185 If E is a critical edge, it will be split. But even if E is
4186 split, the new block will dominate the same set of blocks that
4187 E->DEST dominates.
4188
4189 	 The reverse, however, is not true: blocks dominated by E->DEST
4190 will not be dominated by the new block created to split E. So,
4191 if the insertion location is on a critical edge, we will not use
4192 the new location to move another assertion previously registered
4193 at a block dominated by E->DEST. */
4194 dest_bb = (bb) ? bb : e->dest;
4195
4196 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4197 VAL at a block dominating DEST_BB, then we don't need to insert a new
4198 one. Similarly, if the same assertion already exists at a block
4199 dominated by DEST_BB and the new location is not on a critical
4200 edge, then update the existing location for the assertion (i.e.,
4201 move the assertion up in the dominance tree).
4202
4203 Note, this is implemented as a simple linked list because there
4204 should not be more than a handful of assertions registered per
4205 name. If this becomes a performance problem, a table hashed by
4206 COMP_CODE and VAL could be implemented. */
4207 loc = asserts_for[SSA_NAME_VERSION (name)];
4208 last_loc = loc;
4209 while (loc)
4210 {
4211 if (loc->comp_code == comp_code
4212 && (loc->val == val
4213 || operand_equal_p (loc->val, val, 0))
4214 && (loc->expr == expr
4215 || operand_equal_p (loc->expr, expr, 0)))
4216 {
4217 /* If the assertion NAME COMP_CODE VAL has already been
4218 registered at a basic block that dominates DEST_BB, then
4219 we don't need to insert the same assertion again. Note
4220 that we don't check strict dominance here to avoid
4221 replicating the same assertion inside the same basic
4222 block more than once (e.g., when a pointer is
4223 dereferenced several times inside a block).
4224
4225 An exception to this rule are edge insertions. If the
4226 new assertion is to be inserted on edge E, then it will
4227 dominate all the other insertions that we may want to
4228 insert in DEST_BB. So, if we are doing an edge
4229 insertion, don't do this dominance check. */
4230 if (e == NULL
4231 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4232 return;
4233
4234 /* Otherwise, if E is not a critical edge and DEST_BB
4235 dominates the existing location for the assertion, move
4236 the assertion up in the dominance tree by updating its
4237 location information. */
4238 if ((e == NULL || !EDGE_CRITICAL_P (e))
4239 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4240 {
4241 loc->bb = dest_bb;
4242 loc->e = e;
4243 loc->si = si;
4244 return;
4245 }
4246 }
4247
4248 /* Update the last node of the list and move to the next one. */
4249 last_loc = loc;
4250 loc = loc->next;
4251 }
4252
4253 /* If we didn't find an assertion already registered for
4254 NAME COMP_CODE VAL, add a new one at the end of the list of
4255 assertions associated with NAME. */
4256 n = XNEW (struct assert_locus_d);
4257 n->bb = dest_bb;
4258 n->e = e;
4259 n->si = si;
4260 n->comp_code = comp_code;
4261 n->val = val;
4262 n->expr = expr;
4263 n->next = NULL;
4264
4265 if (last_loc)
4266 last_loc->next = n;
4267 else
4268 asserts_for[SSA_NAME_VERSION (name)] = n;
4269
4270 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4271 }
4272
4273 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4274 Extract a suitable test code and value and store them into *CODE_P and
4275 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4276
4277 If no extraction was possible, return FALSE, otherwise return TRUE.
4278
4279 If INVERT is true, then we invert the result stored into *CODE_P. */
4280
4281 static bool
4282 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4283 tree cond_op0, tree cond_op1,
4284 bool invert, enum tree_code *code_p,
4285 tree *val_p)
4286 {
4287 enum tree_code comp_code;
4288 tree val;
4289
4290 	  /* We have a comparison of the form NAME COMP VAL
4291 or VAL COMP NAME. */
4292 if (name == cond_op1)
4293 {
4294 /* If the predicate is of the form VAL COMP NAME, flip
4295 COMP around because we need to register NAME as the
4296 first operand in the predicate. */
4297 comp_code = swap_tree_comparison (cond_code);
4298 val = cond_op0;
4299 }
4300 else
4301 {
4302 /* The comparison is of the form NAME COMP VAL, so the
4303 comparison code remains unchanged. */
4304 comp_code = cond_code;
4305 val = cond_op1;
4306 }
4307
4308 /* Invert the comparison code as necessary. */
4309 if (invert)
4310 comp_code = invert_tree_comparison (comp_code, 0);
4311
4312 /* VRP does not handle float types. */
4313 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4314 return false;
4315
4316 /* Do not register always-false predicates.
4317 FIXME: this works around a limitation in fold() when dealing with
4318 enumerations. Given 'enum { N1, N2 } x;', fold will not
4319 fold 'if (x > N2)' to 'if (0)'. */
4320 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4321 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4322 {
4323 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4324 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4325
4326 if (comp_code == GT_EXPR
4327 && (!max
4328 || compare_values (val, max) == 0))
4329 return false;
4330
4331 if (comp_code == LT_EXPR
4332 && (!min
4333 || compare_values (val, min) == 0))
4334 return false;
4335 }
4336 *code_p = comp_code;
4337 *val_p = val;
4338 return true;
4339 }
4340
4341 /* Try to register an edge assertion for SSA name NAME on edge E for
4342 the condition COND contributing to the conditional jump pointed to by BSI.
4343 Invert the condition COND if INVERT is true.
4344 Return true if an assertion for NAME could be registered. */
4345
4346 static bool
4347 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4348 enum tree_code cond_code,
4349 tree cond_op0, tree cond_op1, bool invert)
4350 {
4351 tree val;
4352 enum tree_code comp_code;
4353 bool retval = false;
4354
4355 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4356 cond_op0,
4357 cond_op1,
4358 invert, &comp_code, &val))
4359 return false;
4360
4361 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4362 reachable from E. */
4363 if (live_on_edge (e, name)
4364 && !has_single_use (name))
4365 {
4366 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4367 retval = true;
4368 }
4369
4370 /* In the case of NAME <= CST and NAME being defined as
4371 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4372 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4373 This catches range and anti-range tests. */
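	 /* For instance (illustrative constants): with CST2 == 5 and CST == 9,
	    the test (unsigned) NAME2 + 5 <= 9 holds exactly when NAME2 is in
	    [-5, 4], i.e. NAME2 >= -5 and NAME2 <= 4.  */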
4374 if ((comp_code == LE_EXPR
4375 || comp_code == GT_EXPR)
4376 && TREE_CODE (val) == INTEGER_CST
4377 && TYPE_UNSIGNED (TREE_TYPE (val)))
4378 {
4379 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4380 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4381
4382 /* Extract CST2 from the (optional) addition. */
4383 if (is_gimple_assign (def_stmt)
4384 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4385 {
4386 name2 = gimple_assign_rhs1 (def_stmt);
4387 cst2 = gimple_assign_rhs2 (def_stmt);
4388 if (TREE_CODE (name2) == SSA_NAME
4389 && TREE_CODE (cst2) == INTEGER_CST)
4390 def_stmt = SSA_NAME_DEF_STMT (name2);
4391 }
4392
4393 	      /* Extract NAME3 from the (optional) sign-changing cast.  */
4394 if (gimple_assign_cast_p (def_stmt))
4395 {
4396 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4397 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4398 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4399 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4400 name3 = gimple_assign_rhs1 (def_stmt);
4401 }
4402
4403 /* If name3 is used later, create an ASSERT_EXPR for it. */
4404 if (name3 != NULL_TREE
4405 && TREE_CODE (name3) == SSA_NAME
4406 && (cst2 == NULL_TREE
4407 || TREE_CODE (cst2) == INTEGER_CST)
4408 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4409 && live_on_edge (e, name3)
4410 && !has_single_use (name3))
4411 {
4412 tree tmp;
4413
4414 /* Build an expression for the range test. */
4415 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4416 if (cst2 != NULL_TREE)
4417 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4418
4419 if (dump_file)
4420 {
4421 fprintf (dump_file, "Adding assert for ");
4422 print_generic_expr (dump_file, name3, 0);
4423 fprintf (dump_file, " from ");
4424 print_generic_expr (dump_file, tmp, 0);
4425 fprintf (dump_file, "\n");
4426 }
4427
4428 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4429
4430 retval = true;
4431 }
4432
4433 /* If name2 is used later, create an ASSERT_EXPR for it. */
4434 if (name2 != NULL_TREE
4435 && TREE_CODE (name2) == SSA_NAME
4436 && TREE_CODE (cst2) == INTEGER_CST
4437 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4438 && live_on_edge (e, name2)
4439 && !has_single_use (name2))
4440 {
4441 tree tmp;
4442
4443 /* Build an expression for the range test. */
4444 tmp = name2;
4445 if (TREE_TYPE (name) != TREE_TYPE (name2))
4446 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4447 if (cst2 != NULL_TREE)
4448 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4449
4450 if (dump_file)
4451 {
4452 fprintf (dump_file, "Adding assert for ");
4453 print_generic_expr (dump_file, name2, 0);
4454 fprintf (dump_file, " from ");
4455 print_generic_expr (dump_file, tmp, 0);
4456 fprintf (dump_file, "\n");
4457 }
4458
4459 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4460
4461 retval = true;
4462 }
4463 }
4464
4465 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4466 && TREE_CODE (val) == INTEGER_CST)
4467 {
4468 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4469 tree name2 = NULL_TREE, cst2 = NULL_TREE;
4470 tree val2 = NULL_TREE;
4471 double_int mask = double_int_zero;
4472 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4473
4474 /* Add asserts for NAME cmp CST and NAME being defined
4475 as NAME = (int) NAME2. */
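	 /* For instance (illustrative, 32-bit int): for NAME = (int) NAME2
	    with NAME2 unsigned, the test NAME <= 5 is equivalent to the
	    unsigned range test NAME2 + 0x80000000 <= 0x80000005, which is
	    what gets built below.  */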
4476 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4477 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4478 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4479 && gimple_assign_cast_p (def_stmt))
4480 {
4481 name2 = gimple_assign_rhs1 (def_stmt);
4482 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4483 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4484 && TYPE_UNSIGNED (TREE_TYPE (name2))
4485 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4486 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4487 || !tree_int_cst_equal (val,
4488 TYPE_MIN_VALUE (TREE_TYPE (val))))
4489 && live_on_edge (e, name2)
4490 && !has_single_use (name2))
4491 {
4492 tree tmp, cst;
4493 enum tree_code new_comp_code = comp_code;
4494
4495 cst = fold_convert (TREE_TYPE (name2),
4496 TYPE_MIN_VALUE (TREE_TYPE (val)));
4497 /* Build an expression for the range test. */
4498 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4499 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4500 fold_convert (TREE_TYPE (name2), val));
4501 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4502 {
4503 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4504 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4505 build_int_cst (TREE_TYPE (name2), 1));
4506 }
4507
4508 if (dump_file)
4509 {
4510 fprintf (dump_file, "Adding assert for ");
4511 print_generic_expr (dump_file, name2, 0);
4512 fprintf (dump_file, " from ");
4513 print_generic_expr (dump_file, tmp, 0);
4514 fprintf (dump_file, "\n");
4515 }
4516
4517 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
4518 e, bsi);
4519
4520 retval = true;
4521 }
4522 }
4523
4524 /* Add asserts for NAME cmp CST and NAME being defined as
4525 NAME = NAME2 >> CST2.
4526
4527 Extract CST2 from the right shift. */
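	 /* For instance (illustrative constants): from NAME = NAME2 >> 3 and
	    the test NAME == 5 we know NAME2 is in [40, 47], which is asserted
	    below as the unsigned test NAME2 - 40 <= 7.  */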
4528 if (is_gimple_assign (def_stmt)
4529 && gimple_assign_rhs_code (def_stmt) == RSHIFT_EXPR)
4530 {
4531 name2 = gimple_assign_rhs1 (def_stmt);
4532 cst2 = gimple_assign_rhs2 (def_stmt);
4533 if (TREE_CODE (name2) == SSA_NAME
4534 && host_integerp (cst2, 1)
4535 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4536 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
4537 && prec <= 2 * HOST_BITS_PER_WIDE_INT
4538 && live_on_edge (e, name2)
4539 && !has_single_use (name2))
4540 {
4541 mask = double_int_mask (tree_low_cst (cst2, 1));
4542 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4543 }
4544 }
4545 if (val2 != NULL_TREE
4546 && TREE_CODE (val2) == INTEGER_CST
4547 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4548 TREE_TYPE (val),
4549 val2, cst2), val))
4550 {
4551 enum tree_code new_comp_code = comp_code;
4552 tree tmp, new_val;
4553
4554 tmp = name2;
4555 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
4556 {
4557 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4558 {
4559 tree type = build_nonstandard_integer_type (prec, 1);
4560 tmp = build1 (NOP_EXPR, type, name2);
4561 val2 = fold_convert (type, val2);
4562 }
4563 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
4564 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
4565 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
4566 }
4567 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4568 new_val = val2;
4569 else
4570 {
4571 mask = double_int_ior (tree_to_double_int (val2), mask);
4572 if (double_int_minus_one_p (double_int_sext (mask, prec)))
4573 new_val = NULL_TREE;
4574 else
4575 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
4576 }
4577
4578 if (new_val)
4579 {
4580 if (dump_file)
4581 {
4582 fprintf (dump_file, "Adding assert for ");
4583 print_generic_expr (dump_file, name2, 0);
4584 fprintf (dump_file, " from ");
4585 print_generic_expr (dump_file, tmp, 0);
4586 fprintf (dump_file, "\n");
4587 }
4588
4589 register_new_assert_for (name2, tmp, new_comp_code, new_val,
4590 NULL, e, bsi);
4591 retval = true;
4592 }
4593 }
4594 }
4595
4596 return retval;
4597 }
4598
4599 /* OP is an operand of a truth value expression which is known to have
4600 a particular value. Register any asserts for OP and for any
4601 operands in OP's defining statement.
4602
4603    If CODE is EQ_EXPR, then we want to register OP is zero (false);
4604 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
4605
4606 static bool
4607 register_edge_assert_for_1 (tree op, enum tree_code code,
4608 edge e, gimple_stmt_iterator bsi)
4609 {
4610 bool retval = false;
4611 gimple op_def;
4612 tree val;
4613 enum tree_code rhs_code;
4614
4615 /* We only care about SSA_NAMEs. */
4616 if (TREE_CODE (op) != SSA_NAME)
4617 return false;
4618
4619 /* We know that OP will have a zero or nonzero value. If OP is used
4620 more than once go ahead and register an assert for OP.
4621 	    more than once, go ahead and register an assert for OP.
4622 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4623 it will always be set for OP (because OP is used in a COND_EXPR in
4624 the subgraph). */
4625 if (!has_single_use (op))
4626 {
4627 val = build_int_cst (TREE_TYPE (op), 0);
4628 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4629 retval = true;
4630 }
4631
4632 /* Now look at how OP is set. If it's set from a comparison,
4633 a truth operation or some bit operations, then we may be able
4634 to register information about the operands of that assignment. */
4635 op_def = SSA_NAME_DEF_STMT (op);
4636 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4637 return retval;
4638
4639 rhs_code = gimple_assign_rhs_code (op_def);
4640
4641 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4642 {
4643 bool invert = (code == EQ_EXPR ? true : false);
4644 tree op0 = gimple_assign_rhs1 (op_def);
4645 tree op1 = gimple_assign_rhs2 (op_def);
4646
4647 if (TREE_CODE (op0) == SSA_NAME)
4648 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4649 invert);
4650 if (TREE_CODE (op1) == SSA_NAME)
4651 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4652 invert);
4653 }
4654 else if ((code == NE_EXPR
4655 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
4656 || (code == EQ_EXPR
4657 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
4658 {
4659 /* Recurse on each operand. */
4660 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4661 code, e, bsi);
4662 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4663 code, e, bsi);
4664 }
4665 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
4666 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
4667 {
4668 /* Recurse, flipping CODE. */
4669 code = invert_tree_comparison (code, false);
4670 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4671 code, e, bsi);
4672 }
4673 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4674 {
4675 /* Recurse through the copy. */
4676 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4677 code, e, bsi);
4678 }
4679 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4680 {
4681 /* Recurse through the type conversion. */
4682 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4683 code, e, bsi);
4684 }
4685
4686 return retval;
4687 }
4688
4689 /* Try to register an edge assertion for SSA name NAME on edge E for
4690 the condition COND contributing to the conditional jump pointed to by SI.
4691 Return true if an assertion for NAME could be registered. */
4692
4693 static bool
4694 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4695 enum tree_code cond_code, tree cond_op0,
4696 tree cond_op1)
4697 {
4698 tree val;
4699 enum tree_code comp_code;
4700 bool retval = false;
4701 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4702
4703 /* Do not attempt to infer anything in names that flow through
4704 abnormal edges. */
4705 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4706 return false;
4707
4708 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4709 cond_op0, cond_op1,
4710 is_else_edge,
4711 &comp_code, &val))
4712 return false;
4713
4714 /* Register ASSERT_EXPRs for name. */
4715 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4716 cond_op1, is_else_edge);
4717
4718
4719 /* If COND is effectively an equality test of an SSA_NAME against
4720 the value zero or one, then we may be able to assert values
4721 for SSA_NAMEs which flow into COND. */
4722
4723 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
4724 statement of NAME we can assert both operands of the BIT_AND_EXPR
4725 have nonzero value. */
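	  /* For instance (hypothetical names): for NAME_3 = a_1 & b_2 followed
	     by "if (NAME_3 != 0)", both a_1 != 0 and b_2 != 0 hold on the true
	     edge, since the AND would be zero if either operand were zero.  */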
4726 if (((comp_code == EQ_EXPR && integer_onep (val))
4727 || (comp_code == NE_EXPR && integer_zerop (val))))
4728 {
4729 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4730
4731 if (is_gimple_assign (def_stmt)
4732 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
4733 {
4734 tree op0 = gimple_assign_rhs1 (def_stmt);
4735 tree op1 = gimple_assign_rhs2 (def_stmt);
4736 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4737 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4738 }
4739 }
4740
4741 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
4742 statement of NAME we can assert both operands of the BIT_IOR_EXPR
4743 have zero value. */
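	  /* For instance (hypothetical names): for NAME_3 = a_1 | b_2 followed
	     by "if (NAME_3 == 0)", both a_1 == 0 and b_2 == 0 hold on the true
	     edge, since the IOR is zero only if both operands are zero.  */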
4744 if (((comp_code == EQ_EXPR && integer_zerop (val))
4745 || (comp_code == NE_EXPR && integer_onep (val))))
4746 {
4747 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4748
4749 	      /* For BIT_IOR_EXPR, both operands necessarily have zero value
4750 	         only if NAME == 0, or if the type precision is one.  */
4751 if (is_gimple_assign (def_stmt)
4752 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
4753 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
4754 || comp_code == EQ_EXPR)))
4755 {
4756 tree op0 = gimple_assign_rhs1 (def_stmt);
4757 tree op1 = gimple_assign_rhs2 (def_stmt);
4758 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4759 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4760 }
4761 }
4762
4763 return retval;
4764 }
4765
4766
4767 /* Determine whether the outgoing edges of BB should receive an
4768 ASSERT_EXPR for each of the operands of BB's LAST statement.
4769 The last statement of BB must be a COND_EXPR.
4770
4771 If any of the sub-graphs rooted at BB have an interesting use of
4772 the predicate operands, an assert location node is added to the
4773 list of assertions for the corresponding operands. */
4774
4775 static bool
4776 find_conditional_asserts (basic_block bb, gimple last)
4777 {
4778 bool need_assert;
4779 gimple_stmt_iterator bsi;
4780 tree op;
4781 edge_iterator ei;
4782 edge e;
4783 ssa_op_iter iter;
4784
4785 need_assert = false;
4786 bsi = gsi_for_stmt (last);
4787
4788 /* Look for uses of the operands in each of the sub-graphs
4789 rooted at BB. We need to check each of the outgoing edges
4790 separately, so that we know what kind of ASSERT_EXPR to
4791 insert. */
4792 FOR_EACH_EDGE (e, ei, bb->succs)
4793 {
4794 if (e->dest == bb)
4795 continue;
4796
4797 /* Register the necessary assertions for each operand in the
4798 conditional predicate. */
4799 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4800 {
4801 need_assert |= register_edge_assert_for (op, e, bsi,
4802 gimple_cond_code (last),
4803 gimple_cond_lhs (last),
4804 gimple_cond_rhs (last));
4805 }
4806 }
4807
4808 return need_assert;
4809 }
4810
4811 struct case_info
4812 {
4813 tree expr;
4814 basic_block bb;
4815 };
4816
4817 /* Compare two case labels sorting first by the destination bb index
4818 and then by the case value. */
4819
4820 static int
4821 compare_case_labels (const void *p1, const void *p2)
4822 {
4823 const struct case_info *ci1 = (const struct case_info *) p1;
4824 const struct case_info *ci2 = (const struct case_info *) p2;
4825 int idx1 = ci1->bb->index;
4826 int idx2 = ci2->bb->index;
4827
4828 if (idx1 < idx2)
4829 return -1;
4830 else if (idx1 == idx2)
4831 {
4832 /* Make sure the default label is first in a group. */
4833 if (!CASE_LOW (ci1->expr))
4834 return -1;
4835 else if (!CASE_LOW (ci2->expr))
4836 return 1;
4837 else
4838 return tree_int_cst_compare (CASE_LOW (ci1->expr),
4839 CASE_LOW (ci2->expr));
4840 }
4841 else
4842 return 1;
4843 }
4844
4845 /* Determine whether the outgoing edges of BB should receive an
4846 ASSERT_EXPR for each of the operands of BB's LAST statement.
4847 The last statement of BB must be a SWITCH_EXPR.
4848
4849 If any of the sub-graphs rooted at BB have an interesting use of
4850 the predicate operands, an assert location node is added to the
4851 list of assertions for the corresponding operands. */
4852
4853 static bool
4854 find_switch_asserts (basic_block bb, gimple last)
4855 {
4856 bool need_assert;
4857 gimple_stmt_iterator bsi;
4858 tree op;
4859 edge e;
4860 struct case_info *ci;
4861 size_t n = gimple_switch_num_labels (last);
4862 #if GCC_VERSION >= 4000
4863 unsigned int idx;
4864 #else
4865 /* Work around GCC 3.4 bug (PR 37086). */
4866 volatile unsigned int idx;
4867 #endif
4868
4869 need_assert = false;
4870 bsi = gsi_for_stmt (last);
4871 op = gimple_switch_index (last);
4872 if (TREE_CODE (op) != SSA_NAME)
4873 return false;
4874
4875 /* Build a vector of case labels sorted by destination label. */
4876 ci = XNEWVEC (struct case_info, n);
4877 for (idx = 0; idx < n; ++idx)
4878 {
4879 ci[idx].expr = gimple_switch_label (last, idx);
4880 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4881 }
4882 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4883
4884 for (idx = 0; idx < n; ++idx)
4885 {
4886 tree min, max;
4887 tree cl = ci[idx].expr;
4888 basic_block cbb = ci[idx].bb;
4889
4890 min = CASE_LOW (cl);
4891 max = CASE_HIGH (cl);
4892
4893 /* If there are multiple case labels with the same destination
4894 we need to combine them to a single value range for the edge. */
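	 /* For instance, the labels "case 1: case 2: case 3:" targeting the
	    same block are combined into the single range [1, 3], and the
	    asserts OP >= 1 and OP <= 3 are registered on that edge.  */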
4895 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4896 {
4897 /* Skip labels until the last of the group. */
4898 do {
4899 ++idx;
4900 } while (idx < n && cbb == ci[idx].bb);
4901 --idx;
4902
4903 /* Pick up the maximum of the case label range. */
4904 if (CASE_HIGH (ci[idx].expr))
4905 max = CASE_HIGH (ci[idx].expr);
4906 else
4907 max = CASE_LOW (ci[idx].expr);
4908 }
4909
4910 /* Nothing to do if the range includes the default label until we
4911 can register anti-ranges. */
4912 if (min == NULL_TREE)
4913 continue;
4914
4915 /* Find the edge to register the assert expr on. */
4916 e = find_edge (bb, cbb);
4917
4918 /* Register the necessary assertions for the operand in the
4919 SWITCH_EXPR. */
4920 need_assert |= register_edge_assert_for (op, e, bsi,
4921 max ? GE_EXPR : EQ_EXPR,
4922 op,
4923 fold_convert (TREE_TYPE (op),
4924 min));
4925 if (max)
4926 {
4927 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4928 op,
4929 fold_convert (TREE_TYPE (op),
4930 max));
4931 }
4932 }
4933
4934 XDELETEVEC (ci);
4935 return need_assert;
4936 }
4937
4938
4939 /* Traverse all the statements in block BB looking for statements that
4940 may generate useful assertions for the SSA names in their operand.
4941 If a statement produces a useful assertion A for name N_i, then the
4942 list of assertions already generated for N_i is scanned to
4943 determine if A is actually needed.
4944
4945 If N_i already had the assertion A at a location dominating the
4946 current location, then nothing needs to be done. Otherwise, the
4947 new location for A is recorded instead.
4948
4949 1- For every statement S in BB, all the variables used by S are
4950 added to bitmap FOUND_IN_SUBGRAPH.
4951
4952 2- If statement S uses an operand N in a way that exposes a known
4953 value range for N, then if N was not already generated by an
4954 ASSERT_EXPR, create a new assert location for N. For instance,
4955 if N is a pointer and the statement dereferences it, we can
4956 assume that N is not NULL.
4957
4958 3- COND_EXPRs are a special case of #2. We can derive range
4959 information from the predicate but need to insert different
4960 ASSERT_EXPRs for each of the sub-graphs rooted at the
4961 conditional block. If the last statement of BB is a conditional
4962 expression of the form 'X op Y', then
4963
4964 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4965
4966 b) If the conditional is the only entry point to the sub-graph
4967 corresponding to the THEN_CLAUSE, recurse into it. On
4968 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4969 an ASSERT_EXPR is added for the corresponding variable.
4970
4971 c) Repeat step (b) on the ELSE_CLAUSE.
4972
4973 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4974
4975 For instance,
4976
4977 if (a == 9)
4978 b = a;
4979 else
4980 b = c + 1;
4981
4982 In this case, an assertion on the THEN clause is useful to
4983 determine that 'a' is always 9 on that edge. However, an assertion
4984 on the ELSE clause would be unnecessary.
4985
4986 4- If BB does not end in a conditional expression, then we recurse
4987 into BB's dominator children.
4988
4989 At the end of the recursive traversal, every SSA name will have a
4990 list of locations where ASSERT_EXPRs should be added. When a new
4991 location for name N is found, it is registered by calling
4992 register_new_assert_for. That function keeps track of all the
4993 registered assertions to prevent adding unnecessary assertions.
4994 For instance, if a pointer P_4 is dereferenced more than once in a
4995 dominator tree, only the location dominating all the dereference of
4996 P_4 will receive an ASSERT_EXPR.
4997
4998 If this function returns true, then it means that there are names
4999 for which we need to generate ASSERT_EXPRs. Those assertions are
5000 inserted by process_assert_insertions. */
5001
5002 static bool
5003 find_assert_locations_1 (basic_block bb, sbitmap live)
5004 {
5005 gimple_stmt_iterator si;
5006 gimple last;
5007 gimple phi;
5008 bool need_assert;
5009
5010 need_assert = false;
5011 last = last_stmt (bb);
5012
5013 /* If BB's last statement is a conditional statement involving integer
5014 operands, determine if we need to add ASSERT_EXPRs. */
5015 if (last
5016 && gimple_code (last) == GIMPLE_COND
5017 && !fp_predicate (last)
5018 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5019 need_assert |= find_conditional_asserts (bb, last);
5020
5021 /* If BB's last statement is a switch statement involving integer
5022 operands, determine if we need to add ASSERT_EXPRs. */
5023 if (last
5024 && gimple_code (last) == GIMPLE_SWITCH
5025 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5026 need_assert |= find_switch_asserts (bb, last);
5027
5028 /* Traverse all the statements in BB marking used names and looking
5029 for statements that may infer assertions for their used operands. */
5030 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5031 {
5032 gimple stmt;
5033 tree op;
5034 ssa_op_iter i;
5035
5036 stmt = gsi_stmt (si);
5037
5038 if (is_gimple_debug (stmt))
5039 continue;
5040
5041 /* See if we can derive an assertion for any of STMT's operands. */
5042 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5043 {
5044 tree value;
5045 enum tree_code comp_code;
5046
5047 /* Mark OP in our live bitmap. */
5048 SET_BIT (live, SSA_NAME_VERSION (op));
5049
5050 /* If OP is used in such a way that we can infer a value
5051 range for it, and we don't find a previous assertion for
5052 it, create a new assertion location node for OP. */
5053 if (infer_value_range (stmt, op, &comp_code, &value))
5054 {
5055 /* If we are able to infer a nonzero value range for OP,
5056 then walk backwards through the use-def chain to see if OP
5057 was set via a typecast.
5058
5059 If so, then we can also infer a nonzero value range
5060 for the operand of the NOP_EXPR. */
5061 if (comp_code == NE_EXPR && integer_zerop (value))
5062 {
5063 tree t = op;
5064 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5065
5066 while (is_gimple_assign (def_stmt)
5067 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5068 && TREE_CODE
5069 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5070 && POINTER_TYPE_P
5071 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5072 {
5073 t = gimple_assign_rhs1 (def_stmt);
5074 def_stmt = SSA_NAME_DEF_STMT (t);
5075
5076 /* Note we want to register the assert for the
5077 operand of the NOP_EXPR after SI, not after the
5078 conversion. */
5079 if (! has_single_use (t))
5080 {
5081 register_new_assert_for (t, t, comp_code, value,
5082 bb, NULL, si);
5083 need_assert = true;
5084 }
5085 }
5086 }
5087
5088 /* If OP is used only once, namely in this STMT, don't
5089 bother creating an ASSERT_EXPR for it. Such an
5090 ASSERT_EXPR would do nothing but increase compile time. */
5091 if (!has_single_use (op))
5092 {
5093 register_new_assert_for (op, op, comp_code, value,
5094 bb, NULL, si);
5095 need_assert = true;
5096 }
5097 }
5098 }
5099 }
5100
5101 /* Traverse all PHI nodes in BB marking used operands. */
5102 	  for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5103 {
5104 use_operand_p arg_p;
5105 ssa_op_iter i;
5106 phi = gsi_stmt (si);
5107
5108 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5109 {
5110 tree arg = USE_FROM_PTR (arg_p);
5111 if (TREE_CODE (arg) == SSA_NAME)
5112 SET_BIT (live, SSA_NAME_VERSION (arg));
5113 }
5114 }
5115
5116 return need_assert;
5117 }
5118
5119 /* Do an RPO walk over the function computing SSA name liveness
5120 on-the-fly and deciding on assert expressions to insert.
5121 Returns true if there are assert expressions to be inserted. */
5122
5123 static bool
5124 find_assert_locations (void)
5125 {
5126 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5127 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5128 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5129 int rpo_cnt, i;
5130 bool need_asserts;
5131
5132 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
5133 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5134 for (i = 0; i < rpo_cnt; ++i)
5135 bb_rpo[rpo[i]] = i;
5136
5137 need_asserts = false;
5138 for (i = rpo_cnt-1; i >= 0; --i)
5139 {
5140 basic_block bb = BASIC_BLOCK (rpo[i]);
5141 edge e;
5142 edge_iterator ei;
5143
5144 if (!live[rpo[i]])
5145 {
5146 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5147 sbitmap_zero (live[rpo[i]]);
5148 }
5149
5150 /* Process BB and update the live information with uses in
5151 this block. */
5152 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5153
5154 /* Merge liveness into the predecessor blocks and free it. */
5155 if (!sbitmap_empty_p (live[rpo[i]]))
5156 {
5157 int pred_rpo = i;
5158 FOR_EACH_EDGE (e, ei, bb->preds)
5159 {
5160 int pred = e->src->index;
5161 if (e->flags & EDGE_DFS_BACK)
5162 continue;
5163
5164 if (!live[pred])
5165 {
5166 live[pred] = sbitmap_alloc (num_ssa_names);
5167 sbitmap_zero (live[pred]);
5168 }
5169 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5170
5171 if (bb_rpo[pred] < pred_rpo)
5172 pred_rpo = bb_rpo[pred];
5173 }
5174
5175 /* Record the RPO number of the last visited block that needs
5176 live information from this block. */
5177 last_rpo[rpo[i]] = pred_rpo;
5178 }
5179 else
5180 {
5181 sbitmap_free (live[rpo[i]]);
5182 live[rpo[i]] = NULL;
5183 }
5184
5185 	    /* We can free all successors' live bitmaps if all their
5186 predecessors have been visited already. */
5187 FOR_EACH_EDGE (e, ei, bb->succs)
5188 if (last_rpo[e->dest->index] == i
5189 && live[e->dest->index])
5190 {
5191 sbitmap_free (live[e->dest->index]);
5192 live[e->dest->index] = NULL;
5193 }
5194 }
5195
5196 XDELETEVEC (rpo);
5197 XDELETEVEC (bb_rpo);
5198 XDELETEVEC (last_rpo);
5199 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5200 if (live[i])
5201 sbitmap_free (live[i]);
5202 XDELETEVEC (live);
5203
5204 return need_asserts;
5205 }
5206
5207 /* Create an ASSERT_EXPR for NAME and insert it in the location
5208 indicated by LOC. Return true if we made any edge insertions. */
5209
5210 static bool
5211 process_assert_insertions_for (tree name, assert_locus_t loc)
5212 {
5213 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5214 gimple stmt;
5215 tree cond;
5216 gimple assert_stmt;
5217 edge_iterator ei;
5218 edge e;
5219
5220 /* If we have X <=> X do not insert an assert expr for that. */
5221 if (loc->expr == loc->val)
5222 return false;
5223
5224 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5225 assert_stmt = build_assert_expr_for (cond, name);
5226 if (loc->e)
5227 {
5228 /* We have been asked to insert the assertion on an edge. This
5229 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5230 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5231 || (gimple_code (gsi_stmt (loc->si))
5232 == GIMPLE_SWITCH));
5233
5234 gsi_insert_on_edge (loc->e, assert_stmt);
5235 return true;
5236 }
5237
5238 	 /* Otherwise, we can insert right after LOC->SI, provided the
5239 	    statement is not the last statement in the block.  */
5240 stmt = gsi_stmt (loc->si);
5241 if (!stmt_ends_bb_p (stmt))
5242 {
5243 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5244 return false;
5245 }
5246
5247 /* If STMT must be the last statement in BB, we can only insert new
5248 assertions on the non-abnormal edge out of BB. Note that since
5249 STMT is not control flow, there may only be one non-abnormal edge
5250 out of BB. */
5251 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5252 if (!(e->flags & EDGE_ABNORMAL))
5253 {
5254 gsi_insert_on_edge (e, assert_stmt);
5255 return true;
5256 }
5257
5258 gcc_unreachable ();
5259 }
5260
5261
5262 /* Process all the insertions registered for every name N_i registered
5263 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5264 found in ASSERTS_FOR[i]. */
5265
5266 static void
5267 process_assert_insertions (void)
5268 {
5269 unsigned i;
5270 bitmap_iterator bi;
5271 bool update_edges_p = false;
5272 int num_asserts = 0;
5273
5274 if (dump_file && (dump_flags & TDF_DETAILS))
5275 dump_all_asserts (dump_file);
5276
5277 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5278 {
5279 assert_locus_t loc = asserts_for[i];
5280 gcc_assert (loc);
5281
5282 while (loc)
5283 {
5284 assert_locus_t next = loc->next;
5285 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5286 free (loc);
5287 loc = next;
5288 num_asserts++;
5289 }
5290 }
5291
5292 if (update_edges_p)
5293 gsi_commit_edge_inserts ();
5294
5295 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5296 num_asserts);
5297 }
5298
5299
5300 /* Traverse the flowgraph looking for conditional jumps to insert range
5301 expressions. These range expressions are meant to provide information
5302 to optimizations that need to reason in terms of value ranges. They
5303 will not be expanded into RTL. For instance, given:
5304
5305 x = ...
5306 y = ...
5307 if (x < y)
5308 y = x - 2;
5309 else
5310 x = y + 3;
5311
5312 this pass will transform the code into:
5313
5314 x = ...
5315 y = ...
5316 if (x < y)
5317 {
5318 x = ASSERT_EXPR <x, x < y>
5319 y = x - 2
5320 }
5321 else
5322 {
5323 y = ASSERT_EXPR <y, x <= y>
5324 x = y + 3
5325 }
5326
5327 The idea is that once copy and constant propagation have run, other
5328 optimizations will be able to determine what ranges of values can 'x'
5329 take in different paths of the code, simply by checking the reaching
5330 definition of 'x'. */
5331
5332 static void
5333 insert_range_assertions (void)
5334 {
5335 need_assert_for = BITMAP_ALLOC (NULL);
5336 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5337
5338 calculate_dominance_info (CDI_DOMINATORS);
5339
5340 if (find_assert_locations ())
5341 {
5342 process_assert_insertions ();
5343 update_ssa (TODO_update_ssa_no_phi);
5344 }
5345
5346 if (dump_file && (dump_flags & TDF_DETAILS))
5347 {
5348 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5349 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5350 }
5351
5352 free (asserts_for);
5353 BITMAP_FREE (need_assert_for);
5354 }
5355
5356 /* Check one ARRAY_REF in REF, located at LOCATION.  Ignore flexible arrays
5357    and "struct" hacks.  If VRP can determine that the
5358    array subscript is a constant, check whether it is outside the valid
5359    range.  If the array subscript is a RANGE, warn if it does not
5360    overlap the valid range.
5361    IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */
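/* For instance (illustrative): given "int a[10];", UP_BOUND is 9 and
   UP_BOUND_P1 is 10, so the subscript in a[10] is diagnosed as above the
   bounds, while &a[10] is accepted because IGNORE_OFF_BY_ONE is true for
   references found under an ADDR_EXPR.  */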
5362
5363 static void
5364 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5365 {
5366 value_range_t* vr = NULL;
5367 tree low_sub, up_sub;
5368 tree low_bound, up_bound, up_bound_p1;
5369 tree base;
5370
5371 if (TREE_NO_WARNING (ref))
5372 return;
5373
5374 low_sub = up_sub = TREE_OPERAND (ref, 1);
5375 up_bound = array_ref_up_bound (ref);
5376
5377 /* Cannot check flexible arrays. */
5378 if (!up_bound
5379 || TREE_CODE (up_bound) != INTEGER_CST)
5380 return;
5381
5382 /* Accesses to trailing arrays via pointers may access storage
5383 beyond the type's array bounds. */
5384 base = get_base_address (ref);
5385 if (base && TREE_CODE (base) == MEM_REF)
5386 {
5387 tree cref, next = NULL_TREE;
5388
5389 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5390 return;
5391
5392 cref = TREE_OPERAND (ref, 0);
5393 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5394 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5395 next && TREE_CODE (next) != FIELD_DECL;
5396 next = DECL_CHAIN (next))
5397 ;
5398
5399 /* If this is the last field in a struct type or a field in a
5400 union type do not warn. */
5401 if (!next)
5402 return;
5403 }
5404
5405 low_bound = array_ref_low_bound (ref);
5406 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5407
5408 if (TREE_CODE (low_sub) == SSA_NAME)
5409 {
5410 vr = get_value_range (low_sub);
5411 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5412 {
5413 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5414 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5415 }
5416 }
5417
5418 if (vr && vr->type == VR_ANTI_RANGE)
5419 {
5420 if (TREE_CODE (up_sub) == INTEGER_CST
5421 && tree_int_cst_lt (up_bound, up_sub)
5422 && TREE_CODE (low_sub) == INTEGER_CST
5423 && tree_int_cst_lt (low_sub, low_bound))
5424 {
5425 warning_at (location, OPT_Warray_bounds,
5426 "array subscript is outside array bounds");
5427 TREE_NO_WARNING (ref) = 1;
5428 }
5429 }
5430 else if (TREE_CODE (up_sub) == INTEGER_CST
5431 && (ignore_off_by_one
5432 ? (tree_int_cst_lt (up_bound, up_sub)
5433 && !tree_int_cst_equal (up_bound_p1, up_sub))
5434 : (tree_int_cst_lt (up_bound, up_sub)
5435 || tree_int_cst_equal (up_bound_p1, up_sub))))
5436 {
5437 warning_at (location, OPT_Warray_bounds,
5438 "array subscript is above array bounds");
5439 TREE_NO_WARNING (ref) = 1;
5440 }
5441 else if (TREE_CODE (low_sub) == INTEGER_CST
5442 && tree_int_cst_lt (low_sub, low_bound))
5443 {
5444 warning_at (location, OPT_Warray_bounds,
5445 "array subscript is below array bounds");
5446 TREE_NO_WARNING (ref) = 1;
5447 }
5448 }
5449
5450 /* Check whether the expression T, located at LOCATION, computes the
5451 address of an ARRAY_REF, and call check_array_ref on it. */
5452
5453 static void
5454 search_for_addr_array (tree t, location_t location)
5455 {
5456 while (TREE_CODE (t) == SSA_NAME)
5457 {
5458 gimple g = SSA_NAME_DEF_STMT (t);
5459
5460 if (gimple_code (g) != GIMPLE_ASSIGN)
5461 return;
5462
5463 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5464 != GIMPLE_SINGLE_RHS)
5465 return;
5466
5467 t = gimple_assign_rhs1 (g);
5468 }
5469
5470
5471 /* We are only interested in addresses of ARRAY_REFs. */
5472 if (TREE_CODE (t) != ADDR_EXPR)
5473 return;
5474
5475 /* Check each ARRAY_REF in the reference chain. */
5476 do
5477 {
5478 if (TREE_CODE (t) == ARRAY_REF)
5479 check_array_ref (location, t, true /*ignore_off_by_one*/);
5480
5481 t = TREE_OPERAND (t, 0);
5482 }
5483 while (handled_component_p (t));
5484
5485 if (TREE_CODE (t) == MEM_REF
5486 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5487 && !TREE_NO_WARNING (t))
5488 {
5489 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5490 tree low_bound, up_bound, el_sz;
5491 double_int idx;
5492 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5493 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5494 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5495 return;
5496
5497 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5498 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5499 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5500 if (!low_bound
5501 || TREE_CODE (low_bound) != INTEGER_CST
5502 || !up_bound
5503 || TREE_CODE (up_bound) != INTEGER_CST
5504 || !el_sz
5505 || TREE_CODE (el_sz) != INTEGER_CST)
5506 return;
5507
5508 idx = mem_ref_offset (t);
5509 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5510 if (double_int_scmp (idx, double_int_zero) < 0)
5511 {
5512 warning_at (location, OPT_Warray_bounds,
5513 "array subscript is below array bounds");
5514 TREE_NO_WARNING (t) = 1;
5515 }
5516 else if (double_int_scmp (idx,
5517 double_int_add
5518 (double_int_add
5519 (tree_to_double_int (up_bound),
5520 double_int_neg
5521 (tree_to_double_int (low_bound))),
5522 double_int_one)) > 0)
5523 {
5524 warning_at (location, OPT_Warray_bounds,
5525 "array subscript is above array bounds");
5526 TREE_NO_WARNING (t) = 1;
5527 }
5528 }
5529 }
5530
5531 /* walk_tree() callback that checks whether *TP is
5532 an ARRAY_REF inside an ADDR_EXPR (in which case an array
5533 subscript one past the valid range is allowed). Call
5534 check_array_ref for each ARRAY_REF found. The location is
5535 passed in DATA. */
5536
5537 static tree
5538 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5539 {
5540 tree t = *tp;
5541 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5542 location_t location;
5543
5544 if (EXPR_HAS_LOCATION (t))
5545 location = EXPR_LOCATION (t);
5546 else
5547 {
5548 location_t *locp = (location_t *) wi->info;
5549 location = *locp;
5550 }
5551
5552 *walk_subtree = TRUE;
5553
5554 if (TREE_CODE (t) == ARRAY_REF)
5555 check_array_ref (location, t, false /*ignore_off_by_one*/);
5556
5557 if (TREE_CODE (t) == MEM_REF
5558 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5559 search_for_addr_array (TREE_OPERAND (t, 0), location);
5560
5561 if (TREE_CODE (t) == ADDR_EXPR)
5562 *walk_subtree = FALSE;
5563
5564 return NULL_TREE;
5565 }
5566
5567 /* Walk over all statements of all reachable BBs and call check_array_bounds
5568 on them. */
5569
5570 static void
5571 check_all_array_refs (void)
5572 {
5573 basic_block bb;
5574 gimple_stmt_iterator si;
5575
5576 FOR_EACH_BB (bb)
5577 {
5578 edge_iterator ei;
5579 edge e;
5580 bool executable = false;
5581
5582 /* Skip blocks that were found to be unreachable. */
5583 FOR_EACH_EDGE (e, ei, bb->preds)
5584 executable |= !!(e->flags & EDGE_EXECUTABLE);
5585 if (!executable)
5586 continue;
5587
5588 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5589 {
5590 gimple stmt = gsi_stmt (si);
5591 struct walk_stmt_info wi;
5592 if (!gimple_has_location (stmt))
5593 continue;
5594
5595 if (is_gimple_call (stmt))
5596 {
5597 size_t i;
5598 size_t n = gimple_call_num_args (stmt);
5599 for (i = 0; i < n; i++)
5600 {
5601 tree arg = gimple_call_arg (stmt, i);
5602 search_for_addr_array (arg, gimple_location (stmt));
5603 }
5604 }
5605 else
5606 {
5607 memset (&wi, 0, sizeof (wi));
5608 wi.info = CONST_CAST (void *, (const void *)
5609 gimple_location_ptr (stmt));
5610
5611 walk_gimple_op (gsi_stmt (si),
5612 check_array_bounds,
5613 &wi);
5614 }
5615 }
5616 }
5617 }
5618
5619 /* Convert range assertion expressions into the implied copies and
5620 copy propagate away the copies. Doing the trivial copy propagation
5621 here avoids the need to run the full copy propagation pass after
5622 VRP.
5623
5624 FIXME, this will eventually lead to copy propagation removing the
5625 names that had useful range information attached to them. For
5626 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5627 then N_i will have the range [3, +INF].
5628
5629 However, by converting the assertion into the implied copy
5630 operation N_i = N_j, we will then copy-propagate N_j into the uses
5631 of N_i and lose the range information. We may want to hold on to
5632 ASSERT_EXPRs a little while longer as the ranges could be used in
5633 things like jump threading.
5634
5635 The problem with keeping ASSERT_EXPRs around is that passes after
5636 VRP need to handle them appropriately.
5637
5638 Another approach would be to make the range information a first
5639 class property of the SSA_NAME so that it can be queried from
5640 any pass. This is made somewhat more complex by the need for
5641 multiple ranges to be associated with one SSA_NAME. */
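/* For instance, a statement such as x_14 = ASSERT_EXPR <x_5, x_5 > 3>
   is handled by replacing every use of x_14 with x_5 and deleting the
   assertion; the names shown are illustrative only.  */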
5642
5643 static void
5644 remove_range_assertions (void)
5645 {
5646 basic_block bb;
5647 gimple_stmt_iterator si;
5648
5649 /* Note that the GSI iterator bump happens at the bottom of the
5650 loop and no bump is necessary if we're removing the statement
5651 referenced by the current GSI. */
5652 FOR_EACH_BB (bb)
5653 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5654 {
5655 gimple stmt = gsi_stmt (si);
5656 gimple use_stmt;
5657
5658 if (is_gimple_assign (stmt)
5659 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5660 {
5661 tree rhs = gimple_assign_rhs1 (stmt);
5662 tree var;
5663 tree cond = fold (ASSERT_EXPR_COND (rhs));
5664 use_operand_p use_p;
5665 imm_use_iterator iter;
5666
5667 gcc_assert (cond != boolean_false_node);
5668
5669 /* Propagate the RHS into every use of the LHS. */
5670 var = ASSERT_EXPR_VAR (rhs);
5671 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5672 gimple_assign_lhs (stmt))
5673 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5674 {
5675 SET_USE (use_p, var);
5676 gcc_assert (TREE_CODE (var) == SSA_NAME);
5677 }
5678
5679 /* And finally, remove the copy, it is not needed. */
5680 gsi_remove (&si, true);
5681 release_defs (stmt);
5682 }
5683 else
5684 gsi_next (&si);
5685 }
5686 }
5687
5688
5689 /* Return true if STMT is interesting for VRP. */
5690
5691 static bool
5692 stmt_interesting_for_vrp (gimple stmt)
5693 {
5694 if (gimple_code (stmt) == GIMPLE_PHI
5695 && is_gimple_reg (gimple_phi_result (stmt))
5696 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5697 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5698 return true;
5699 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5700 {
5701 tree lhs = gimple_get_lhs (stmt);
5702
5703 /* In general, assignments with virtual operands are not useful
5704 for deriving ranges, with the obvious exception of calls to
5705 builtin functions. */
5706 if (lhs && TREE_CODE (lhs) == SSA_NAME
5707 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5708 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5709 && ((is_gimple_call (stmt)
5710 && gimple_call_fndecl (stmt) != NULL_TREE
5711 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
5712 || !gimple_vuse (stmt)))
5713 return true;
5714 }
5715 else if (gimple_code (stmt) == GIMPLE_COND
5716 || gimple_code (stmt) == GIMPLE_SWITCH)
5717 return true;
5718
5719 return false;
5720 }
5721
5722
5723 /* Initialize local data structures for VRP. */
5724
5725 static void
5726 vrp_initialize (void)
5727 {
5728 basic_block bb;
5729
5730 values_propagated = false;
5731 num_vr_values = num_ssa_names;
5732 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
5733 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5734
5735 FOR_EACH_BB (bb)
5736 {
5737 gimple_stmt_iterator si;
5738
5739 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5740 {
5741 gimple phi = gsi_stmt (si);
5742 if (!stmt_interesting_for_vrp (phi))
5743 {
5744 tree lhs = PHI_RESULT (phi);
5745 set_value_range_to_varying (get_value_range (lhs));
5746 prop_set_simulate_again (phi, false);
5747 }
5748 else
5749 prop_set_simulate_again (phi, true);
5750 }
5751
5752 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5753 {
5754 gimple stmt = gsi_stmt (si);
5755
5756 /* If the statement is a control insn, then we must make sure
5757 it is simulated at least once. Failing to do so means that
5758 its outgoing edges will never get added. */
5759 if (stmt_ends_bb_p (stmt))
5760 prop_set_simulate_again (stmt, true);
5761 else if (!stmt_interesting_for_vrp (stmt))
5762 {
5763 ssa_op_iter i;
5764 tree def;
5765 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5766 set_value_range_to_varying (get_value_range (def));
5767 prop_set_simulate_again (stmt, false);
5768 }
5769 else
5770 prop_set_simulate_again (stmt, true);
5771 }
5772 }
5773 }
5774
5775 /* Return the singleton value from NAME's value-range, or NAME itself. */
5776
5777 static inline tree
5778 vrp_valueize (tree name)
5779 {
5780 if (TREE_CODE (name) == SSA_NAME)
5781 {
5782 value_range_t *vr = get_value_range (name);
5783 if (vr->type == VR_RANGE
5784 && (vr->min == vr->max
5785 || operand_equal_p (vr->min, vr->max, 0)))
5786 return vr->min;
5787 }
5788 return name;
5789 }
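/* As an illustrative example (names and ranges invented): if x_3 has
   the singleton range [7, 7], vrp_valueize (x_3) returns the constant
   7, allowing gimple_fold_stmt_to_constant to fold a statement such as
   y_4 = x_3 + 1 to the constant 8.  For any other range the SSA name
   itself is returned.  */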
5790
5791 /* Visit assignment STMT. If it produces an interesting range, record
5792 the SSA name in *OUTPUT_P. */
5793
5794 static enum ssa_prop_result
5795 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5796 {
5797 tree def, lhs;
5798 ssa_op_iter iter;
5799 enum gimple_code code = gimple_code (stmt);
5800 lhs = gimple_get_lhs (stmt);
5801
5802 /* We only keep track of ranges in integral and pointer types. */
5803 if (TREE_CODE (lhs) == SSA_NAME
5804 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5805 /* It is valid to have NULL MIN/MAX values on a type. See
5806 build_range_type. */
5807 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5808 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5809 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5810 {
5811 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5812
5813 /* Try folding the statement to a constant first. */
5814 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
5815 if (tem && !is_overflow_infinity (tem))
5816 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
5817 /* Then dispatch to value-range extracting functions. */
5818 else if (code == GIMPLE_CALL)
5819 extract_range_basic (&new_vr, stmt);
5820 else
5821 extract_range_from_assignment (&new_vr, stmt);
5822
5823 if (update_value_range (lhs, &new_vr))
5824 {
5825 *output_p = lhs;
5826
5827 if (dump_file && (dump_flags & TDF_DETAILS))
5828 {
5829 fprintf (dump_file, "Found new range for ");
5830 print_generic_expr (dump_file, lhs, 0);
5831 fprintf (dump_file, ": ");
5832 dump_value_range (dump_file, &new_vr);
5833 fprintf (dump_file, "\n\n");
5834 }
5835
5836 if (new_vr.type == VR_VARYING)
5837 return SSA_PROP_VARYING;
5838
5839 return SSA_PROP_INTERESTING;
5840 }
5841
5842 return SSA_PROP_NOT_INTERESTING;
5843 }
5844
5845 /* Every other statement produces no useful ranges. */
5846 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5847 set_value_range_to_varying (get_value_range (def));
5848
5849 return SSA_PROP_VARYING;
5850 }
5851
5852 /* Helper that gets the value range of the SSA_NAME with version I,
5853 or a symbolic range containing just the SSA_NAME if its value range
5854 is varying or undefined. */
5855
5856 static inline value_range_t
5857 get_vr_for_comparison (int i)
5858 {
5859 value_range_t vr = *get_value_range (ssa_name (i));
5860
5861 /* If name N_i does not have a valid range, use N_i as its own
5862 range. This allows us to compare against names that may
5863 have N_i in their ranges. */
5864 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5865 {
5866 vr.type = VR_RANGE;
5867 vr.min = ssa_name (i);
5868 vr.max = ssa_name (i);
5869 }
5870
5871 return vr;
5872 }
5873
5874 /* Compare all the value ranges for names equivalent to VAR with VAL
5875 using comparison code COMP. Return the same value returned by
5876 compare_range_with_value, including the setting of
5877 *STRICT_OVERFLOW_P. */
5878
5879 static tree
5880 compare_name_with_value (enum tree_code comp, tree var, tree val,
5881 bool *strict_overflow_p)
5882 {
5883 bitmap_iterator bi;
5884 unsigned i;
5885 bitmap e;
5886 tree retval, t;
5887 int used_strict_overflow;
5888 bool sop;
5889 value_range_t equiv_vr;
5890
5891 /* Get the set of equivalences for VAR. */
5892 e = get_value_range (var)->equiv;
5893
5894 /* Start at -1. Set it to 0 if we do a comparison without relying
5895 on overflow, or 1 if all comparisons rely on overflow. */
5896 used_strict_overflow = -1;
5897
5898 /* Compare VAR's value range with VAL. */
5899 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5900 sop = false;
5901 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5902 if (retval)
5903 used_strict_overflow = sop ? 1 : 0;
5904
5905 /* If the equiv set is empty we have done all work we need to do. */
5906 if (e == NULL)
5907 {
5908 if (retval
5909 && used_strict_overflow > 0)
5910 *strict_overflow_p = true;
5911 return retval;
5912 }
5913
5914 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5915 {
5916 equiv_vr = get_vr_for_comparison (i);
5917 sop = false;
5918 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5919 if (t)
5920 {
5921 /* If we get different answers from different members
5922 of the equivalence set this check must be in a dead
5923 code region. Folding it to a trap representation
5924 would be correct here. For now just return don't-know. */
5925 if (retval != NULL
5926 && t != retval)
5927 {
5928 retval = NULL_TREE;
5929 break;
5930 }
5931 retval = t;
5932
5933 if (!sop)
5934 used_strict_overflow = 0;
5935 else if (used_strict_overflow < 0)
5936 used_strict_overflow = 1;
5937 }
5938 }
5939
5940 if (retval
5941 && used_strict_overflow > 0)
5942 *strict_overflow_p = true;
5943
5944 return retval;
5945 }
5946
5947
5948 /* Given a comparison code COMP and names N1 and N2, compare all the
5949 ranges equivalent to N1 against all the ranges equivalent to N2
5950 to determine the value of N1 COMP N2. Return the same value
5951 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5952 whether we relied on an overflow infinity in the comparison. */
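/* For example (names invented): if N1's equivalence set is { N1, a_3 }
   and N2's is { N2, a_3 }, the sets intersect, so N1 == N2 folds to
   true below without inspecting any ranges.  */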
5953
5954
5955 static tree
5956 compare_names (enum tree_code comp, tree n1, tree n2,
5957 bool *strict_overflow_p)
5958 {
5959 tree t, retval;
5960 bitmap e1, e2;
5961 bitmap_iterator bi1, bi2;
5962 unsigned i1, i2;
5963 int used_strict_overflow;
5964 static bitmap_obstack *s_obstack = NULL;
5965 static bitmap s_e1 = NULL, s_e2 = NULL;
5966
5967 /* Compare the ranges of every name equivalent to N1 against the
5968 ranges of every name equivalent to N2. */
5969 e1 = get_value_range (n1)->equiv;
5970 e2 = get_value_range (n2)->equiv;
5971
5972 /* Use the fake bitmaps if e1 or e2 are not available. */
5973 if (s_obstack == NULL)
5974 {
5975 s_obstack = XNEW (bitmap_obstack);
5976 bitmap_obstack_initialize (s_obstack);
5977 s_e1 = BITMAP_ALLOC (s_obstack);
5978 s_e2 = BITMAP_ALLOC (s_obstack);
5979 }
5980 if (e1 == NULL)
5981 e1 = s_e1;
5982 if (e2 == NULL)
5983 e2 = s_e2;
5984
5985 /* Add N1 and N2 to their own set of equivalences to avoid
5986 duplicating the body of the loop just to check N1 and N2
5987 ranges. */
5988 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5989 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5990
5991 /* If the equivalence sets have a common intersection, then the two
5992 names can be compared without checking their ranges. */
5993 if (bitmap_intersect_p (e1, e2))
5994 {
5995 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5996 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5997
5998 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5999 ? boolean_true_node
6000 : boolean_false_node;
6001 }
6002
6003 /* Start at -1. Set it to 0 if we do a comparison without relying
6004 on overflow, or 1 if all comparisons rely on overflow. */
6005 used_strict_overflow = -1;
6006
6007 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
6008 already added to their own equivalence sets above, so the loop
6009 below also checks their ranges directly. */
6010 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6011 {
6012 value_range_t vr1 = get_vr_for_comparison (i1);
6013
6014 t = retval = NULL_TREE;
6015 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6016 {
6017 bool sop = false;
6018
6019 value_range_t vr2 = get_vr_for_comparison (i2);
6020
6021 t = compare_ranges (comp, &vr1, &vr2, &sop);
6022 if (t)
6023 {
6024 /* If we get different answers from different members
6025 of the equivalence set this check must be in a dead
6026 code region. Folding it to a trap representation
6027 would be correct here. For now just return don't-know. */
6028 if (retval != NULL
6029 && t != retval)
6030 {
6031 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6032 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6033 return NULL_TREE;
6034 }
6035 retval = t;
6036
6037 if (!sop)
6038 used_strict_overflow = 0;
6039 else if (used_strict_overflow < 0)
6040 used_strict_overflow = 1;
6041 }
6042 }
6043
6044 if (retval)
6045 {
6046 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6047 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6048 if (used_strict_overflow > 0)
6049 *strict_overflow_p = true;
6050 return retval;
6051 }
6052 }
6053
6054 /* None of the equivalent ranges are useful in computing this
6055 comparison. */
6056 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6057 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6058 return NULL_TREE;
6059 }
6060
6061 /* Helper function for vrp_evaluate_conditional_warnv. */
6062
6063 static tree
6064 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6065 tree op0, tree op1,
6066 bool * strict_overflow_p)
6067 {
6068 value_range_t *vr0, *vr1;
6069
6070 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6071 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6072
6073 if (vr0 && vr1)
6074 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6075 else if (vr0 && vr1 == NULL)
6076 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6077 else if (vr0 == NULL && vr1)
6078 return (compare_range_with_value
6079 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6080 return NULL;
6081 }
6082
6083 /* Helper function for vrp_evaluate_conditional_warnv. */
6084
6085 static tree
6086 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6087 tree op1, bool use_equiv_p,
6088 bool *strict_overflow_p, bool *only_ranges)
6089 {
6090 tree ret;
6091 if (only_ranges)
6092 *only_ranges = true;
6093
6094 /* We only deal with integral and pointer types. */
6095 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6096 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6097 return NULL_TREE;
6098
6099 if (use_equiv_p)
6100 {
6101 if (only_ranges
6102 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6103 (code, op0, op1, strict_overflow_p)))
6104 return ret;
6105 *only_ranges = false;
6106 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6107 return compare_names (code, op0, op1, strict_overflow_p);
6108 else if (TREE_CODE (op0) == SSA_NAME)
6109 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6110 else if (TREE_CODE (op1) == SSA_NAME)
6111 return (compare_name_with_value
6112 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6113 }
6114 else
6115 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6116 strict_overflow_p);
6117 return NULL_TREE;
6118 }
6119
6120 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6121 information. Return NULL if the conditional cannot be evaluated.
6122 The ranges of all the names equivalent with the operands in COND
6123 will be used when trying to compute the value. If the result is
6124 based on undefined signed overflow, issue a warning if
6125 appropriate. */
6126
6127 static tree
6128 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6129 {
6130 bool sop;
6131 tree ret;
6132 bool only_ranges;
6133
6134 /* Some passes and foldings leak constants with overflow flag set
6135 into the IL. Avoid doing wrong things with these and bail out. */
6136 if ((TREE_CODE (op0) == INTEGER_CST
6137 && TREE_OVERFLOW (op0))
6138 || (TREE_CODE (op1) == INTEGER_CST
6139 && TREE_OVERFLOW (op1)))
6140 return NULL_TREE;
6141
6142 sop = false;
6143 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6144 &only_ranges);
6145
6146 if (ret && sop)
6147 {
6148 enum warn_strict_overflow_code wc;
6149 const char* warnmsg;
6150
6151 if (is_gimple_min_invariant (ret))
6152 {
6153 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6154 warnmsg = G_("assuming signed overflow does not occur when "
6155 "simplifying conditional to constant");
6156 }
6157 else
6158 {
6159 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6160 warnmsg = G_("assuming signed overflow does not occur when "
6161 "simplifying conditional");
6162 }
6163
6164 if (issue_strict_overflow_warning (wc))
6165 {
6166 location_t location;
6167
6168 if (!gimple_has_location (stmt))
6169 location = input_location;
6170 else
6171 location = gimple_location (stmt);
6172 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6173 }
6174 }
6175
6176 if (warn_type_limits
6177 && ret && only_ranges
6178 && TREE_CODE_CLASS (code) == tcc_comparison
6179 && TREE_CODE (op0) == SSA_NAME)
6180 {
6181 /* If the comparison is being folded and the operand on the LHS
6182 is being compared against a constant value that is outside of
6183 the natural range of OP0's type, then the predicate will
6184 always fold regardless of the value of OP0. If -Wtype-limits
6185 was specified, emit a warning. */
6186 tree type = TREE_TYPE (op0);
6187 value_range_t *vr0 = get_value_range (op0);
6188
6189 if (vr0->type != VR_VARYING
6190 && INTEGRAL_TYPE_P (type)
6191 && vrp_val_is_min (vr0->min)
6192 && vrp_val_is_max (vr0->max)
6193 && is_gimple_min_invariant (op1))
6194 {
6195 location_t location;
6196
6197 if (!gimple_has_location (stmt))
6198 location = input_location;
6199 else
6200 location = gimple_location (stmt);
6201
6202 warning_at (location, OPT_Wtype_limits,
6203 integer_zerop (ret)
6204 ? G_("comparison always false "
6205 "due to limited range of data type")
6206 : G_("comparison always true "
6207 "due to limited range of data type"));
6208 }
6209 }
6210
6211 return ret;
6212 }
6213
6214
6215 /* Visit conditional statement STMT. If we can determine which edge
6216 will be taken out of STMT's basic block, record it in
6217 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6218 SSA_PROP_VARYING. */
6219
6220 static enum ssa_prop_result
6221 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6222 {
6223 tree val;
6224 bool sop;
6225
6226 *taken_edge_p = NULL;
6227
6228 if (dump_file && (dump_flags & TDF_DETAILS))
6229 {
6230 tree use;
6231 ssa_op_iter i;
6232
6233 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6234 print_gimple_stmt (dump_file, stmt, 0, 0);
6235 fprintf (dump_file, "\nWith known ranges\n");
6236
6237 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6238 {
6239 fprintf (dump_file, "\t");
6240 print_generic_expr (dump_file, use, 0);
6241 fprintf (dump_file, ": ");
6242 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6243 }
6244
6245 fprintf (dump_file, "\n");
6246 }
6247
6248 /* Compute the value of the predicate COND by checking the known
6249 ranges of each of its operands.
6250
6251 Note that we cannot evaluate all the equivalent ranges here
6252 because those ranges may not yet be final and with the current
6253 propagation strategy, we cannot determine when the value ranges
6254 of the names in the equivalence set have changed.
6255
6256 For instance, given the following code fragment
6257
6258 i_5 = PHI <8, i_13>
6259 ...
6260 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6261 if (i_14 == 1)
6262 ...
6263
6264 Assume that on the first visit to i_14, i_5 has the temporary
6265 range [8, 8] because the second argument to the PHI function is
6266 not yet executable. We derive the range ~[0, 0] for i_14 and the
6267 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6268 the first time, since i_14 is equivalent to the range [8, 8], we
6269 determine that the predicate is always false.
6270
6271 On the next round of propagation, i_13 is determined to be
6272 VARYING, which causes i_5 to drop down to VARYING. So, another
6273 visit to i_14 is scheduled. In this second visit, we compute the
6274 exact same range and equivalence set for i_14, namely ~[0, 0] and
6275 { i_5 }. But we did not have the previous range for i_5
6276 registered, so vrp_visit_assignment thinks that the range for
6277 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6278 is not visited again, which stops propagation from visiting
6279 statements in the THEN clause of that if().
6280
6281 To properly fix this we would need to keep the previous range
6282 value for the names in the equivalence set. This way we would've
6283 discovered that from one visit to the other i_5 changed from
6284 range [8, 8] to VR_VARYING.
6285
6286 However, fixing this apparent limitation may not be worth the
6287 additional checking. Testing on several code bases (GCC, DLV,
6288 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6289 4 more predicates folded in SPEC. */
6290 sop = false;
6291
6292 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6293 gimple_cond_lhs (stmt),
6294 gimple_cond_rhs (stmt),
6295 false, &sop, NULL);
6296 if (val)
6297 {
6298 if (!sop)
6299 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6300 else
6301 {
6302 if (dump_file && (dump_flags & TDF_DETAILS))
6303 fprintf (dump_file,
6304 "\nIgnoring predicate evaluation because "
6305 "it assumes that signed overflow is undefined");
6306 val = NULL_TREE;
6307 }
6308 }
6309
6310 if (dump_file && (dump_flags & TDF_DETAILS))
6311 {
6312 fprintf (dump_file, "\nPredicate evaluates to: ");
6313 if (val == NULL_TREE)
6314 fprintf (dump_file, "DON'T KNOW\n");
6315 else
6316 print_generic_stmt (dump_file, val, 0);
6317 }
6318
6319 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6320 }
6321
6322 /* Searches the case labels of switch statement STMT for the index *IDX of
6323 the CASE_LABEL that includes the value VAL. The search is restricted to
6324 the range [START_IDX, n - 1] where n is the number of labels.
6325
6326 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6327 returned.
6328
6329 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6330 it is placed in IDX and false is returned.
6331
6332 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6333 returned. */
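/* A hypothetical example: with case labels 1, 5 ... 7 and 10, searching
   for VAL = 6 stores the index of the 5 ... 7 label in *IDX and returns
   true; VAL = 8 stores the index of the 10 label and returns false;
   VAL = 11 stores n and returns false.  */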
6334
6335 static bool
6336 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6337 {
6338 size_t n = gimple_switch_num_labels (stmt);
6339 size_t low, high;
6340
6341 /* Find case label for minimum of the value range or the next one.
6342 At each iteration we are searching in [low, high - 1]. */
6343
6344 for (low = start_idx, high = n; high != low; )
6345 {
6346 tree t;
6347 int cmp;
6348 /* Note that i != high, so we never ask for n. */
6349 size_t i = (high + low) / 2;
6350 t = gimple_switch_label (stmt, i);
6351
6352 /* Cache the result of comparing CASE_LOW and val. */
6353 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6354
6355 if (cmp == 0)
6356 {
6357 /* Ranges cannot be empty. */
6358 *idx = i;
6359 return true;
6360 }
6361 else if (cmp > 0)
6362 high = i;
6363 else
6364 {
6365 low = i + 1;
6366 if (CASE_HIGH (t) != NULL
6367 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6368 {
6369 *idx = i;
6370 return true;
6371 }
6372 }
6373 }
6374
6375 *idx = high;
6376 return false;
6377 }
6378
6379 /* Searches the case labels of switch statement STMT for the range of CASE_LABELs used
6380 for values between MIN and MAX. The first index is placed in MIN_IDX. The
6381 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6382 then MAX_IDX < MIN_IDX.
6383 Returns true if the default label is not needed. */
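/* An illustrative example (labels invented): with case labels 2, 3 and
   4 and the range [2, 4] for MIN and MAX, MIN_IDX ... MAX_IDX covers
   exactly those three contiguous labels and true is returned, since
   the default label cannot be reached.  With the range [2, 6], MAX
   falls past the last label, the default may be taken, and false is
   returned.  */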
6384
6385 static bool
6386 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6387 size_t *max_idx)
6388 {
6389 size_t i, j;
6390 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6391 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6392
6393 if (i == j
6394 && min_take_default
6395 && max_take_default)
6396 {
6397 /* Only the default case label reached.
6398 Return an empty range. */
6399 *min_idx = 1;
6400 *max_idx = 0;
6401 return false;
6402 }
6403 else
6404 {
6405 bool take_default = min_take_default || max_take_default;
6406 tree low, high;
6407 size_t k;
6408
6409 if (max_take_default)
6410 j--;
6411
6412 /* If the case label range is contiguous, we do not need
6413 the default case label. Verify that. */
6414 high = CASE_LOW (gimple_switch_label (stmt, i));
6415 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6416 high = CASE_HIGH (gimple_switch_label (stmt, i));
6417 for (k = i + 1; k <= j; ++k)
6418 {
6419 low = CASE_LOW (gimple_switch_label (stmt, k));
6420 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6421 {
6422 take_default = true;
6423 break;
6424 }
6425 high = low;
6426 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6427 high = CASE_HIGH (gimple_switch_label (stmt, k));
6428 }
6429
6430 *min_idx = i;
6431 *max_idx = j;
6432 return !take_default;
6433 }
6434 }
6435
6436 /* Visit switch statement STMT. If we can determine which edge
6437 will be taken out of STMT's basic block, record it in
6438 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6439 SSA_PROP_VARYING. */
6440
6441 static enum ssa_prop_result
6442 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6443 {
6444 tree op, val;
6445 value_range_t *vr;
6446 size_t i = 0, j = 0;
6447 bool take_default;
6448
6449 *taken_edge_p = NULL;
6450 op = gimple_switch_index (stmt);
6451 if (TREE_CODE (op) != SSA_NAME)
6452 return SSA_PROP_VARYING;
6453
6454 vr = get_value_range (op);
6455 if (dump_file && (dump_flags & TDF_DETAILS))
6456 {
6457 fprintf (dump_file, "\nVisiting switch expression with operand ");
6458 print_generic_expr (dump_file, op, 0);
6459 fprintf (dump_file, " with known range ");
6460 dump_value_range (dump_file, vr);
6461 fprintf (dump_file, "\n");
6462 }
6463
6464 if (vr->type != VR_RANGE
6465 || symbolic_range_p (vr))
6466 return SSA_PROP_VARYING;
6467
6468 /* Find the single edge that is taken from the switch expression. */
6469 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6470
6471 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6472 label. */
6473 if (j < i)
6474 {
6475 gcc_assert (take_default);
6476 val = gimple_switch_default_label (stmt);
6477 }
6478 else
6479 {
6480 /* Check if labels with index i to j and maybe the default label
6481 are all reaching the same label. */
6482
6483 val = gimple_switch_label (stmt, i);
6484 if (take_default
6485 && CASE_LABEL (gimple_switch_default_label (stmt))
6486 != CASE_LABEL (val))
6487 {
6488 if (dump_file && (dump_flags & TDF_DETAILS))
6489 fprintf (dump_file, " not a single destination for this "
6490 "range\n");
6491 return SSA_PROP_VARYING;
6492 }
6493 for (++i; i <= j; ++i)
6494 {
6495 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6496 {
6497 if (dump_file && (dump_flags & TDF_DETAILS))
6498 fprintf (dump_file, " not a single destination for this "
6499 "range\n");
6500 return SSA_PROP_VARYING;
6501 }
6502 }
6503 }
6504
6505 *taken_edge_p = find_edge (gimple_bb (stmt),
6506 label_to_block (CASE_LABEL (val)));
6507
6508 if (dump_file && (dump_flags & TDF_DETAILS))
6509 {
6510 fprintf (dump_file, " will take edge to ");
6511 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6512 }
6513
6514 return SSA_PROP_INTERESTING;
6515 }
6516
6517
6518 /* Evaluate statement STMT. If the statement produces a useful range,
6519 return SSA_PROP_INTERESTING and record the SSA name with the
6520 interesting range into *OUTPUT_P.
6521
6522 If STMT is a conditional branch and we can determine its truth
6523 value, the taken edge is recorded in *TAKEN_EDGE_P.
6524
6525 If STMT produces a varying value, return SSA_PROP_VARYING. */
6526
6527 static enum ssa_prop_result
6528 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6529 {
6530 tree def;
6531 ssa_op_iter iter;
6532
6533 if (dump_file && (dump_flags & TDF_DETAILS))
6534 {
6535 fprintf (dump_file, "\nVisiting statement:\n");
6536 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6537 fprintf (dump_file, "\n");
6538 }
6539
6540 if (!stmt_interesting_for_vrp (stmt))
6541 gcc_assert (stmt_ends_bb_p (stmt));
6542 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6543 {
6544 /* In general, assignments with virtual operands are not useful
6545 for deriving ranges, with the obvious exception of calls to
6546 builtin functions. */
6547 if ((is_gimple_call (stmt)
6548 && gimple_call_fndecl (stmt) != NULL_TREE
6549 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6550 || !gimple_vuse (stmt))
6551 return vrp_visit_assignment_or_call (stmt, output_p);
6552 }
6553 else if (gimple_code (stmt) == GIMPLE_COND)
6554 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6555 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6556 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6557
6558 /* All other statements produce nothing of interest for VRP, so mark
6559 their outputs varying and prevent further simulation. */
6560 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6561 set_value_range_to_varying (get_value_range (def));
6562
6563 return SSA_PROP_VARYING;
6564 }
6565
6566
6567 /* Meet operation for value ranges. Given two value ranges VR0 and
6568 VR1, store in VR0 a range that contains both VR0 and VR1. This
6569 may not be the smallest possible such range. */
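/* For illustration (ranges invented): meeting [1, 5] with [3, 10]
   yields the convex hull [1, 10]; meeting ~[0, 0] with ~[0, 0] keeps
   ~[0, 0]; and meeting [1, 5] with ~[7, 9] yields ~[7, 9] because the
   two do not intersect.  When no precise meet can be computed, the
   result falls back to the nonnull range ~[0, 0] if both operands
   exclude zero, and to VARYING otherwise.  */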
6570
6571 static void
6572 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6573 {
6574 if (vr0->type == VR_UNDEFINED)
6575 {
6576 copy_value_range (vr0, vr1);
6577 return;
6578 }
6579
6580 if (vr1->type == VR_UNDEFINED)
6581 {
6582 /* Nothing to do. VR0 already has the resulting range. */
6583 return;
6584 }
6585
6586 if (vr0->type == VR_VARYING)
6587 {
6588 /* Nothing to do. VR0 already has the resulting range. */
6589 return;
6590 }
6591
6592 if (vr1->type == VR_VARYING)
6593 {
6594 set_value_range_to_varying (vr0);
6595 return;
6596 }
6597
6598 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6599 {
6600 int cmp;
6601 tree min, max;
6602
6603 /* Compute the convex hull of the ranges. The lower limit of
6604 the new range is the minimum of the two ranges. If they
6605 cannot be compared, then give up. */
6606 cmp = compare_values (vr0->min, vr1->min);
6607 if (cmp == 0 || cmp == 1)
6608 min = vr1->min;
6609 else if (cmp == -1)
6610 min = vr0->min;
6611 else
6612 goto give_up;
6613
6614 /* Similarly, the upper limit of the new range is the maximum
6615 of the two ranges. If they cannot be compared, then
6616 give up. */
6617 cmp = compare_values (vr0->max, vr1->max);
6618 if (cmp == 0 || cmp == -1)
6619 max = vr1->max;
6620 else if (cmp == 1)
6621 max = vr0->max;
6622 else
6623 goto give_up;
6624
6625 /* Check for useless ranges. */
6626 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6627 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6628 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6629 goto give_up;
6630
6631 /* The resulting set of equivalences is the intersection of
6632 the two sets. */
6633 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6634 bitmap_and_into (vr0->equiv, vr1->equiv);
6635 else if (vr0->equiv && !vr1->equiv)
6636 bitmap_clear (vr0->equiv);
6637
6638 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6639 }
6640 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6641 {
6642 /* Two anti-ranges meet only if their complements intersect.
6643 Only handle the case of identical ranges. */
6644 if (compare_values (vr0->min, vr1->min) == 0
6645 && compare_values (vr0->max, vr1->max) == 0
6646 && compare_values (vr0->min, vr0->max) == 0)
6647 {
6648 /* The resulting set of equivalences is the intersection of
6649 the two sets. */
6650 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6651 bitmap_and_into (vr0->equiv, vr1->equiv);
6652 else if (vr0->equiv && !vr1->equiv)
6653 bitmap_clear (vr0->equiv);
6654 }
6655 else
6656 goto give_up;
6657 }
6658 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6659 {
6660 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6661 only handle the case where the ranges have an empty intersection.
6662 The result of the meet operation is the anti-range. */
6663 if (!symbolic_range_p (vr0)
6664 && !symbolic_range_p (vr1)
6665 && !value_ranges_intersect_p (vr0, vr1))
6666 {
6667 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6668 set. We need to compute the intersection of the two
6669 equivalence sets. */
6670 if (vr1->type == VR_ANTI_RANGE)
6671 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6672
6673 /* The resulting set of equivalences is the intersection of
6674 the two sets. */
6675 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6676 bitmap_and_into (vr0->equiv, vr1->equiv);
6677 else if (vr0->equiv && !vr1->equiv)
6678 bitmap_clear (vr0->equiv);
6679 }
6680 else
6681 goto give_up;
6682 }
6683 else
6684 gcc_unreachable ();
6685
6686 return;
6687
6688 give_up:
6689 /* Failed to find an efficient meet. Before giving up and setting
6690 the result to VARYING, see if we can at least derive a useful
6691 anti-range. FIXME, all this nonsense about distinguishing
6692 anti-ranges from ranges is necessary because of the odd
6693 semantics of range_includes_zero_p and friends. */
6694 if (!symbolic_range_p (vr0)
6695 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6696 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6697 && !symbolic_range_p (vr1)
6698 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6699 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6700 {
6701 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6702
6703 /* Since this meet operation did not result from the meeting of
6704 two equivalent names, VR0 cannot have any equivalences. */
6705 if (vr0->equiv)
6706 bitmap_clear (vr0->equiv);
6707 }
6708 else
6709 set_value_range_to_varying (vr0);
6710 }
6711
6712
6713 /* Visit all arguments for PHI node PHI that flow through executable
6714 edges. If a valid value range can be derived from all the incoming
6715 value ranges, set a new range for the LHS of PHI. */
6716
6717 static enum ssa_prop_result
6718 vrp_visit_phi_node (gimple phi)
6719 {
6720 size_t i;
6721 tree lhs = PHI_RESULT (phi);
6722 value_range_t *lhs_vr = get_value_range (lhs);
6723 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6724 int edges, old_edges;
6725 struct loop *l;
6726
6727 if (dump_file && (dump_flags & TDF_DETAILS))
6728 {
6729 fprintf (dump_file, "\nVisiting PHI node: ");
6730 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6731 }
6732
6733 edges = 0;
6734 for (i = 0; i < gimple_phi_num_args (phi); i++)
6735 {
6736 edge e = gimple_phi_arg_edge (phi, i);
6737
6738 if (dump_file && (dump_flags & TDF_DETAILS))
6739 {
6740 fprintf (dump_file,
6741 "\n Argument #%d (%d -> %d %sexecutable)\n",
6742 (int) i, e->src->index, e->dest->index,
6743 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6744 }
6745
6746 if (e->flags & EDGE_EXECUTABLE)
6747 {
6748 tree arg = PHI_ARG_DEF (phi, i);
6749 value_range_t vr_arg;
6750
6751 ++edges;
6752
6753 if (TREE_CODE (arg) == SSA_NAME)
6754 {
6755 vr_arg = *(get_value_range (arg));
6756 }
6757 else
6758 {
6759 if (is_overflow_infinity (arg))
6760 {
6761 arg = copy_node (arg);
6762 TREE_OVERFLOW (arg) = 0;
6763 }
6764
6765 vr_arg.type = VR_RANGE;
6766 vr_arg.min = arg;
6767 vr_arg.max = arg;
6768 vr_arg.equiv = NULL;
6769 }
6770
6771 if (dump_file && (dump_flags & TDF_DETAILS))
6772 {
6773 fprintf (dump_file, "\t");
6774 print_generic_expr (dump_file, arg, dump_flags);
6775 fprintf (dump_file, "\n\tValue: ");
6776 dump_value_range (dump_file, &vr_arg);
6777 fprintf (dump_file, "\n");
6778 }
6779
6780 vrp_meet (&vr_result, &vr_arg);
6781
6782 if (vr_result.type == VR_VARYING)
6783 break;
6784 }
6785 }
6786
6787 if (vr_result.type == VR_VARYING)
6788 goto varying;
6789 else if (vr_result.type == VR_UNDEFINED)
6790 goto update_range;
6791
6792 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6793 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6794
6795 /* To prevent infinite iterations in the algorithm, derive ranges
6796 when the new value is slightly bigger or smaller than the
6797 previous one. We don't do this if we have seen a new executable
6798 edge; this helps us avoid an overflow infinity for conditionals
6799 which are not in a loop. */
6800 if (edges > 0
6801 && gimple_phi_num_args (phi) > 1
6802 && edges == old_edges)
6803 {
6804 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6805 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6806
6807 /* For non VR_RANGE or for pointers fall back to varying if
6808 the range changed. */
6809 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6810 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6811 && (cmp_min != 0 || cmp_max != 0))
6812 goto varying;
6813
6814 /* If the new minimum is smaller or larger than the previous
6815 one, go all the way to -INF. In the first case, to avoid
6816 iterating millions of times to reach -INF, and in the
6817 other case to avoid infinite bouncing between different
6818 minimums. */
6819 if (cmp_min > 0 || cmp_min < 0)
6820 {
6821 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6822 || !vrp_var_may_overflow (lhs, phi))
6823 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6824 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6825 vr_result.min =
6826 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6827 }
6828
6829 /* Similarly, if the new maximum is smaller or larger than
6830 the previous one, go all the way to +INF. */
6831 if (cmp_max < 0 || cmp_max > 0)
6832 {
6833 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6834 || !vrp_var_may_overflow (lhs, phi))
6835 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6836 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6837 vr_result.max =
6838 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6839 }
6840
6841 /* If we dropped either bound to +-INF then, if this is a loop
6842 PHI node, SCEV may know more about its value-range. */
6843 if ((cmp_min > 0 || cmp_min < 0
6844 || cmp_max < 0 || cmp_max > 0)
6845 && current_loops
6846 && (l = loop_containing_stmt (phi))
6847 && l->header == gimple_bb (phi))
6848 adjust_range_with_scev (&vr_result, l, phi, lhs);
6849
6850 /* If we will end up with a (-INF, +INF) range, set it to
6851 VARYING. Same if the previous max value was invalid for
6852 the type and we end up with vr_result.min > vr_result.max. */
6853 if ((vrp_val_is_max (vr_result.max)
6854 && vrp_val_is_min (vr_result.min))
6855 || compare_values (vr_result.min,
6856 vr_result.max) > 0)
6857 goto varying;
6858 }
6859
6860 /* If the new range is different than the previous value, keep
6861 iterating. */
6862 update_range:
6863 if (update_value_range (lhs, &vr_result))
6864 {
6865 if (dump_file && (dump_flags & TDF_DETAILS))
6866 {
6867 fprintf (dump_file, "Found new range for ");
6868 print_generic_expr (dump_file, lhs, 0);
6869 fprintf (dump_file, ": ");
6870 dump_value_range (dump_file, &vr_result);
6871 fprintf (dump_file, "\n\n");
6872 }
6873
6874 return SSA_PROP_INTERESTING;
6875 }
6876
6877 /* Nothing changed, don't add outgoing edges. */
6878 return SSA_PROP_NOT_INTERESTING;
6879
6880 /* No match found. Set the LHS to VARYING. */
6881 varying:
6882 set_value_range_to_varying (lhs_vr);
6883 return SSA_PROP_VARYING;
6884 }
6885
6886 /* Simplify boolean operations if the source is known
6887 to be already a boolean. */
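/* A worked example (SSA names invented): with _Bool operands a_1, b_2
   and an int result lhs_3,

     lhs_3 = a_1 != b_2;   becomes   tem_4 = a_1 ^ b_2;
                                     lhs_3 = (int) tem_4;

   An equality against a constant, say lhs_3 = a_1 == 1, is first
   rewritten as the inequality a_1 != 0 and then simplified to a copy
   of a_1; a comparison a_1 == b_2 with a non-constant b_2 is left
   alone.  */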
6888 static bool
6889 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6890 {
6891 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6892 tree lhs, op0, op1;
6893 bool need_conversion;
6894
6895 /* We handle only the !=/== cases here. */
6896 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
6897
6898 op0 = gimple_assign_rhs1 (stmt);
6899 if (!op_with_boolean_value_range_p (op0))
6900 return false;
6901
6902 op1 = gimple_assign_rhs2 (stmt);
6903 if (!op_with_boolean_value_range_p (op1))
6904 return false;
6905
6906 /* Reduce number of cases to handle to NE_EXPR. As there is no
6907 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
6908 if (rhs_code == EQ_EXPR)
6909 {
6910 if (TREE_CODE (op1) == INTEGER_CST)
6911 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
6912 else
6913 return false;
6914 }
6915
6916 lhs = gimple_assign_lhs (stmt);
6917 need_conversion
6918 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
6919
6920 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
6921 if (need_conversion
6922 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6923 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
6924 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
6925 return false;
6926
6927 /* For A != 0 we can substitute A itself. */
6928 if (integer_zerop (op1))
6929 gimple_assign_set_rhs_with_ops (gsi,
6930 need_conversion
6931 ? NOP_EXPR : TREE_CODE (op0),
6932 op0, NULL_TREE);
6933 /* For A != B we substitute A ^ B. Either with conversion. */
6934 else if (need_conversion)
6935 {
6936 gimple newop;
6937 tree tem = create_tmp_reg (TREE_TYPE (op0), NULL);
6938 newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
6939 tem = make_ssa_name (tem, newop);
6940 gimple_assign_set_lhs (newop, tem);
6941 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
6942 update_stmt (newop);
6943 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
6944 }
6945 /* Or without. */
6946 else
6947 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
6948 update_stmt (gsi_stmt (*gsi));
6949
6950 return true;
6951 }
6952
6953 /* Simplify a division or modulo operator to a right shift or
6954 bitwise and if the first operand is unsigned or is greater
6955 than zero and the second operand is an exact power of two. */
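/* For instance (ranges invented): if x_1 is known to lie in [0, 100],
   x_1 / 16 is rewritten as x_1 >> 4 and x_1 % 16 as x_1 & 15.  If
   proving that a signed x_1 is nonnegative relied on the assumption
   that signed overflow is undefined, the -Wstrict-overflow warning
   below is issued first.  */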
6956
6957 static bool
6958 simplify_div_or_mod_using_ranges (gimple stmt)
6959 {
6960 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6961 tree val = NULL;
6962 tree op0 = gimple_assign_rhs1 (stmt);
6963 tree op1 = gimple_assign_rhs2 (stmt);
6964 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6965
6966 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6967 {
6968 val = integer_one_node;
6969 }
6970 else
6971 {
6972 bool sop = false;
6973
6974 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6975
6976 if (val
6977 && sop
6978 && integer_onep (val)
6979 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6980 {
6981 location_t location;
6982
6983 if (!gimple_has_location (stmt))
6984 location = input_location;
6985 else
6986 location = gimple_location (stmt);
6987 warning_at (location, OPT_Wstrict_overflow,
6988 "assuming signed overflow does not occur when "
6989 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6990 }
6991 }
6992
6993 if (val && integer_onep (val))
6994 {
6995 tree t;
6996
6997 if (rhs_code == TRUNC_DIV_EXPR)
6998 {
6999 t = build_int_cst (integer_type_node, tree_log2 (op1));
7000 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
7001 gimple_assign_set_rhs1 (stmt, op0);
7002 gimple_assign_set_rhs2 (stmt, t);
7003 }
7004 else
7005 {
7006 t = build_int_cst (TREE_TYPE (op1), 1);
7007 t = int_const_binop (MINUS_EXPR, op1, t);
7008 t = fold_convert (TREE_TYPE (op0), t);
7009
7010 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
7011 gimple_assign_set_rhs1 (stmt, op0);
7012 gimple_assign_set_rhs2 (stmt, t);
7013 }
7014
7015 update_stmt (stmt);
7016 return true;
7017 }
7018
7019 return false;
7020 }
7021
7022 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
7023 ABS_EXPR. If the operand is <= 0, then simplify the
7024 ABS_EXPR into a NEGATE_EXPR. */
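/* Illustration (ranges invented): with x_2 in [0, 9], y_3 = ABS (x_2)
   becomes y_3 = x_2; with x_2 in [-9, -1] it becomes y_3 = -x_2.
   Unsigned operands are trivially nonnegative and always take the
   first form.  */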
7025
7026 static bool
7027 simplify_abs_using_ranges (gimple stmt)
7028 {
7029 tree val = NULL;
7030 tree op = gimple_assign_rhs1 (stmt);
7031 tree type = TREE_TYPE (op);
7032 value_range_t *vr = get_value_range (op);
7033
7034 if (TYPE_UNSIGNED (type))
7035 {
7036 val = integer_zero_node;
7037 }
7038 else if (vr)
7039 {
7040 bool sop = false;
7041
7042 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
7043 if (!val)
7044 {
7045 sop = false;
7046 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
7047 &sop);
7048
7049 if (val)
7050 {
7051 if (integer_zerop (val))
7052 val = integer_one_node;
7053 else if (integer_onep (val))
7054 val = integer_zero_node;
7055 }
7056 }
7057
7058 if (val
7059 && (integer_onep (val) || integer_zerop (val)))
7060 {
7061 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
7062 {
7063 location_t location;
7064
7065 if (!gimple_has_location (stmt))
7066 location = input_location;
7067 else
7068 location = gimple_location (stmt);
7069 warning_at (location, OPT_Wstrict_overflow,
7070 "assuming signed overflow does not occur when "
7071 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
7072 }
7073
7074 gimple_assign_set_rhs1 (stmt, op);
7075 if (integer_onep (val))
7076 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
7077 else
7078 gimple_assign_set_rhs_code (stmt, SSA_NAME);
7079 update_stmt (stmt);
7080 return true;
7081 }
7082 }
7083
7084 return false;
7085 }
7086
7087 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
7088 If all the bits that are being cleared by & are already
7089 known to be zero from VR, or all the bits that are being
7090 set by | are already known to be one from VR, the bit
7091 operation is redundant. */
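/* Sketch (ranges invented): if x_5 has the range [0, 15], only its low
   four bits may be nonzero, so x_5 & 0xff simplifies to x_5.  Likewise,
   if z_7 has the range [4, 7], bit 2 is known to be set, so z_7 | 4
   simplifies to z_7.  */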
7092
7093 static bool
7094 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7095 {
7096 tree op0 = gimple_assign_rhs1 (stmt);
7097 tree op1 = gimple_assign_rhs2 (stmt);
7098 tree op = NULL_TREE;
7099 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7100 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7101 double_int may_be_nonzero0, may_be_nonzero1;
7102 double_int must_be_nonzero0, must_be_nonzero1;
7103 double_int mask;
7104
7105 if (TREE_CODE (op0) == SSA_NAME)
7106 vr0 = *(get_value_range (op0));
7107 else if (is_gimple_min_invariant (op0))
7108 set_value_range_to_value (&vr0, op0, NULL);
7109 else
7110 return false;
7111
7112 if (TREE_CODE (op1) == SSA_NAME)
7113 vr1 = *(get_value_range (op1));
7114 else if (is_gimple_min_invariant (op1))
7115 set_value_range_to_value (&vr1, op1, NULL);
7116 else
7117 return false;
7118
7119 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7120 return false;
7121 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7122 return false;
7123
7124 switch (gimple_assign_rhs_code (stmt))
7125 {
7126 case BIT_AND_EXPR:
7127 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7128 if (double_int_zero_p (mask))
7129 {
7130 op = op0;
7131 break;
7132 }
7133 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7134 if (double_int_zero_p (mask))
7135 {
7136 op = op1;
7137 break;
7138 }
7139 break;
7140 case BIT_IOR_EXPR:
7141 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7142 if (double_int_zero_p (mask))
7143 {
7144 op = op1;
7145 break;
7146 }
7147 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7148 if (double_int_zero_p (mask))
7149 {
7150 op = op0;
7151 break;
7152 }
7153 break;
7154 default:
7155 gcc_unreachable ();
7156 }
7157
7158 if (op == NULL_TREE)
7159 return false;
7160
7161 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7162 update_stmt (gsi_stmt (*gsi));
7163 return true;
7164 }
7165
7166 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7167 a known value range VR.
7168
7169 If there is one and only one value which will satisfy the
7170 conditional, then return that value. Else return NULL. */
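/* For example (range invented): if OP0 has the range [0, 10] and the
   condition is OP0 > 9, the only value that satisfies it is 10, which
   is returned; simplify_cond_using_ranges below then rewrites
   OP0 > 9 as OP0 == 10, and OP0 <= 9 as OP0 != 10.  */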
7171
7172 static tree
7173 test_for_singularity (enum tree_code cond_code, tree op0,
7174 tree op1, value_range_t *vr)
7175 {
7176 tree min = NULL;
7177 tree max = NULL;
7178
7179 /* Extract minimum/maximum values which satisfy the
7180 conditional as it was written. */
7181 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7182 {
7183 /* This should not be negative infinity; there is no overflow
7184 here. */
7185 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7186
7187 max = op1;
7188 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7189 {
7190 tree one = build_int_cst (TREE_TYPE (op0), 1);
7191 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7192 if (EXPR_P (max))
7193 TREE_NO_WARNING (max) = 1;
7194 }
7195 }
7196 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7197 {
7198 /* This should not be positive infinity; there is no overflow
7199 here. */
7200 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7201
7202 min = op1;
7203 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7204 {
7205 tree one = build_int_cst (TREE_TYPE (op0), 1);
7206 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7207 if (EXPR_P (min))
7208 TREE_NO_WARNING (min) = 1;
7209 }
7210 }
7211
7212 /* Now refine the minimum and maximum values using any
7213 value range information we have for op0. */
7214 if (min && max)
7215 {
7216 if (compare_values (vr->min, min) == 1)
7217 min = vr->min;
7218 if (compare_values (vr->max, max) == -1)
7219 max = vr->max;
7220
7221 /* If the new min/max values have converged to a single value,
7222 then there is only one value which can satisfy the condition;
7223 return that value. */
7224 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7225 return min;
7226 }
7227 return NULL;
7228 }
7229
7230 /* Simplify a conditional using a relational operator to an equality
7231 test if the range information indicates only one value can satisfy
7232 the original conditional. */
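
/* For illustration (hypothetical names and ranges): with x_5 in
   [0, 10], the test "if (x_5 < 10)" has no unique satisfying value,
   but the inverted test x_5 >= 10 does (namely 10), so the condition
   is rewritten as "if (x_5 != 10)".  */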
7233
7234 static bool
7235 simplify_cond_using_ranges (gimple stmt)
7236 {
7237 tree op0 = gimple_cond_lhs (stmt);
7238 tree op1 = gimple_cond_rhs (stmt);
7239 enum tree_code cond_code = gimple_cond_code (stmt);
7240
7241 if (cond_code != NE_EXPR
7242 && cond_code != EQ_EXPR
7243 && TREE_CODE (op0) == SSA_NAME
7244 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7245 && is_gimple_min_invariant (op1))
7246 {
7247 value_range_t *vr = get_value_range (op0);
7248
7249 /* If we have range information for OP0, then we might be
7250 able to simplify this conditional. */
7251 if (vr->type == VR_RANGE)
7252 {
7253 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7254
7255 if (new_tree)
7256 {
7257 if (dump_file)
7258 {
7259 fprintf (dump_file, "Simplified relational ");
7260 print_gimple_stmt (dump_file, stmt, 0, 0);
7261 fprintf (dump_file, " into ");
7262 }
7263
7264 gimple_cond_set_code (stmt, EQ_EXPR);
7265 gimple_cond_set_lhs (stmt, op0);
7266 gimple_cond_set_rhs (stmt, new_tree);
7267
7268 update_stmt (stmt);
7269
7270 if (dump_file)
7271 {
7272 print_gimple_stmt (dump_file, stmt, 0, 0);
7273 fprintf (dump_file, "\n");
7274 }
7275
7276 return true;
7277 }
7278
7279 /* Try again after inverting the condition. We only deal
7280 with integral types here, so no need to worry about
7281 issues with inverting FP comparisons. */
7282 cond_code = invert_tree_comparison (cond_code, false);
7283 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7284
7285 if (new_tree)
7286 {
7287 if (dump_file)
7288 {
7289 fprintf (dump_file, "Simplified relational ");
7290 print_gimple_stmt (dump_file, stmt, 0, 0);
7291 fprintf (dump_file, " into ");
7292 }
7293
7294 gimple_cond_set_code (stmt, NE_EXPR);
7295 gimple_cond_set_lhs (stmt, op0);
7296 gimple_cond_set_rhs (stmt, new_tree);
7297
7298 update_stmt (stmt);
7299
7300 if (dump_file)
7301 {
7302 print_gimple_stmt (dump_file, stmt, 0, 0);
7303 fprintf (dump_file, "\n");
7304 }
7305
7306 return true;
7307 }
7308 }
7309 }
7310
7311 return false;
7312 }
7313
7314 /* Simplify a switch statement using the value range of the switch
7315 argument. */
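
/* For instance (hypothetical range): if the switch index is known to
   lie in [3, 5], only the case labels covering [3, 5] are kept (plus
   the default label if some value in [3, 5] has no label); the edges
   to all other case labels are queued for removal.  */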
7316
7317 static bool
7318 simplify_switch_using_ranges (gimple stmt)
7319 {
7320 tree op = gimple_switch_index (stmt);
7321 value_range_t *vr;
7322 bool take_default;
7323 edge e;
7324 edge_iterator ei;
7325 size_t i = 0, j = 0, n, n2;
7326 tree vec2;
7327 switch_update su;
7328
7329 if (TREE_CODE (op) == SSA_NAME)
7330 {
7331 vr = get_value_range (op);
7332
7333 /* We can only handle integer ranges. */
7334 if (vr->type != VR_RANGE
7335 || symbolic_range_p (vr))
7336 return false;
7337
7338 /* Find case label for min/max of the value range. */
7339 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7340 }
7341 else if (TREE_CODE (op) == INTEGER_CST)
7342 {
7343 take_default = !find_case_label_index (stmt, 1, op, &i);
7344 if (take_default)
7345 {
7346 i = 1;
7347 j = 0;
7348 }
7349 else
7350 {
7351 j = i;
7352 }
7353 }
7354 else
7355 return false;
7356
7357 n = gimple_switch_num_labels (stmt);
7358
7359 /* Bail out if all edges are still taken anyway. */
7360 if (i == 1
7361 && j == n - 1
7362 && take_default)
7363 return false;
7364
7365 /* Build a new vector of taken case labels. */
7366 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7367 n2 = 0;
7368
7369 /* Add the default edge, if necessary. */
7370 if (take_default)
7371 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7372
7373 for (; i <= j; ++i, ++n2)
7374 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7375
7376 /* Mark needed edges. */
7377 for (i = 0; i < n2; ++i)
7378 {
7379 e = find_edge (gimple_bb (stmt),
7380 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7381 e->aux = (void *)-1;
7382 }
7383
7384 /* Queue unneeded edges for later removal. */
7385 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7386 {
7387 if (e->aux == (void *)-1)
7388 {
7389 e->aux = NULL;
7390 continue;
7391 }
7392
7393 if (dump_file && (dump_flags & TDF_DETAILS))
7394 {
7395 fprintf (dump_file, "removing unreachable case label\n");
7396 }
7397 VEC_safe_push (edge, heap, to_remove_edges, e);
7398 e->flags &= ~EDGE_EXECUTABLE;
7399 }
7400
7401 /* And queue an update for the stmt. */
7402 su.stmt = stmt;
7403 su.vec = vec2;
7404 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7405 return false;
7406 }
7407
7408 /* Simplify an integral conversion from an SSA name in STMT. */
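
/* A sketch of the transformation on a hypothetical conversion chain:
   with an unsigned int x_1 known to be in [0, 100],

     tmp_2 = (signed char) x_1;
     lhs_3 = (int) tmp_2;

   the intermediate truncation cannot change the final value for any
   value in [0, 100], so the statement is rewritten to convert x_1
   directly:  lhs_3 = (int) x_1;  */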
7409
7410 static bool
7411 simplify_conversion_using_ranges (gimple stmt)
7412 {
7413 tree innerop, middleop, finaltype;
7414 gimple def_stmt;
7415 value_range_t *innervr;
7416 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
7417 unsigned inner_prec, middle_prec, final_prec;
7418 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
7419
7420 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7421 if (!INTEGRAL_TYPE_P (finaltype))
7422 return false;
7423 middleop = gimple_assign_rhs1 (stmt);
7424 def_stmt = SSA_NAME_DEF_STMT (middleop);
7425 if (!is_gimple_assign (def_stmt)
7426 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7427 return false;
7428 innerop = gimple_assign_rhs1 (def_stmt);
7429 if (TREE_CODE (innerop) != SSA_NAME)
7430 return false;
7431
7432 /* Get the value-range of the inner operand. */
7433 innervr = get_value_range (innerop);
7434 if (innervr->type != VR_RANGE
7435 || TREE_CODE (innervr->min) != INTEGER_CST
7436 || TREE_CODE (innervr->max) != INTEGER_CST)
7437 return false;
7438
7439 /* Simulate the conversion chain to check whether the result stays
7440 the same if the middle conversion is removed. */
7441 innermin = tree_to_double_int (innervr->min);
7442 innermax = tree_to_double_int (innervr->max);
7443
7444 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
7445 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
7446 final_prec = TYPE_PRECISION (finaltype);
7447
7448 /* If the first conversion is not injective, the second must not
7449 be widening. */
7450 if (double_int_cmp (double_int_sub (innermax, innermin),
7451 double_int_mask (middle_prec), true) > 0
7452 && middle_prec < final_prec)
7453 return false;
7454 /* We also want a value in the middle of the range so that we can
7455 track the effect that narrowing conversions with sign change have. */
7456 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
7457 if (inner_unsigned_p)
7458 innermed = double_int_rshift (double_int_mask (inner_prec),
7459 1, inner_prec, false);
7460 else
7461 innermed = double_int_zero;
7462 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
7463 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
7464 innermed = innermin;
7465
7466 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
7467 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
7468 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
7469 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
7470
7471 /* Require that the final conversion applied to both the original
7472 and the intermediate range produces the same result. */
7473 final_unsigned_p = TYPE_UNSIGNED (finaltype);
7474 if (!double_int_equal_p (double_int_ext (middlemin,
7475 final_prec, final_unsigned_p),
7476 double_int_ext (innermin,
7477 final_prec, final_unsigned_p))
7478 || !double_int_equal_p (double_int_ext (middlemed,
7479 final_prec, final_unsigned_p),
7480 double_int_ext (innermed,
7481 final_prec, final_unsigned_p))
7482 || !double_int_equal_p (double_int_ext (middlemax,
7483 final_prec, final_unsigned_p),
7484 double_int_ext (innermax,
7485 final_prec, final_unsigned_p)))
7486 return false;
7487
7488 gimple_assign_set_rhs1 (stmt, innerop);
7489 update_stmt (stmt);
7490 return true;
7491 }
7492
7493 /* Return whether the value range *VR fits in an integer type specified
7494 by PRECISION and UNSIGNED_P. */
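
/* Hypothetical examples: the range [0, 200] of a 32-bit int fits an
   8-bit unsigned type but not an 8-bit signed one; [-5, 5] fits
   neither a 16-bit nor a 32-bit unsigned type, although extending to
   a wider type is always considered to fit.  */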
7495
7496 static bool
7497 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
7498 {
7499 tree src_type;
7500 unsigned src_precision;
7501 double_int tem;
7502
7503 /* We can only handle integral and pointer types. */
7504 src_type = TREE_TYPE (vr->min);
7505 if (!INTEGRAL_TYPE_P (src_type)
7506 && !POINTER_TYPE_P (src_type))
7507 return false;
7508
7509 /* An extension is always fine; so is an identity transform. */
7510 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
7511 if (src_precision < precision
7512 || (src_precision == precision
7513 && TYPE_UNSIGNED (src_type) == unsigned_p))
7514 return true;
7515
7516 /* Now we can only handle ranges with constant bounds. */
7517 if (vr->type != VR_RANGE
7518 || TREE_CODE (vr->min) != INTEGER_CST
7519 || TREE_CODE (vr->max) != INTEGER_CST)
7520 return false;
7521
7522 /* For precision-preserving sign-changes the MSB of the double-int
7523 has to be clear. */
7524 if (src_precision == precision
7525 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
7526 return false;
7527
7528 /* Then we can perform the conversion on both ends and compare
7529 the result for equality. */
7530 tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
7531 if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
7532 return false;
7533 tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
7534 if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
7535 return false;
7536
7537 return true;
7538 }
7539
7540 /* Simplify a conversion from an integral SSA name to a float in STMT. */
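
/* For example (hypothetical): if an unsigned int x_1 known to be in
   [0, 1000] is converted to double and the target lacks an unsigned
   SImode-to-double instruction, the range fits a signed type, so the
   conversion can be performed as a signed one, possibly after
   inserting a narrowing NOP_EXPR to a smaller signed mode.  */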
7541
7542 static bool
7543 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7544 {
7545 tree rhs1 = gimple_assign_rhs1 (stmt);
7546 value_range_t *vr = get_value_range (rhs1);
7547 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
7548 enum machine_mode mode;
7549 tree tem;
7550 gimple conv;
7551
7552 /* We can only handle constant ranges. */
7553 if (vr->type != VR_RANGE
7554 || TREE_CODE (vr->min) != INTEGER_CST
7555 || TREE_CODE (vr->max) != INTEGER_CST)
7556 return false;
7557
7558 /* First check if we can use a signed type in place of an unsigned. */
7559 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
7560 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
7561 != CODE_FOR_nothing)
7562 && range_fits_type_p (vr, GET_MODE_PRECISION
7563 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
7564 mode = TYPE_MODE (TREE_TYPE (rhs1));
7565 /* If we can do the conversion in the current input mode, do nothing. */
7566 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
7567 TYPE_UNSIGNED (TREE_TYPE (rhs1))))
7568 return false;
7569 /* Otherwise search for a mode we can use, starting from the narrowest
7570 integer mode available. */
7571 else
7572 {
7573 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
7574 do
7575 {
7576 /* If we cannot do a signed conversion to float from mode,
7577 or if the value-range does not fit in the signed type,
7578 try with a wider mode. */
7579 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
7580 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
7581 break;
7582
7583 mode = GET_MODE_WIDER_MODE (mode);
7584 /* But do not widen the input. Instead leave that to the
7585 optabs expansion code. */
7586 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
7587 return false;
7588 }
7589 while (mode != VOIDmode);
7590 if (mode == VOIDmode)
7591 return false;
7592 }
7593
7594 /* It works; insert a truncation or sign-change before the
7595 float conversion. */
7596 tem = create_tmp_var (build_nonstandard_integer_type
7597 (GET_MODE_PRECISION (mode), 0), NULL);
7598 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
7599 tem = make_ssa_name (tem, conv);
7600 gimple_assign_set_lhs (conv, tem);
7601 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
7602 gimple_assign_set_rhs1 (stmt, tem);
7603 update_stmt (stmt);
7604
7605 return true;
7606 }
7607
7608 /* Simplify STMT using ranges if possible. */
7609
7610 static bool
7611 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7612 {
7613 gimple stmt = gsi_stmt (*gsi);
7614 if (is_gimple_assign (stmt))
7615 {
7616 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7617 tree rhs1 = gimple_assign_rhs1 (stmt);
7618
7619 switch (rhs_code)
7620 {
7621 case EQ_EXPR:
7622 case NE_EXPR:
7623 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
7624 if the RHS is zero or one, and the LHS is known to be a boolean
7625 value. */
7626 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7627 return simplify_truth_ops_using_ranges (gsi, stmt);
7628 break;
7629
7630 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7631 and BIT_AND_EXPR respectively if the first operand is greater
7632 than zero and the second operand is an exact power of two. */
7633 case TRUNC_DIV_EXPR:
7634 case TRUNC_MOD_EXPR:
7635 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7636 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7637 return simplify_div_or_mod_using_ranges (stmt);
7638 break;
7639
7640 /* Transform ABS (X) into X or -X as appropriate. */
7641 case ABS_EXPR:
7642 if (TREE_CODE (rhs1) == SSA_NAME
7643 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7644 return simplify_abs_using_ranges (stmt);
7645 break;
7646
7647 case BIT_AND_EXPR:
7648 case BIT_IOR_EXPR:
7649 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7650 if all the bits being cleared are already cleared or
7651 all the bits being set are already set. */
7652 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7653 return simplify_bit_ops_using_ranges (gsi, stmt);
7654 break;
7655
7656 CASE_CONVERT:
7657 if (TREE_CODE (rhs1) == SSA_NAME
7658 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7659 return simplify_conversion_using_ranges (stmt);
7660 break;
7661
7662 case FLOAT_EXPR:
7663 if (TREE_CODE (rhs1) == SSA_NAME
7664 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7665 return simplify_float_conversion_using_ranges (gsi, stmt);
7666 break;
7667
7668 default:
7669 break;
7670 }
7671 }
7672 else if (gimple_code (stmt) == GIMPLE_COND)
7673 return simplify_cond_using_ranges (stmt);
7674 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7675 return simplify_switch_using_ranges (stmt);
7676
7677 return false;
7678 }
7679
7680 /* If the statement pointed to by SI has a predicate whose value can be
7681 computed using the value range information computed by VRP, compute
7682 its value and return true. Otherwise, return false. */
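
/* For instance (hypothetical): if x_3 has range [1, 5], the predicate
   in "if (x_3 != 0)" always evaluates to true, so the GIMPLE_COND is
   folded to an unconditionally true condition.  */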
7683
7684 static bool
7685 fold_predicate_in (gimple_stmt_iterator *si)
7686 {
7687 bool assignment_p = false;
7688 tree val;
7689 gimple stmt = gsi_stmt (*si);
7690
7691 if (is_gimple_assign (stmt)
7692 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7693 {
7694 assignment_p = true;
7695 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7696 gimple_assign_rhs1 (stmt),
7697 gimple_assign_rhs2 (stmt),
7698 stmt);
7699 }
7700 else if (gimple_code (stmt) == GIMPLE_COND)
7701 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7702 gimple_cond_lhs (stmt),
7703 gimple_cond_rhs (stmt),
7704 stmt);
7705 else
7706 return false;
7707
7708 if (val)
7709 {
7710 if (assignment_p)
7711 val = fold_convert (gimple_expr_type (stmt), val);
7712
7713 if (dump_file)
7714 {
7715 fprintf (dump_file, "Folding predicate ");
7716 print_gimple_expr (dump_file, stmt, 0, 0);
7717 fprintf (dump_file, " to ");
7718 print_generic_expr (dump_file, val, 0);
7719 fprintf (dump_file, "\n");
7720 }
7721
7722 if (is_gimple_assign (stmt))
7723 gimple_assign_set_rhs_from_tree (si, val);
7724 else
7725 {
7726 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7727 if (integer_zerop (val))
7728 gimple_cond_make_false (stmt);
7729 else if (integer_onep (val))
7730 gimple_cond_make_true (stmt);
7731 else
7732 gcc_unreachable ();
7733 }
7734
7735 return true;
7736 }
7737
7738 return false;
7739 }
7740
7741 /* Callback for substitute_and_fold folding the stmt at *SI. */
7742
7743 static bool
7744 vrp_fold_stmt (gimple_stmt_iterator *si)
7745 {
7746 if (fold_predicate_in (si))
7747 return true;
7748
7749 return simplify_stmt_using_ranges (si);
7750 }
7751
7752 /* Stack of dest,src equivalency pairs that need to be restored after
7753 each attempt to thread a block's incoming edge to an outgoing edge.
7754
7755 A NULL entry is used to mark the end of pairs which need to be
7756 restored. */
7757 static VEC(tree,heap) *stack;
7758
7759 /* A trivial wrapper so that we can present the generic jump threading
7760 code with a simple API for simplifying statements. STMT is the
7761 statement we want to simplify; WITHIN_STMT provides the location
7762 for any overflow warnings. */
7763
7764 static tree
7765 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7766 {
7767 /* We only use VRP information to simplify conditionals. This is
7768 overly conservative, but it's unclear if doing more would be
7769 worth the compile time cost. */
7770 if (gimple_code (stmt) != GIMPLE_COND)
7771 return NULL;
7772
7773 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7774 gimple_cond_lhs (stmt),
7775 gimple_cond_rhs (stmt), within_stmt);
7776 }
7777
7778 /* Blocks which have more than one predecessor and more than
7779 one successor present jump threading opportunities, i.e.,
7780 when the block is reached from a specific predecessor, we
7781 may be able to determine which of the outgoing edges will
7782 be traversed. When this optimization applies, we are able
7783 to avoid conditionals at runtime and we may expose secondary
7784 optimization opportunities.
7785
7786 This routine is effectively a driver for the generic jump
7787 threading code. It basically just presents the generic code
7788 with edges that may be suitable for jump threading.
7789
7790 Unlike DOM, we do not iterate VRP if jump threading was successful.
7791 While iterating may expose new opportunities for VRP, it is expected
7792 those opportunities would be very limited and the compile time cost
7793 to expose those opportunities would be significant.
7794
7795 As jump threading opportunities are discovered, they are registered
7796 for later realization. */
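
/* A hypothetical example: if a block ends in "if (x_2 > 10)" and,
   when entered from one particular predecessor, x_2 is known to be
   in [0, 5], the incoming edge can be threaded directly to the false
   successor, bypassing the runtime test.  */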
7797
7798 static void
7799 identify_jump_threads (void)
7800 {
7801 basic_block bb;
7802 gimple dummy;
7803 int i;
7804 edge e;
7805
7806 /* Ugh. When substituting values earlier in this pass we can
7807 wipe the dominance information. So rebuild the dominator
7808 information as we need it within the jump threading code. */
7809 calculate_dominance_info (CDI_DOMINATORS);
7810
7811 /* We do not allow VRP information to be used for jump threading
7812 across a back edge in the CFG. Otherwise it becomes too
7813 difficult to avoid eliminating loop exit tests. Of course
7814 EDGE_DFS_BACK is not accurate at this time so we have to
7815 recompute it. */
7816 mark_dfs_back_edges ();
7817
7818 /* Do not thread across edges we are about to remove. Just marking
7819 them as EDGE_DFS_BACK will do. */
7820 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7821 e->flags |= EDGE_DFS_BACK;
7822
7823 /* Allocate our unwinder stack to unwind any temporary equivalences
7824 that might be recorded. */
7825 stack = VEC_alloc (tree, heap, 20);
7826
7827 /* To avoid lots of silly node creation, we create a single
7828 conditional and just modify it in-place when attempting to
7829 thread jumps. */
7830 dummy = gimple_build_cond (EQ_EXPR,
7831 integer_zero_node, integer_zero_node,
7832 NULL, NULL);
7833
7834 /* Walk through all the blocks finding those which present a
7835 potential jump threading opportunity. We could set this up
7836 as a dominator walker and record data during the walk, but
7837 I doubt it's worth the effort for the classes of jump
7838 threading opportunities we are trying to identify at this
7839 point in compilation. */
7840 FOR_EACH_BB (bb)
7841 {
7842 gimple last;
7843
7844 /* If the generic jump threading code does not find this block
7845 interesting, then there is nothing to do. */
7846 if (! potentially_threadable_block (bb))
7847 continue;
7848
7849 /* We only care about blocks ending in a GIMPLE_COND or a
7850 GIMPLE_SWITCH; anything else is rejected by the check on the
7851 last statement below. */
7852 last = gsi_stmt (gsi_last_bb (bb));
7853
7854 /* We're basically looking for a switch or any kind of conditional with
7855 integral or pointer type arguments. Note the type of the second
7856 argument will be the same as the first argument, so no need to
7857 check it explicitly. */
7858 if (gimple_code (last) == GIMPLE_SWITCH
7859 || (gimple_code (last) == GIMPLE_COND
7860 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7861 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7862 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
7863 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7864 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
7865 {
7866 edge_iterator ei;
7867
7868 /* We've got a block with multiple predecessors and multiple
7869 successors which also ends in a suitable conditional or
7870 switch statement. For each predecessor, see if we can thread
7871 it to a specific successor. */
7872 FOR_EACH_EDGE (e, ei, bb->preds)
7873 {
7874 /* Do not thread across back edges or abnormal edges
7875 in the CFG. */
7876 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7877 continue;
7878
7879 thread_across_edge (dummy, e, true, &stack,
7880 simplify_stmt_for_jump_threading);
7881 }
7882 }
7883 }
7884
7885 /* We do not actually update the CFG or SSA graphs at this point as
7886 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7887 handle ASSERT_EXPRs gracefully. */
7888 }
7889
7890 /* We identified all the jump threading opportunities earlier, but could
7891 not transform the CFG at that time. This routine transforms the
7892 CFG and arranges for the dominator tree to be rebuilt if necessary.
7893
7894 Note the SSA graph update will occur during the normal TODO
7895 processing by the pass manager. */
7896 static void
7897 finalize_jump_threads (void)
7898 {
7899 thread_through_all_blocks (false);
7900 VEC_free (tree, heap, stack);
7901 }
7902
7903
7904 /* Traverse all the blocks folding conditionals with known ranges. */
7905
7906 static void
7907 vrp_finalize (void)
7908 {
7909 size_t i;
7910
7911 values_propagated = true;
7912
7913 if (dump_file)
7914 {
7915 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7916 dump_all_value_ranges (dump_file);
7917 fprintf (dump_file, "\n");
7918 }
7919
7920 substitute_and_fold (op_with_constant_singleton_value_range,
7921 vrp_fold_stmt, false);
7922
7923 if (warn_array_bounds)
7924 check_all_array_refs ();
7925
7926 /* We must identify jump threading opportunities before we release
7927 the datastructures built by VRP. */
7928 identify_jump_threads ();
7929
7930 /* Free allocated memory. */
7931 for (i = 0; i < num_vr_values; i++)
7932 if (vr_value[i])
7933 {
7934 BITMAP_FREE (vr_value[i]->equiv);
7935 free (vr_value[i]);
7936 }
7937
7938 free (vr_value);
7939 free (vr_phi_edge_counts);
7940
7941 /* So that we can distinguish between VRP data being available
7942 and not available. */
7943 vr_value = NULL;
7944 vr_phi_edge_counts = NULL;
7945 }
7946
7947
7948 /* Main entry point to VRP (Value Range Propagation). This pass is
7949 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7950 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7951 Programming Language Design and Implementation, pp. 67-78, 1995.
7952 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7953
7954 This is essentially an SSA-CCP pass modified to deal with ranges
7955 instead of constants.
7956
7957 While propagating ranges, we may find that two or more SSA names
7958 have equivalent, though distinct, ranges. For instance,
7959
7960 1 x_9 = p_3->a;
7961 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7962 3 if (p_4 == q_2)
7963 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7964 5 endif
7965 6 if (q_2)
7966
7967 In the code above, pointer p_5 has range [q_2, q_2], but from the
7968 code we can also determine that p_5 cannot be NULL and, if q_2 had
7969 a non-varying range, p_5's range should also be compatible with it.
7970
7971 These equivalences are created by two expressions: ASSERT_EXPR and
7972 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7973 result of another assertion, then we can use the fact that p_5 and
7974 p_4 are equivalent when evaluating p_5's range.
7975
7976 Together with value ranges, we also propagate these equivalences
7977 between names so that we can take advantage of information from
7978 multiple ranges when doing final replacement. Note that this
7979 equivalency relation is transitive but not symmetric.
7980
7981 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7982 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7983 in contexts where that assertion does not hold (e.g., in line 6).
7984
7985 TODO: the main difference between this pass and Patterson's is that
7986 we do not propagate edge probabilities. We only compute whether
7987 edges can be taken or not. That is, instead of having a spectrum
7988 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7989 DON'T KNOW. In the future, it may be worthwhile to propagate
7990 probabilities to aid branch prediction. */
7991
7992 static unsigned int
7993 execute_vrp (void)
7994 {
7995 int i;
7996 edge e;
7997 switch_update *su;
7998
7999 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
8000 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
8001 scev_initialize ();
8002
8003 insert_range_assertions ();
8004
8005 /* Estimate number of iterations - but do not use undefined behavior
8006 for this. We can't do this lazily as other functions may compute
8007 this using undefined behavior. */
8008 free_numbers_of_iterations_estimates ();
8009 estimate_numbers_of_iterations (false);
8010
8011 to_remove_edges = VEC_alloc (edge, heap, 10);
8012 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
8013 threadedge_initialize_values ();
8014
8015 vrp_initialize ();
8016 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
8017 vrp_finalize ();
8018
8019 free_numbers_of_iterations_estimates ();
8020
8021 /* ASSERT_EXPRs must be removed before finalizing jump threads
8022 as finalizing jump threads calls the CFG cleanup code which
8023 does not properly handle ASSERT_EXPRs. */
8024 remove_range_assertions ();
8025
8026 /* If we exposed any new variables, go ahead and put them into
8027 SSA form now, before we handle jump threading. This simplifies
8028 interactions between rewriting of _DECL nodes into SSA form
8029 and rewriting SSA_NAME nodes into SSA form after block
8030 duplication and CFG manipulation. */
8031 update_ssa (TODO_update_ssa);
8032
8033 finalize_jump_threads ();
8034
8035 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
8036 CFG in a broken state and requires a cfg_cleanup run. */
8037 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
8038 remove_edge (e);
8039 /* Update SWITCH_EXPR case label vector. */
8040 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
8041 {
8042 size_t j;
8043 size_t n = TREE_VEC_LENGTH (su->vec);
8044 tree label;
8045 gimple_switch_set_num_labels (su->stmt, n);
8046 for (j = 0; j < n; j++)
8047 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
8048 /* As we may have replaced the default label with a regular one,
8049 make sure to make it a real default label again. This ensures
8050 optimal expansion. */
8051 label = gimple_switch_default_label (su->stmt);
8052 CASE_LOW (label) = NULL_TREE;
8053 CASE_HIGH (label) = NULL_TREE;
8054 }
8055
8056 if (VEC_length (edge, to_remove_edges) > 0)
8057 free_dominance_info (CDI_DOMINATORS);
8058
8059 VEC_free (edge, heap, to_remove_edges);
8060 VEC_free (switch_update, heap, to_update_switch_stmts);
8061 threadedge_finalize_values ();
8062
8063 scev_finalize ();
8064 loop_optimizer_finalize ();
8065 return 0;
8066 }
8067
8068 static bool
8069 gate_vrp (void)
8070 {
8071 return flag_tree_vrp != 0;
8072 }
8073
8074 struct gimple_opt_pass pass_vrp =
8075 {
8076 {
8077 GIMPLE_PASS,
8078 "vrp", /* name */
8079 gate_vrp, /* gate */
8080 execute_vrp, /* execute */
8081 NULL, /* sub */
8082 NULL, /* next */
8083 0, /* static_pass_number */
8084 TV_TREE_VRP, /* tv_id */
8085 PROP_ssa, /* properties_required */
8086 0, /* properties_provided */
8087 0, /* properties_destroyed */
8088 0, /* todo_flags_start */
8089 TODO_cleanup_cfg
8090 | TODO_update_ssa
8091 | TODO_verify_ssa
8092 | TODO_verify_flow
8093 | TODO_ggc_collect /* todo_flags_finish */
8094 }
8095 };