1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
43 #include "expr.h"
44 #include "optabs.h"
45
46
47 /* Type of value ranges. See value_range_d for a description of these
48 types. */
49 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
50
51 /* Range of values that can be associated with an SSA_NAME after VRP
52 has executed. */
53 struct value_range_d
54 {
55 /* Lattice value represented by this range. */
56 enum value_range_type type;
57
58 /* Minimum and maximum values represented by this range. These
59 values should be interpreted as follows:
60
61 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
62 be NULL.
63
64 - If TYPE == VR_RANGE then MIN holds the minimum value and
65 MAX holds the maximum value of the range [MIN, MAX].
66
67 - If TYPE == VR_ANTI_RANGE the variable is known to NOT
68 take any values in the range [MIN, MAX]. */
69 tree min;
70 tree max;
71
72 /* Set of SSA names whose value ranges are equivalent to this one.
73 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
74 bitmap equiv;
75 };
76
77 typedef struct value_range_d value_range_t;
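
/* For instance, the set of values [10, 20] of an unsigned int variable
   is represented as a VR_RANGE with MIN = 10 and MAX = 20; a pointer
   known to be non-NULL is represented as the VR_ANTI_RANGE ~[0, 0];
   and a name about which nothing is known is simply VR_VARYING with
   NULL MIN and MAX.  */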
78
79 /* Set of SSA names found live during the RPO traversal of the function
80 for still active basic-blocks. */
81 static sbitmap *live;
82
83 /* Return true if the SSA name NAME is live on the edge E. */
84
85 static bool
86 live_on_edge (edge e, tree name)
87 {
88 return (live[e->dest->index]
89 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
90 }
91
92 /* Local functions. */
93 static int compare_values (tree val1, tree val2);
94 static int compare_values_warnv (tree val1, tree val2, bool *);
95 static void vrp_meet (value_range_t *, value_range_t *);
96 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
97 tree, tree, bool, bool *,
98 bool *);
99
100 /* Location information for ASSERT_EXPRs. Each instance of this
101 structure describes an ASSERT_EXPR for an SSA name. Since a single
102 SSA name may have more than one assertion associated with it, these
103 locations are kept in a linked list attached to the corresponding
104 SSA name. */
105 struct assert_locus_d
106 {
107 /* Basic block where the assertion would be inserted. */
108 basic_block bb;
109
110 /* Some assertions need to be inserted on an edge (e.g., assertions
111 generated by COND_EXPRs). In those cases, BB will be NULL. */
112 edge e;
113
114 /* Pointer to the statement that generated this assertion. */
115 gimple_stmt_iterator si;
116
117 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
118 enum tree_code comp_code;
119
120 /* Value being compared against. */
121 tree val;
122
123 /* Expression to compare. */
124 tree expr;
125
126 /* Next node in the linked list. */
127 struct assert_locus_d *next;
128 };
129
130 typedef struct assert_locus_d *assert_locus_t;
131
132 /* If bit I is present, it means that SSA name N_i has a list of
133 assertions that should be inserted in the IL. */
134 static bitmap need_assert_for;
135
136 /* Array of location lists where assertions should be inserted. ASSERTS_FOR[I]
137 holds a list of ASSERT_LOCUS_T nodes that describe where
138 ASSERT_EXPRs for SSA name N_I should be inserted. */
139 static assert_locus_t *asserts_for;
140
141 /* Value range array. After propagation, VR_VALUE[I] holds the range
142 of values that SSA name N_I may take. */
143 static unsigned num_vr_values;
144 static value_range_t **vr_value;
145 static bool values_propagated;
146
147 /* For a PHI node which sets SSA name N_I, VR_PHI_EDGE_COUNTS[I] holds the
148 number of executable edges we saw the last time we visited the
149 node. */
150 static int *vr_phi_edge_counts;
151
152 typedef struct {
153 gimple stmt;
154 tree vec;
155 } switch_update;
156
157 static VEC (edge, heap) *to_remove_edges;
158 DEF_VEC_O(switch_update);
159 DEF_VEC_ALLOC_O(switch_update, heap);
160 static VEC (switch_update, heap) *to_update_switch_stmts;
161
162
163 /* Return the maximum value for TYPE. */
164
165 static inline tree
166 vrp_val_max (const_tree type)
167 {
168 if (!INTEGRAL_TYPE_P (type))
169 return NULL_TREE;
170
171 return TYPE_MAX_VALUE (type);
172 }
173
174 /* Return the minimum value for TYPE. */
175
176 static inline tree
177 vrp_val_min (const_tree type)
178 {
179 if (!INTEGRAL_TYPE_P (type))
180 return NULL_TREE;
181
182 return TYPE_MIN_VALUE (type);
183 }
184
185 /* Return whether VAL is equal to the maximum value of its type. This
186 will be true for a positive overflow infinity. We can't do a
187 simple equality comparison with TYPE_MAX_VALUE because C typedefs
188 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
189 to the integer constant with the same value in the type. */
190
191 static inline bool
192 vrp_val_is_max (const_tree val)
193 {
194 tree type_max = vrp_val_max (TREE_TYPE (val));
195 return (val == type_max
196 || (type_max != NULL_TREE
197 && operand_equal_p (val, type_max, 0)));
198 }
199
200 /* Return whether VAL is equal to the minimum value of its type. This
201 will be true for a negative overflow infinity. */
202
203 static inline bool
204 vrp_val_is_min (const_tree val)
205 {
206 tree type_min = vrp_val_min (TREE_TYPE (val));
207 return (val == type_min
208 || (type_min != NULL_TREE
209 && operand_equal_p (val, type_min, 0)));
210 }
211
212
213 /* Return whether TYPE should use an overflow infinity distinct from
214 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
215 represent a signed overflow during VRP computations. An infinity
216 is distinct from a half-range, which will go from some number to
217 TYPE_{MIN,MAX}_VALUE. */
218
219 static inline bool
220 needs_overflow_infinity (const_tree type)
221 {
222 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
223 }
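
/* For instance, if a signed int N_1 is known to be in [1, INT_MAX] and
   signed overflow is assumed not to happen, the range of N_1 + 1 would
   ideally be [2, INT_MAX + 1].  Since INT_MAX + 1 does not exist in the
   type, the upper bound is instead represented by a positive overflow
   infinity: a copy of TYPE_MAX_VALUE with TREE_OVERFLOW set (see
   make_overflow_infinity below).  */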
224
225 /* Return whether TYPE can support our overflow infinity
226 representation: we use the TREE_OVERFLOW flag, which only exists
227 for constants. If TYPE doesn't support this, we don't optimize
228 cases which would require signed overflow--we drop them to
229 VARYING. */
230
231 static inline bool
232 supports_overflow_infinity (const_tree type)
233 {
234 tree min = vrp_val_min (type), max = vrp_val_max (type);
235 #ifdef ENABLE_CHECKING
236 gcc_assert (needs_overflow_infinity (type));
237 #endif
238 return (min != NULL_TREE
239 && CONSTANT_CLASS_P (min)
240 && max != NULL_TREE
241 && CONSTANT_CLASS_P (max));
242 }
243
244 /* VAL is the maximum or minimum value of a type. Return a
245 corresponding overflow infinity. */
246
247 static inline tree
248 make_overflow_infinity (tree val)
249 {
250 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
251 val = copy_node (val);
252 TREE_OVERFLOW (val) = 1;
253 return val;
254 }
255
256 /* Return a negative overflow infinity for TYPE. */
257
258 static inline tree
259 negative_overflow_infinity (tree type)
260 {
261 gcc_checking_assert (supports_overflow_infinity (type));
262 return make_overflow_infinity (vrp_val_min (type));
263 }
264
265 /* Return a positive overflow infinity for TYPE. */
266
267 static inline tree
268 positive_overflow_infinity (tree type)
269 {
270 gcc_checking_assert (supports_overflow_infinity (type));
271 return make_overflow_infinity (vrp_val_max (type));
272 }
273
274 /* Return whether VAL is a negative overflow infinity. */
275
276 static inline bool
277 is_negative_overflow_infinity (const_tree val)
278 {
279 return (needs_overflow_infinity (TREE_TYPE (val))
280 && CONSTANT_CLASS_P (val)
281 && TREE_OVERFLOW (val)
282 && vrp_val_is_min (val));
283 }
284
285 /* Return whether VAL is a positive overflow infinity. */
286
287 static inline bool
288 is_positive_overflow_infinity (const_tree val)
289 {
290 return (needs_overflow_infinity (TREE_TYPE (val))
291 && CONSTANT_CLASS_P (val)
292 && TREE_OVERFLOW (val)
293 && vrp_val_is_max (val));
294 }
295
296 /* Return whether VAL is a positive or negative overflow infinity. */
297
298 static inline bool
299 is_overflow_infinity (const_tree val)
300 {
301 return (needs_overflow_infinity (TREE_TYPE (val))
302 && CONSTANT_CLASS_P (val)
303 && TREE_OVERFLOW (val)
304 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
305 }
306
307 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
308
309 static inline bool
310 stmt_overflow_infinity (gimple stmt)
311 {
312 if (is_gimple_assign (stmt)
313 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
314 GIMPLE_SINGLE_RHS)
315 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
316 return false;
317 }
318
319 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
320 the same value with TREE_OVERFLOW clear. This can be used to avoid
321 confusing a regular value with an overflow value. */
322
323 static inline tree
324 avoid_overflow_infinity (tree val)
325 {
326 if (!is_overflow_infinity (val))
327 return val;
328
329 if (vrp_val_is_max (val))
330 return vrp_val_max (TREE_TYPE (val));
331 else
332 {
333 gcc_checking_assert (vrp_val_is_min (val));
334 return vrp_val_min (TREE_TYPE (val));
335 }
336 }
337
338
339 /* Return true if ARG is marked with the nonnull attribute in the
340 current function signature. */
341
342 static bool
343 nonnull_arg_p (const_tree arg)
344 {
345 tree t, attrs, fntype;
346 unsigned HOST_WIDE_INT arg_num;
347
348 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
349
350 /* The static chain decl is always non-null. */
351 if (arg == cfun->static_chain_decl)
352 return true;
353
354 fntype = TREE_TYPE (current_function_decl);
355 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
356
357 /* If "nonnull" wasn't specified, we know nothing about the argument. */
358 if (attrs == NULL_TREE)
359 return false;
360
361 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
362 if (TREE_VALUE (attrs) == NULL_TREE)
363 return true;
364
365 /* Get the position number for ARG in the function signature. */
366 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
367 t;
368 t = DECL_CHAIN (t), arg_num++)
369 {
370 if (t == arg)
371 break;
372 }
373
374 gcc_assert (t == arg);
375
376 /* Now see if ARG_NUM is mentioned in the nonnull list. */
377 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
378 {
379 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
380 return true;
381 }
382
383 return false;
384 }
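
/* For instance, given a function declared as

     void f (int *p, int *q) __attribute__ ((nonnull (2)));

   nonnull_arg_p returns false for P and true for Q when compiling the
   body of f; a bare "nonnull" with no argument list would make it
   return true for both parameters.  */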
385
386
387 /* Set value range VR to VR_VARYING. */
388
389 static inline void
390 set_value_range_to_varying (value_range_t *vr)
391 {
392 vr->type = VR_VARYING;
393 vr->min = vr->max = NULL_TREE;
394 if (vr->equiv)
395 bitmap_clear (vr->equiv);
396 }
397
398
399 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
400
401 static void
402 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
403 tree max, bitmap equiv)
404 {
405 #if defined ENABLE_CHECKING
406 /* Check the validity of the range. */
407 if (t == VR_RANGE || t == VR_ANTI_RANGE)
408 {
409 int cmp;
410
411 gcc_assert (min && max);
412
413 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
414 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
415
416 cmp = compare_values (min, max);
417 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
418
419 if (needs_overflow_infinity (TREE_TYPE (min)))
420 gcc_assert (!is_overflow_infinity (min)
421 || !is_overflow_infinity (max));
422 }
423
424 if (t == VR_UNDEFINED || t == VR_VARYING)
425 gcc_assert (min == NULL_TREE && max == NULL_TREE);
426
427 if (t == VR_UNDEFINED || t == VR_VARYING)
428 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
429 #endif
430
431 vr->type = t;
432 vr->min = min;
433 vr->max = max;
434
435 /* Since updating the equivalence set involves deep copying the
436 bitmaps, only do it if absolutely necessary. */
437 if (vr->equiv == NULL
438 && equiv != NULL)
439 vr->equiv = BITMAP_ALLOC (NULL);
440
441 if (equiv != vr->equiv)
442 {
443 if (equiv && !bitmap_empty_p (equiv))
444 bitmap_copy (vr->equiv, equiv);
445 else
446 bitmap_clear (vr->equiv);
447 }
448 }
449
450
451 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
452 This means adjusting T, MIN and MAX representing the case of a
453 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
454 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
455 In corner cases where MAX+1 or MIN-1 wraps this will fall back
456 to varying.
457 This routine exists to ease canonicalization in the case where we
458 extract ranges from var + CST op limit. */
459
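/* For instance, for an unsigned char variable the wrapping range
   [250, 5] (i.e. the set [250, 255] U [0, 5]) is canonicalized to the
   anti-range ~[6, 249], and the anti-range ~[0, 9] is canonicalized to
   the plain range [10, 255].  */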
460 static void
461 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
462 tree min, tree max, bitmap equiv)
463 {
464 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
465 if ((t != VR_RANGE
466 && t != VR_ANTI_RANGE)
467 || TREE_CODE (min) != INTEGER_CST
468 || TREE_CODE (max) != INTEGER_CST)
469 {
470 set_value_range (vr, t, min, max, equiv);
471 return;
472 }
473
474 /* MIN and MAX are in the wrong order; swap them, adjusting the bounds
475 by one, and flip the VR type. */
476 if (tree_int_cst_lt (max, min))
477 {
478 tree one = build_int_cst (TREE_TYPE (min), 1);
479 tree tmp = int_const_binop (PLUS_EXPR, max, one);
480 max = int_const_binop (MINUS_EXPR, min, one);
481 min = tmp;
482
483 /* There's one corner case, if we had [C+1, C] before we now have
484 that again. But this represents an empty value range, so drop
485 to varying in this case. */
486 if (tree_int_cst_lt (max, min))
487 {
488 set_value_range_to_varying (vr);
489 return;
490 }
491
492 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
493 }
494
495 /* Anti-ranges that can be represented as ranges should be so. */
496 if (t == VR_ANTI_RANGE)
497 {
498 bool is_min = vrp_val_is_min (min);
499 bool is_max = vrp_val_is_max (max);
500
501 if (is_min && is_max)
502 {
503 /* We cannot deal with empty ranges, drop to varying. */
504 set_value_range_to_varying (vr);
505 return;
506 }
507 else if (is_min
508 /* As a special exception preserve non-null ranges. */
509 && !(TYPE_UNSIGNED (TREE_TYPE (min))
510 && integer_zerop (max)))
511 {
512 tree one = build_int_cst (TREE_TYPE (max), 1);
513 min = int_const_binop (PLUS_EXPR, max, one);
514 max = vrp_val_max (TREE_TYPE (max));
515 t = VR_RANGE;
516 }
517 else if (is_max)
518 {
519 tree one = build_int_cst (TREE_TYPE (min), 1);
520 max = int_const_binop (MINUS_EXPR, min, one);
521 min = vrp_val_min (TREE_TYPE (min));
522 t = VR_RANGE;
523 }
524 }
525
526 set_value_range (vr, t, min, max, equiv);
527 }
528
529 /* Copy value range FROM into value range TO. */
530
531 static inline void
532 copy_value_range (value_range_t *to, value_range_t *from)
533 {
534 set_value_range (to, from->type, from->min, from->max, from->equiv);
535 }
536
537 /* Set value range VR to a single value. This function is only called
538 with values we get from statements, and exists to clear the
539 TREE_OVERFLOW flag so that we don't think we have an overflow
540 infinity when we shouldn't. */
541
542 static inline void
543 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
544 {
545 gcc_assert (is_gimple_min_invariant (val));
546 val = avoid_overflow_infinity (val);
547 set_value_range (vr, VR_RANGE, val, val, equiv);
548 }
549
550 /* Set value range VR to a non-negative range of type TYPE.
551 OVERFLOW_INFINITY indicates whether to use an overflow infinity
552 rather than TYPE_MAX_VALUE; this should be true if we determine
553 that the range is nonnegative based on the assumption that signed
554 overflow does not occur. */
555
556 static inline void
557 set_value_range_to_nonnegative (value_range_t *vr, tree type,
558 bool overflow_infinity)
559 {
560 tree zero;
561
562 if (overflow_infinity && !supports_overflow_infinity (type))
563 {
564 set_value_range_to_varying (vr);
565 return;
566 }
567
568 zero = build_int_cst (type, 0);
569 set_value_range (vr, VR_RANGE, zero,
570 (overflow_infinity
571 ? positive_overflow_infinity (type)
572 : TYPE_MAX_VALUE (type)),
573 vr->equiv);
574 }
575
576 /* Set value range VR to a non-NULL range of type TYPE. */
577
578 static inline void
579 set_value_range_to_nonnull (value_range_t *vr, tree type)
580 {
581 tree zero = build_int_cst (type, 0);
582 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
583 }
584
585
586 /* Set value range VR to a NULL range of type TYPE. */
587
588 static inline void
589 set_value_range_to_null (value_range_t *vr, tree type)
590 {
591 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
592 }
593
594
595 /* Set value range VR to a range of a truthvalue of type TYPE. */
596
597 static inline void
598 set_value_range_to_truthvalue (value_range_t *vr, tree type)
599 {
600 if (TYPE_PRECISION (type) == 1)
601 set_value_range_to_varying (vr);
602 else
603 set_value_range (vr, VR_RANGE,
604 build_int_cst (type, 0), build_int_cst (type, 1),
605 vr->equiv);
606 }
607
608
609 /* Set value range VR to VR_UNDEFINED. */
610
611 static inline void
612 set_value_range_to_undefined (value_range_t *vr)
613 {
614 vr->type = VR_UNDEFINED;
615 vr->min = vr->max = NULL_TREE;
616 if (vr->equiv)
617 bitmap_clear (vr->equiv);
618 }
619
620
621 /* If abs (min) < abs (max), set VR to [-max, max]; if
622 abs (min) >= abs (max), set VR to [-min, min]. */
623
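/* For instance, [-3, 5] becomes [-5, 5] and [-7, 2] becomes [-7, 7];
   i.e. the result always covers both X and -X for every X in the
   original [MIN, MAX].  */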
624 static void
625 abs_extent_range (value_range_t *vr, tree min, tree max)
626 {
627 int cmp;
628
629 gcc_assert (TREE_CODE (min) == INTEGER_CST);
630 gcc_assert (TREE_CODE (max) == INTEGER_CST);
631 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
632 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
633 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
634 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
635 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
636 {
637 set_value_range_to_varying (vr);
638 return;
639 }
640 cmp = compare_values (min, max);
641 if (cmp == -1)
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
643 else if (cmp == 0 || cmp == 1)
644 {
645 max = min;
646 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
647 }
648 else
649 {
650 set_value_range_to_varying (vr);
651 return;
652 }
653 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
654 }
655
656
657 /* Return value range information for VAR.
658
659 If we have no value ranges recorded (i.e., VRP is not running), then
660 return NULL. Otherwise create an empty range if none existed for VAR. */
661
662 static value_range_t *
663 get_value_range (const_tree var)
664 {
665 static const struct value_range_d vr_const_varying
666 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
667 value_range_t *vr;
668 tree sym;
669 unsigned ver = SSA_NAME_VERSION (var);
670
671 /* If we have no recorded ranges, then return NULL. */
672 if (! vr_value)
673 return NULL;
674
675 /* If we query the range for a new SSA name return an unmodifiable VARYING.
676 We should get here at most from the substitute-and-fold stage which
677 will never try to change values. */
678 if (ver >= num_vr_values)
679 return CONST_CAST (value_range_t *, &vr_const_varying);
680
681 vr = vr_value[ver];
682 if (vr)
683 return vr;
684
685 /* After propagation finished do not allocate new value-ranges. */
686 if (values_propagated)
687 return CONST_CAST (value_range_t *, &vr_const_varying);
688
689 /* Create a default value range. */
690 vr_value[ver] = vr = XCNEW (value_range_t);
691
692 /* Defer allocating the equivalence set. */
693 vr->equiv = NULL;
694
695 /* If VAR is a default definition of a parameter, the variable can
696 take any value in VAR's type. */
697 sym = SSA_NAME_VAR (var);
698 if (SSA_NAME_IS_DEFAULT_DEF (var)
699 && TREE_CODE (sym) == PARM_DECL)
700 {
701 /* Try to use the "nonnull" attribute to create ~[0, 0]
702 anti-ranges for pointers. Note that this is only valid with
703 default definitions of PARM_DECLs. */
704 if (POINTER_TYPE_P (TREE_TYPE (sym))
705 && nonnull_arg_p (sym))
706 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
707 else
708 set_value_range_to_varying (vr);
709 }
710
711 return vr;
712 }
713
714 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
715
716 static inline bool
717 vrp_operand_equal_p (const_tree val1, const_tree val2)
718 {
719 if (val1 == val2)
720 return true;
721 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
722 return false;
723 if (is_overflow_infinity (val1))
724 return is_overflow_infinity (val2);
725 return true;
726 }
727
728 /* Return true if the bitmaps B1 and B2 are equal. */
729
730 static inline bool
731 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
732 {
733 return (b1 == b2
734 || ((!b1 || bitmap_empty_p (b1))
735 && (!b2 || bitmap_empty_p (b2)))
736 || (b1 && b2
737 && bitmap_equal_p (b1, b2)));
738 }
739
740 /* Update the value range and equivalence set for variable VAR to
741 NEW_VR. Return true if NEW_VR is different from VAR's previous
742 value.
743
744 NOTE: This function assumes that NEW_VR is a temporary value range
745 object created for the sole purpose of updating VAR's range. The
746 storage used by the equivalence set from NEW_VR will be freed by
747 this function. Do not call update_value_range when NEW_VR
748 is the range object associated with another SSA name. */
749
750 static inline bool
751 update_value_range (const_tree var, value_range_t *new_vr)
752 {
753 value_range_t *old_vr;
754 bool is_new;
755
756 /* Update the value range, if necessary. */
757 old_vr = get_value_range (var);
758 is_new = old_vr->type != new_vr->type
759 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
760 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
761 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
762
763 if (is_new)
764 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
765 new_vr->equiv);
766
767 BITMAP_FREE (new_vr->equiv);
768
769 return is_new;
770 }
771
772
773 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
774 point where equivalence processing can be turned on/off. */
775
776 static void
777 add_equivalence (bitmap *equiv, const_tree var)
778 {
779 unsigned ver = SSA_NAME_VERSION (var);
780 value_range_t *vr = vr_value[ver];
781
782 if (*equiv == NULL)
783 *equiv = BITMAP_ALLOC (NULL);
784 bitmap_set_bit (*equiv, ver);
785 if (vr && vr->equiv)
786 bitmap_ior_into (*equiv, vr->equiv);
787 }
788
789
790 /* Return true if VR is ~[0, 0]. */
791
792 static inline bool
793 range_is_nonnull (value_range_t *vr)
794 {
795 return vr->type == VR_ANTI_RANGE
796 && integer_zerop (vr->min)
797 && integer_zerop (vr->max);
798 }
799
800
801 /* Return true if VR is [0, 0]. */
802
803 static inline bool
804 range_is_null (value_range_t *vr)
805 {
806 return vr->type == VR_RANGE
807 && integer_zerop (vr->min)
808 && integer_zerop (vr->max);
809 }
810
811 /* Return true if the min and max of VR are INTEGER_CSTs. The range need
812 not be a singleton. */
813
814 static inline bool
815 range_int_cst_p (value_range_t *vr)
816 {
817 return (vr->type == VR_RANGE
818 && TREE_CODE (vr->max) == INTEGER_CST
819 && TREE_CODE (vr->min) == INTEGER_CST
820 && !TREE_OVERFLOW (vr->max)
821 && !TREE_OVERFLOW (vr->min));
822 }
823
824 /* Return true if VR is an INTEGER_CST singleton. */
825
826 static inline bool
827 range_int_cst_singleton_p (value_range_t *vr)
828 {
829 return (range_int_cst_p (vr)
830 && tree_int_cst_equal (vr->min, vr->max));
831 }
832
833 /* Return true if value range VR involves at least one symbol. */
834
835 static inline bool
836 symbolic_range_p (value_range_t *vr)
837 {
838 return (!is_gimple_min_invariant (vr->min)
839 || !is_gimple_min_invariant (vr->max));
840 }
841
842 /* Return true if value range VR uses an overflow infinity. */
843
844 static inline bool
845 overflow_infinity_range_p (value_range_t *vr)
846 {
847 return (vr->type == VR_RANGE
848 && (is_overflow_infinity (vr->min)
849 || is_overflow_infinity (vr->max)));
850 }
851
852 /* Return false if we cannot make a valid comparison based on VR;
853 this will be the case if it uses an overflow infinity and overflow
854 is not undefined (i.e., -fno-strict-overflow is in effect).
855 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
856 uses an overflow infinity. */
857
858 static bool
859 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
860 {
861 gcc_assert (vr->type == VR_RANGE);
862 if (is_overflow_infinity (vr->min))
863 {
864 *strict_overflow_p = true;
865 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
866 return false;
867 }
868 if (is_overflow_infinity (vr->max))
869 {
870 *strict_overflow_p = true;
871 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
872 return false;
873 }
874 return true;
875 }
876
877
878 /* Return true if the result of assignment STMT is known to be non-negative.
879 If the return value is based on the assumption that signed overflow is
880 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
881 *STRICT_OVERFLOW_P. */
882
883 static bool
884 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
885 {
886 enum tree_code code = gimple_assign_rhs_code (stmt);
887 switch (get_gimple_rhs_class (code))
888 {
889 case GIMPLE_UNARY_RHS:
890 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
891 gimple_expr_type (stmt),
892 gimple_assign_rhs1 (stmt),
893 strict_overflow_p);
894 case GIMPLE_BINARY_RHS:
895 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
896 gimple_expr_type (stmt),
897 gimple_assign_rhs1 (stmt),
898 gimple_assign_rhs2 (stmt),
899 strict_overflow_p);
900 case GIMPLE_TERNARY_RHS:
901 return false;
902 case GIMPLE_SINGLE_RHS:
903 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
904 strict_overflow_p);
905 case GIMPLE_INVALID_RHS:
906 gcc_unreachable ();
907 default:
908 gcc_unreachable ();
909 }
910 }
911
912 /* Return true if the return value of call STMT is known to be non-negative.
913 If the return value is based on the assumption that signed overflow is
914 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
915 *STRICT_OVERFLOW_P. */
916
917 static bool
918 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
919 {
920 tree arg0 = gimple_call_num_args (stmt) > 0 ?
921 gimple_call_arg (stmt, 0) : NULL_TREE;
922 tree arg1 = gimple_call_num_args (stmt) > 1 ?
923 gimple_call_arg (stmt, 1) : NULL_TREE;
924
925 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
926 gimple_call_fndecl (stmt),
927 arg0,
928 arg1,
929 strict_overflow_p);
930 }
931
932 /* Return true if STMT is known to compute a non-negative value.
933 If the return value is based on the assumption that signed overflow is
934 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
935 *STRICT_OVERFLOW_P. */
936
937 static bool
938 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
939 {
940 switch (gimple_code (stmt))
941 {
942 case GIMPLE_ASSIGN:
943 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
944 case GIMPLE_CALL:
945 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
946 default:
947 gcc_unreachable ();
948 }
949 }
950
951 /* Return true if the result of assignment STMT is known to be non-zero.
952 If the return value is based on the assumption that signed overflow is
953 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
954 *STRICT_OVERFLOW_P. */
955
956 static bool
957 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
958 {
959 enum tree_code code = gimple_assign_rhs_code (stmt);
960 switch (get_gimple_rhs_class (code))
961 {
962 case GIMPLE_UNARY_RHS:
963 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
964 gimple_expr_type (stmt),
965 gimple_assign_rhs1 (stmt),
966 strict_overflow_p);
967 case GIMPLE_BINARY_RHS:
968 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
969 gimple_expr_type (stmt),
970 gimple_assign_rhs1 (stmt),
971 gimple_assign_rhs2 (stmt),
972 strict_overflow_p);
973 case GIMPLE_TERNARY_RHS:
974 return false;
975 case GIMPLE_SINGLE_RHS:
976 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
977 strict_overflow_p);
978 case GIMPLE_INVALID_RHS:
979 gcc_unreachable ();
980 default:
981 gcc_unreachable ();
982 }
983 }
984
985 /* Return true if STMT is known to compute a non-zero value.
986 If the return value is based on the assumption that signed overflow is
987 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
988 *STRICT_OVERFLOW_P. */
989
990 static bool
991 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
992 {
993 switch (gimple_code (stmt))
994 {
995 case GIMPLE_ASSIGN:
996 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
997 case GIMPLE_CALL:
998 return gimple_alloca_call_p (stmt);
999 default:
1000 gcc_unreachable ();
1001 }
1002 }
1003
1004 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1005 obtained so far. */
1006
1007 static bool
1008 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1009 {
1010 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1011 return true;
1012
1013 /* If we have an expression of the form &X->a, then the expression
1014 is nonnull if X is nonnull. */
1015 if (is_gimple_assign (stmt)
1016 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1017 {
1018 tree expr = gimple_assign_rhs1 (stmt);
1019 tree base = get_base_address (TREE_OPERAND (expr, 0));
1020
1021 if (base != NULL_TREE
1022 && TREE_CODE (base) == MEM_REF
1023 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1024 {
1025 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1026 if (range_is_nonnull (vr))
1027 return true;
1028 }
1029 }
1030
1031 return false;
1032 }
1033
1034 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1035 a gimple invariant, or SSA_NAME +- CST. */
1036
1037 static bool
1038 valid_value_p (tree expr)
1039 {
1040 if (TREE_CODE (expr) == SSA_NAME)
1041 return true;
1042
1043 if (TREE_CODE (expr) == PLUS_EXPR
1044 || TREE_CODE (expr) == MINUS_EXPR)
1045 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1046 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1047
1048 return is_gimple_min_invariant (expr);
1049 }
1050
1051 /* Return
1052 1 if VAL < VAL2
1053 0 if !(VAL < VAL2)
1054 -2 if those are incomparable. */
1055 static inline int
1056 operand_less_p (tree val, tree val2)
1057 {
1058 /* LT is folded faster than GE and others. Inline the common case. */
1059 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1060 {
1061 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1062 return INT_CST_LT_UNSIGNED (val, val2);
1063 else
1064 {
1065 if (INT_CST_LT (val, val2))
1066 return 1;
1067 }
1068 }
1069 else
1070 {
1071 tree tcmp;
1072
1073 fold_defer_overflow_warnings ();
1074
1075 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1076
1077 fold_undefer_and_ignore_overflow_warnings ();
1078
1079 if (!tcmp
1080 || TREE_CODE (tcmp) != INTEGER_CST)
1081 return -2;
1082
1083 if (!integer_zerop (tcmp))
1084 return 1;
1085 }
1086
1087 /* val >= val2, not considering overflow infinity. */
1088 if (is_negative_overflow_infinity (val))
1089 return is_negative_overflow_infinity (val2) ? 0 : 1;
1090 else if (is_positive_overflow_infinity (val2))
1091 return is_positive_overflow_infinity (val) ? 0 : 1;
1092
1093 return 0;
1094 }
1095
1096 /* Compare two values VAL1 and VAL2. Return
1097
1098 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1099 -1 if VAL1 < VAL2,
1100 0 if VAL1 == VAL2,
1101 +1 if VAL1 > VAL2, and
1102 +2 if VAL1 != VAL2
1103
1104 This is similar to tree_int_cst_compare but supports pointer values
1105 and values that cannot be compared at compile time.
1106
1107 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1108 true if the return value is only valid if we assume that signed
1109 overflow is undefined. */
1110
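/* For instance, assuming signed overflow is undefined,
   compare_values_warnv (n_1 + 5, n_1 + 10, &sop) returns -1 and sets
   *SOP, since both values use the same name and 5 < 10, whereas
   compare_values_warnv (n_1, m_2, &sop) returns -2 because unrelated
   names cannot be ordered at compile time.  */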
1111 static int
1112 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1113 {
1114 if (val1 == val2)
1115 return 0;
1116
1117 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1118 both integers. */
1119 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1120 == POINTER_TYPE_P (TREE_TYPE (val2)));
1121 /* Convert the two values into the same type. This is needed because
1122 sizetype causes sign extension even for unsigned types. */
1123 val2 = fold_convert (TREE_TYPE (val1), val2);
1124 STRIP_USELESS_TYPE_CONVERSION (val2);
1125
1126 if ((TREE_CODE (val1) == SSA_NAME
1127 || TREE_CODE (val1) == PLUS_EXPR
1128 || TREE_CODE (val1) == MINUS_EXPR)
1129 && (TREE_CODE (val2) == SSA_NAME
1130 || TREE_CODE (val2) == PLUS_EXPR
1131 || TREE_CODE (val2) == MINUS_EXPR))
1132 {
1133 tree n1, c1, n2, c2;
1134 enum tree_code code1, code2;
1135
1136 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1137 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1138 same name, return -2. */
1139 if (TREE_CODE (val1) == SSA_NAME)
1140 {
1141 code1 = SSA_NAME;
1142 n1 = val1;
1143 c1 = NULL_TREE;
1144 }
1145 else
1146 {
1147 code1 = TREE_CODE (val1);
1148 n1 = TREE_OPERAND (val1, 0);
1149 c1 = TREE_OPERAND (val1, 1);
1150 if (tree_int_cst_sgn (c1) == -1)
1151 {
1152 if (is_negative_overflow_infinity (c1))
1153 return -2;
1154 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1155 if (!c1)
1156 return -2;
1157 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1158 }
1159 }
1160
1161 if (TREE_CODE (val2) == SSA_NAME)
1162 {
1163 code2 = SSA_NAME;
1164 n2 = val2;
1165 c2 = NULL_TREE;
1166 }
1167 else
1168 {
1169 code2 = TREE_CODE (val2);
1170 n2 = TREE_OPERAND (val2, 0);
1171 c2 = TREE_OPERAND (val2, 1);
1172 if (tree_int_cst_sgn (c2) == -1)
1173 {
1174 if (is_negative_overflow_infinity (c2))
1175 return -2;
1176 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1177 if (!c2)
1178 return -2;
1179 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1180 }
1181 }
1182
1183 /* Both values must use the same name. */
1184 if (n1 != n2)
1185 return -2;
1186
1187 if (code1 == SSA_NAME
1188 && code2 == SSA_NAME)
1189 /* NAME == NAME */
1190 return 0;
1191
1192 /* If overflow is defined we cannot simplify more. */
1193 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1194 return -2;
1195
1196 if (strict_overflow_p != NULL
1197 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1198 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1199 *strict_overflow_p = true;
1200
1201 if (code1 == SSA_NAME)
1202 {
1203 if (code2 == PLUS_EXPR)
1204 /* NAME < NAME + CST */
1205 return -1;
1206 else if (code2 == MINUS_EXPR)
1207 /* NAME > NAME - CST */
1208 return 1;
1209 }
1210 else if (code1 == PLUS_EXPR)
1211 {
1212 if (code2 == SSA_NAME)
1213 /* NAME + CST > NAME */
1214 return 1;
1215 else if (code2 == PLUS_EXPR)
1216 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1217 return compare_values_warnv (c1, c2, strict_overflow_p);
1218 else if (code2 == MINUS_EXPR)
1219 /* NAME + CST1 > NAME - CST2 */
1220 return 1;
1221 }
1222 else if (code1 == MINUS_EXPR)
1223 {
1224 if (code2 == SSA_NAME)
1225 /* NAME - CST < NAME */
1226 return -1;
1227 else if (code2 == PLUS_EXPR)
1228 /* NAME - CST1 < NAME + CST2 */
1229 return -1;
1230 else if (code2 == MINUS_EXPR)
1231 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1232 C1 and C2 are swapped in the call to compare_values. */
1233 return compare_values_warnv (c2, c1, strict_overflow_p);
1234 }
1235
1236 gcc_unreachable ();
1237 }
1238
1239 /* We cannot compare non-constants. */
1240 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1241 return -2;
1242
1243 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1244 {
1245 /* We cannot compare overflowed values, except for overflow
1246 infinities. */
1247 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1248 {
1249 if (strict_overflow_p != NULL)
1250 *strict_overflow_p = true;
1251 if (is_negative_overflow_infinity (val1))
1252 return is_negative_overflow_infinity (val2) ? 0 : -1;
1253 else if (is_negative_overflow_infinity (val2))
1254 return 1;
1255 else if (is_positive_overflow_infinity (val1))
1256 return is_positive_overflow_infinity (val2) ? 0 : 1;
1257 else if (is_positive_overflow_infinity (val2))
1258 return -1;
1259 return -2;
1260 }
1261
1262 return tree_int_cst_compare (val1, val2);
1263 }
1264 else
1265 {
1266 tree t;
1267
1268 /* First see if VAL1 and VAL2 are not the same. */
1269 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1270 return 0;
1271
1272 /* If VAL1 is a lower address than VAL2, return -1. */
1273 if (operand_less_p (val1, val2) == 1)
1274 return -1;
1275
1276 /* If VAL1 is a higher address than VAL2, return +1. */
1277 if (operand_less_p (val2, val1) == 1)
1278 return 1;
1279
1280 /* If VAL1 is different than VAL2, return +2.
1281 For integer constants we either have already returned -1 or 1
1282 or they are equivalent. We still might succeed in proving
1283 something about non-trivial operands. */
1284 if (TREE_CODE (val1) != INTEGER_CST
1285 || TREE_CODE (val2) != INTEGER_CST)
1286 {
1287 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1288 if (t && integer_onep (t))
1289 return 2;
1290 }
1291
1292 return -2;
1293 }
1294 }
1295
1296 /* Compare values like compare_values_warnv, but treat comparisons of
1297 nonconstants which rely on undefined overflow as incomparable. */
1298
1299 static int
1300 compare_values (tree val1, tree val2)
1301 {
1302 bool sop;
1303 int ret;
1304
1305 sop = false;
1306 ret = compare_values_warnv (val1, val2, &sop);
1307 if (sop
1308 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1309 ret = -2;
1310 return ret;
1311 }
1312
1313
1314 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1315 0 if VAL is not inside VR,
1316 -2 if we cannot tell either way.
1317
1318 FIXME, the current semantics of this function are a bit quirky
1319 when taken in the context of VRP. In here we do not care
1320 about VR's type. If VR is the anti-range ~[3, 5] the call
1321 value_inside_range (4, VR) will return 1.
1322
1323 This is counter-intuitive in a strict sense, but the callers
1324 currently expect this. They are calling the function
1325 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1326 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1327 themselves.
1328
1329 This also applies to value_ranges_intersect_p and
1330 range_includes_zero_p. The semantics of VR_RANGE and
1331 VR_ANTI_RANGE should be encoded here, but that also means
1332 adapting the users of these functions to the new semantics.
1333
1334 Benchmark compile/20001226-1.c compilation time after changing this
1335 function. */
1336
1337 static inline int
1338 value_inside_range (tree val, value_range_t * vr)
1339 {
1340 int cmp1, cmp2;
1341
1342 cmp1 = operand_less_p (val, vr->min);
1343 if (cmp1 == -2)
1344 return -2;
1345 if (cmp1 == 1)
1346 return 0;
1347
1348 cmp2 = operand_less_p (vr->max, val);
1349 if (cmp2 == -2)
1350 return -2;
1351
1352 return !cmp2;
1353 }
1354
1355
1356 /* Return true if value ranges VR0 and VR1 have a non-empty
1357 intersection.
1358
1359 Benchmark compile/20001226-1.c compilation time after changing this
1360 function.
1361 */
1362
1363 static inline bool
1364 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1365 {
1366 /* The value ranges do not intersect if the maximum of the first range is
1367 less than the minimum of the second range or vice versa.
1368 When those relations are unknown, we can't do any better. */
1369 if (operand_less_p (vr0->max, vr1->min) != 0)
1370 return false;
1371 if (operand_less_p (vr1->max, vr0->min) != 0)
1372 return false;
1373 return true;
1374 }
1375
1376
1377 /* Return true if VR includes the value zero, false otherwise. FIXME,
1378 currently this will return false for an anti-range like ~[-4, 3].
1379 This will be wrong when the semantics of value_inside_range are
1380 modified (currently the users of this function expect these
1381 semantics). */
1382
1383 static inline bool
1384 range_includes_zero_p (value_range_t *vr)
1385 {
1386 tree zero;
1387
1388 gcc_assert (vr->type != VR_UNDEFINED
1389 && vr->type != VR_VARYING
1390 && !symbolic_range_p (vr));
1391
1392 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1393 return (value_inside_range (zero, vr) == 1);
1394 }
1395
1396 /* Return true if *VR is known to contain only nonnegative values. */
1397
1398 static inline bool
1399 value_range_nonnegative_p (value_range_t *vr)
1400 {
1401 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1402 which would return a useful value should be encoded as a
1403 VR_RANGE. */
1404 if (vr->type == VR_RANGE)
1405 {
1406 int result = compare_values (vr->min, integer_zero_node);
1407 return (result == 0 || result == 1);
1408 }
1409
1410 return false;
1411 }
1412
1413 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1414 false otherwise or if no value range information is available. */
1415
1416 bool
1417 ssa_name_nonnegative_p (const_tree t)
1418 {
1419 value_range_t *vr = get_value_range (t);
1420
1421 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
1422 && TYPE_UNSIGNED (TREE_TYPE (t)))
1423 return true;
1424
1425 if (!vr)
1426 return false;
1427
1428 return value_range_nonnegative_p (vr);
1429 }
1430
1431 /* If *VR has a value range that is a single constant value return that,
1432 otherwise return NULL_TREE. */
1433
1434 static tree
1435 value_range_constant_singleton (value_range_t *vr)
1436 {
1437 if (vr->type == VR_RANGE
1438 && operand_equal_p (vr->min, vr->max, 0)
1439 && is_gimple_min_invariant (vr->min))
1440 return vr->min;
1441
1442 return NULL_TREE;
1443 }
1444
1445 /* If OP has a value range with a single constant value return that,
1446 otherwise return NULL_TREE. This returns OP itself if OP is a
1447 constant. */
1448
1449 static tree
1450 op_with_constant_singleton_value_range (tree op)
1451 {
1452 if (is_gimple_min_invariant (op))
1453 return op;
1454
1455 if (TREE_CODE (op) != SSA_NAME)
1456 return NULL_TREE;
1457
1458 return value_range_constant_singleton (get_value_range (op));
1459 }
1460
1461 /* Return true if OP is in a boolean [0, 1] value-range. */
1462
1463 static bool
1464 op_with_boolean_value_range_p (tree op)
1465 {
1466 value_range_t *vr;
1467
1468 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1469 return true;
1470
1471 if (integer_zerop (op)
1472 || integer_onep (op))
1473 return true;
1474
1475 if (TREE_CODE (op) != SSA_NAME)
1476 return false;
1477
1478 vr = get_value_range (op);
1479 return (vr->type == VR_RANGE
1480 && integer_zerop (vr->min)
1481 && integer_onep (vr->max));
1482 }
1483
1484 /* Extract value range information from an ASSERT_EXPR EXPR and store
1485 it in *VR_P. */
1486
1487 static void
1488 extract_range_from_assert (value_range_t *vr_p, tree expr)
1489 {
1490 tree var, cond, limit, min, max, type;
1491 value_range_t *var_vr, *limit_vr;
1492 enum tree_code cond_code;
1493
1494 var = ASSERT_EXPR_VAR (expr);
1495 cond = ASSERT_EXPR_COND (expr);
1496
1497 gcc_assert (COMPARISON_CLASS_P (cond));
1498
1499 /* Find VAR in the ASSERT_EXPR conditional. */
1500 if (var == TREE_OPERAND (cond, 0)
1501 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1502 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1503 {
1504 /* If the predicate is of the form VAR COMP LIMIT, then we just
1505 take LIMIT from the RHS and use the same comparison code. */
1506 cond_code = TREE_CODE (cond);
1507 limit = TREE_OPERAND (cond, 1);
1508 cond = TREE_OPERAND (cond, 0);
1509 }
1510 else
1511 {
1512 /* If the predicate is of the form LIMIT COMP VAR, then we need
1513 to flip around the comparison code to create the proper range
1514 for VAR. */
1515 cond_code = swap_tree_comparison (TREE_CODE (cond));
1516 limit = TREE_OPERAND (cond, 0);
1517 cond = TREE_OPERAND (cond, 1);
1518 }
1519
1520 limit = avoid_overflow_infinity (limit);
1521
1522 type = TREE_TYPE (var);
1523 gcc_assert (limit != var);
1524
1525 /* For pointer arithmetic, we only keep track of pointer equality
1526 and inequality. */
1527 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1528 {
1529 set_value_range_to_varying (vr_p);
1530 return;
1531 }
1532
1533 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1534 try to use LIMIT's range to avoid creating symbolic ranges
1535 unnecessarily. */
1536 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1537
1538 /* LIMIT's range is only interesting if it has any useful information. */
1539 if (limit_vr
1540 && (limit_vr->type == VR_UNDEFINED
1541 || limit_vr->type == VR_VARYING
1542 || symbolic_range_p (limit_vr)))
1543 limit_vr = NULL;
1544
1545 /* Initially, the new range has the same set of equivalences of
1546 VAR's range. This will be revised before returning the final
1547 value. Since assertions may be chained via mutually exclusive
1548 predicates, we will need to trim the set of equivalences before
1549 we are done. */
1550 gcc_assert (vr_p->equiv == NULL);
1551 add_equivalence (&vr_p->equiv, var);
1552
1553 /* Extract a new range based on the asserted comparison for VAR and
1554 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1555 will only use it for equality comparisons (EQ_EXPR). For any
1556 other kind of assertion, we cannot derive a range from LIMIT's
1557 anti-range that can be used to describe the new range. For
1558 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1559 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1560 no single range for x_2 that could describe LE_EXPR, so we might
1561 as well build the range [b_4, +INF] for it.
1562 One special case we handle is extracting a range from a
1563 range test encoded as (unsigned)var + CST <= limit. */
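
/* For instance, assuming a 32-bit int, the test 3 <= x_2 && x_2 <= 10
   is commonly folded into (unsigned) x_2 + 4294967293 <= 7 and reaches
   us as ASSERT_EXPR <x_2, (unsigned) x_2 + 4294967293 <= 7>.  Negating
   the constant gives MIN = 3, MAX becomes 7 + 3 = 10, and x_2's range
   is set to [3, 10] (or to ~[3, 10] for the GT_EXPR form).  */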
1564 if (TREE_CODE (cond) == NOP_EXPR
1565 || TREE_CODE (cond) == PLUS_EXPR)
1566 {
1567 if (TREE_CODE (cond) == PLUS_EXPR)
1568 {
1569 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1570 TREE_OPERAND (cond, 1));
1571 max = int_const_binop (PLUS_EXPR, limit, min);
1572 cond = TREE_OPERAND (cond, 0);
1573 }
1574 else
1575 {
1576 min = build_int_cst (TREE_TYPE (var), 0);
1577 max = limit;
1578 }
1579
1580 /* Make sure to not set TREE_OVERFLOW on the final type
1581 conversion. We are willingly interpreting large positive
1582 unsigned values as negative signed values here. */
1583 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1584 0, false);
1585 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1586 0, false);
1587
1588 /* We can transform a max, min range to an anti-range or
1589 vice-versa. Use set_and_canonicalize_value_range which does
1590 this for us. */
1591 if (cond_code == LE_EXPR)
1592 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1593 min, max, vr_p->equiv);
1594 else if (cond_code == GT_EXPR)
1595 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1596 min, max, vr_p->equiv);
1597 else
1598 gcc_unreachable ();
1599 }
1600 else if (cond_code == EQ_EXPR)
1601 {
1602 enum value_range_type range_type;
1603
1604 if (limit_vr)
1605 {
1606 range_type = limit_vr->type;
1607 min = limit_vr->min;
1608 max = limit_vr->max;
1609 }
1610 else
1611 {
1612 range_type = VR_RANGE;
1613 min = limit;
1614 max = limit;
1615 }
1616
1617 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1618
1619 /* When asserting the equality VAR == LIMIT and LIMIT is another
1620 SSA name, the new range will also inherit the equivalence set
1621 from LIMIT. */
1622 if (TREE_CODE (limit) == SSA_NAME)
1623 add_equivalence (&vr_p->equiv, limit);
1624 }
1625 else if (cond_code == NE_EXPR)
1626 {
1627 /* As described above, when LIMIT's range is an anti-range and
1628 this assertion is an inequality (NE_EXPR), then we cannot
1629 derive anything from the anti-range. For instance, if
1630 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1631 not imply that VAR's range is [0, 0]. So, in the case of
1632 anti-ranges, we just assert the inequality using LIMIT and
1633 not its anti-range.
1634
1635 If LIMIT_VR is a range, we can only use it to build a new
1636 anti-range if LIMIT_VR is a single-valued range. For
1637 instance, if LIMIT_VR is [0, 1], the predicate
1638 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1639 Rather, it means that for value 0 VAR should be ~[0, 0]
1640 and for value 1, VAR should be ~[1, 1]. We cannot
1641 represent these ranges.
1642
1643 The only situation in which we can build a valid
1644 anti-range is when LIMIT_VR is a single-valued range
1645 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1646 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1647 if (limit_vr
1648 && limit_vr->type == VR_RANGE
1649 && compare_values (limit_vr->min, limit_vr->max) == 0)
1650 {
1651 min = limit_vr->min;
1652 max = limit_vr->max;
1653 }
1654 else
1655 {
1656 /* In any other case, we cannot use LIMIT's range to build a
1657 valid anti-range. */
1658 min = max = limit;
1659 }
1660
1661 /* If MIN and MAX cover the whole range for their type, then
1662 just use the original LIMIT. */
1663 if (INTEGRAL_TYPE_P (type)
1664 && vrp_val_is_min (min)
1665 && vrp_val_is_max (max))
1666 min = max = limit;
1667
1668 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1669 }
1670 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1671 {
1672 min = TYPE_MIN_VALUE (type);
1673
1674 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1675 max = limit;
1676 else
1677 {
1678 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1679 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1680 LT_EXPR. */
1681 max = limit_vr->max;
1682 }
1683
1684 /* If the maximum value forces us to be out of bounds, simply punt.
1685 It would be pointless to try and do anything more since this
1686 all should be optimized away above us. */
1687 if ((cond_code == LT_EXPR
1688 && compare_values (max, min) == 0)
1689 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1690 set_value_range_to_varying (vr_p);
1691 else
1692 {
1693 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1694 if (cond_code == LT_EXPR)
1695 {
1696 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1697 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1698 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1699 build_int_cst (TREE_TYPE (max), -1));
1700 else
1701 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1702 build_int_cst (TREE_TYPE (max), 1));
1703 if (EXPR_P (max))
1704 TREE_NO_WARNING (max) = 1;
1705 }
1706
1707 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1708 }
1709 }
1710 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1711 {
1712 max = TYPE_MAX_VALUE (type);
1713
1714 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1715 min = limit;
1716 else
1717 {
1718 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1719 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1720 GT_EXPR. */
1721 min = limit_vr->min;
1722 }
1723
1724 /* If the minimum value forces us to be out of bounds, simply punt.
1725 It would be pointless to try and do anything more since this
1726 all should be optimized away above us. */
1727 if ((cond_code == GT_EXPR
1728 && compare_values (min, max) == 0)
1729 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1730 set_value_range_to_varying (vr_p);
1731 else
1732 {
1733 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1734 if (cond_code == GT_EXPR)
1735 {
1736 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1737 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1738 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1739 build_int_cst (TREE_TYPE (min), -1));
1740 else
1741 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1742 build_int_cst (TREE_TYPE (min), 1));
1743 if (EXPR_P (min))
1744 TREE_NO_WARNING (min) = 1;
1745 }
1746
1747 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1748 }
1749 }
1750 else
1751 gcc_unreachable ();
1752
1753 /* If VAR already had a known range, it may happen that the new
1754 range we have computed and VAR's range are not compatible. For
1755 instance,
1756
1757 if (p_5 == NULL)
1758 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1759 x_7 = p_6->fld;
1760 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1761
1762 While the above comes from a faulty program, it will cause an ICE
1763 later because p_8 and p_6 will have incompatible ranges and at
1764 the same time will be considered equivalent. A similar situation
1765 would arise from
1766
1767 if (i_5 > 10)
1768 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1769 if (i_5 < 5)
1770 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1771
1772 Again i_6 and i_7 will have incompatible ranges. It would be
1773 pointless to try and do anything with i_7's range because
1774 anything dominated by 'if (i_5 < 5)' will be optimized away.
1775 Note that, due to the way in which simulation proceeds, the statement
1776 i_7 = ASSERT_EXPR <...> would never be visited because the
1777 conditional 'if (i_5 < 5)' always evaluates to false. However,
1778 this extra check does not hurt and may protect against future
1779 changes to VRP that may get into a situation similar to the
1780 NULL pointer dereference example.
1781
1782 Note that these compatibility tests are only needed when dealing
1783 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1784 are both anti-ranges, they will always be compatible, because two
1785 anti-ranges will always have a non-empty intersection. */
1786
1787 var_vr = get_value_range (var);
1788
1789 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1790 ranges or anti-ranges. */
1791 if (vr_p->type == VR_VARYING
1792 || vr_p->type == VR_UNDEFINED
1793 || var_vr->type == VR_VARYING
1794 || var_vr->type == VR_UNDEFINED
1795 || symbolic_range_p (vr_p)
1796 || symbolic_range_p (var_vr))
1797 return;
1798
1799 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1800 {
1801 /* If the two ranges have a non-empty intersection, we can
1802 refine the resulting range. Since the assert expression
1803 creates an equivalency and at the same time it asserts a
1804 predicate, we can take the intersection of the two ranges to
1805 get better precision. */
1806 if (value_ranges_intersect_p (var_vr, vr_p))
1807 {
1808 /* Use the larger of the two minimums. */
1809 if (compare_values (vr_p->min, var_vr->min) == -1)
1810 min = var_vr->min;
1811 else
1812 min = vr_p->min;
1813
1814 /* Use the smaller of the two maximums. */
1815 if (compare_values (vr_p->max, var_vr->max) == 1)
1816 max = var_vr->max;
1817 else
1818 max = vr_p->max;
1819
1820 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1821 }
1822 else
1823 {
1824 /* The two ranges do not intersect, set the new range to
1825 VARYING, because we will not be able to do anything
1826 meaningful with it. */
1827 set_value_range_to_varying (vr_p);
1828 }
1829 }
1830 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1831 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1832 {
1833 /* A range and an anti-range will cancel each other only if
1834 their ends are the same. For instance, in the example above,
1835 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1836 so VR_P should be set to VR_VARYING. */
1837 if (compare_values (var_vr->min, vr_p->min) == 0
1838 && compare_values (var_vr->max, vr_p->max) == 0)
1839 set_value_range_to_varying (vr_p);
1840 else
1841 {
1842 tree min, max, anti_min, anti_max, real_min, real_max;
1843 int cmp;
1844
1845 /* We want to compute the logical AND of the two ranges;
1846 there are three cases to consider.
1847
1848
1849 1. The VR_ANTI_RANGE range is completely within the
1850 VR_RANGE and the endpoints of the ranges are
1851 different. In that case the resulting range
1852 should be whichever range is more precise.
1853 Typically that will be the VR_RANGE.
1854
1855 2. The VR_ANTI_RANGE is completely disjoint from
1856 the VR_RANGE. In this case the resulting range
1857 should be the VR_RANGE.
1858
1859 3. There is some overlap between the VR_ANTI_RANGE
1860 and the VR_RANGE.
1861
1862 3a. If the high limit of the VR_ANTI_RANGE resides
1863 within the VR_RANGE, then the result is a new
1864 VR_RANGE starting at the high limit of the
1865 VR_ANTI_RANGE + 1 and extending to the
1866 high limit of the original VR_RANGE.
1867
1868 3b. If the low limit of the VR_ANTI_RANGE resides
1869 within the VR_RANGE, then the result is a new
1870 VR_RANGE starting at the low limit of the original
1871 VR_RANGE and extending to the low limit of the
1872 VR_ANTI_RANGE - 1. */
1873 if (vr_p->type == VR_ANTI_RANGE)
1874 {
1875 anti_min = vr_p->min;
1876 anti_max = vr_p->max;
1877 real_min = var_vr->min;
1878 real_max = var_vr->max;
1879 }
1880 else
1881 {
1882 anti_min = var_vr->min;
1883 anti_max = var_vr->max;
1884 real_min = vr_p->min;
1885 real_max = vr_p->max;
1886 }
1887
1888
1889 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1890 not including any endpoints. */
1891 if (compare_values (anti_max, real_max) == -1
1892 && compare_values (anti_min, real_min) == 1)
1893 {
1894 		  /* If the range covers the whole valid range of
1895 		     the type, keep the more precise anti-range.  */
1896 if (!vrp_val_is_min (real_min)
1897 || !vrp_val_is_max (real_max))
1898 set_value_range (vr_p, VR_RANGE, real_min,
1899 real_max, vr_p->equiv);
1900 }
1901 /* Case 2, VR_ANTI_RANGE completely disjoint from
1902 VR_RANGE. */
1903 else if (compare_values (anti_min, real_max) == 1
1904 || compare_values (anti_max, real_min) == -1)
1905 {
1906 set_value_range (vr_p, VR_RANGE, real_min,
1907 real_max, vr_p->equiv);
1908 }
1909 /* Case 3a, the anti-range extends into the low
1910 part of the real range. Thus creating a new
1911 	     low bound for the real range. */
1912 else if (((cmp = compare_values (anti_max, real_min)) == 1
1913 || cmp == 0)
1914 && compare_values (anti_max, real_max) == -1)
1915 {
1916 gcc_assert (!is_positive_overflow_infinity (anti_max));
1917 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1918 && vrp_val_is_max (anti_max))
1919 {
1920 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1921 {
1922 set_value_range_to_varying (vr_p);
1923 return;
1924 }
1925 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1926 }
1927 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1928 {
1929 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1930 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1931 min = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1932 anti_max,
1933 build_int_cst (TREE_TYPE (var_vr->min),
1934 -1));
1935 else
1936 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1937 anti_max,
1938 build_int_cst (TREE_TYPE (var_vr->min),
1939 1));
1940 }
1941 else
1942 min = fold_build_pointer_plus_hwi (anti_max, 1);
1943 max = real_max;
1944 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1945 }
1946 /* Case 3b, the anti-range extends into the high
1947 part of the real range. Thus creating a new
1948 	     high bound for the real range. */
1949 else if (compare_values (anti_min, real_min) == 1
1950 && ((cmp = compare_values (anti_min, real_max)) == -1
1951 || cmp == 0))
1952 {
1953 gcc_assert (!is_negative_overflow_infinity (anti_min));
1954 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1955 && vrp_val_is_min (anti_min))
1956 {
1957 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1958 {
1959 set_value_range_to_varying (vr_p);
1960 return;
1961 }
1962 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1963 }
1964 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1965 {
1966 if (TYPE_PRECISION (TREE_TYPE (var_vr->min)) == 1
1967 && !TYPE_UNSIGNED (TREE_TYPE (var_vr->min)))
1968 max = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1969 anti_min,
1970 build_int_cst (TREE_TYPE (var_vr->min),
1971 -1));
1972 else
1973 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1974 anti_min,
1975 build_int_cst (TREE_TYPE (var_vr->min),
1976 1));
1977 }
1978 else
1979 max = fold_build_pointer_plus_hwi (anti_min, -1);
1980 min = real_min;
1981 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1982 }
1983 }
1984 }
1985 }
1986
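/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): the case analysis above, modeled on plain integers.  Given
   the range [REAL_MIN, REAL_MAX] and an asserted anti-range
   ~[ANTI_MIN, ANTI_MAX], store the refined range in *NEW_MIN and
   *NEW_MAX and return true, or return false when nothing better than
   VARYING can be said.  Overflow infinities, pointers and the "range
   covers the whole type" wrinkle of case 1 are deliberately left
   out.  */

static bool
intersect_range_with_anti_range_sketch (int real_min, int real_max,
					int anti_min, int anti_max,
					int *new_min, int *new_max)
{
  /* Cases 1 and 2: the anti-range is strictly inside the range or
     completely disjoint from it; the range itself is the answer.  */
  if ((anti_min > real_min && anti_max < real_max)
      || anti_min > real_max
      || anti_max < real_min)
    {
      *new_min = real_min;
      *new_max = real_max;
      return true;
    }

  /* Case 3a: the anti-range covers the low part; raise the minimum.  */
  if (anti_min <= real_min && anti_max >= real_min && anti_max < real_max)
    {
      *new_min = anti_max + 1;
      *new_max = real_max;
      return true;
    }

  /* Case 3b: the anti-range covers the high part; lower the maximum.  */
  if (anti_max >= real_max && anti_min > real_min && anti_min <= real_max)
    {
      *new_min = real_min;
      *new_max = anti_min - 1;
      return true;
    }

  /* The anti-range covers the whole range, e.g. [0, 0] vs ~[0, 0];
     no useful single range remains.  */
  return false;
}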
1987
1988 /* Extract range information from SSA name VAR and store it in VR. If
1989 VAR has an interesting range, use it. Otherwise, create the
1990 range [VAR, VAR] and return it. This is useful in situations where
1991 we may have conditionals testing values of VARYING names. For
1992 instance,
1993
1994 x_3 = y_5;
1995 if (x_3 > y_5)
1996 ...
1997
1998 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1999 always false. */
2000
2001 static void
2002 extract_range_from_ssa_name (value_range_t *vr, tree var)
2003 {
2004 value_range_t *var_vr = get_value_range (var);
2005
2006 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
2007 copy_value_range (vr, var_vr);
2008 else
2009 set_value_range (vr, VR_RANGE, var, var, NULL);
2010
2011 add_equivalence (&vr->equiv, var);
2012 }
2013
2014
2015 /* Wrapper around int_const_binop. If the operation overflows and we
2016 are not using wrapping arithmetic, then adjust the result to be
2017 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
2018 NULL_TREE if we need to use an overflow infinity representation but
2019 the type does not support it. */
2020
2021 static tree
2022 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
2023 {
2024 tree res;
2025
2026 res = int_const_binop (code, val1, val2);
2027
2028 /* If we are using unsigned arithmetic, operate symbolically
2029 on -INF and +INF as int_const_binop only handles signed overflow. */
2030 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
2031 {
2032 int checkz = compare_values (res, val1);
2033 bool overflow = false;
2034
2035 /* Ensure that res = val1 [+*] val2 >= val1
2036 or that res = val1 - val2 <= val1. */
2037 if ((code == PLUS_EXPR
2038 && !(checkz == 1 || checkz == 0))
2039 || (code == MINUS_EXPR
2040 && !(checkz == 0 || checkz == -1)))
2041 {
2042 overflow = true;
2043 }
2044 /* Checking for multiplication overflow is done by dividing the
2045 output of the multiplication by the first input of the
2046 multiplication. If the result of that division operation is
2047 not equal to the second input of the multiplication, then the
2048 multiplication overflowed. */
2049 else if (code == MULT_EXPR && !integer_zerop (val1))
2050 {
2051 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
2052 res,
2053 val1);
2054 int check = compare_values (tmp, val2);
2055
2056 if (check != 0)
2057 overflow = true;
2058 }
2059
2060 if (overflow)
2061 {
2062 res = copy_node (res);
2063 TREE_OVERFLOW (res) = 1;
2064 }
2065
2066 }
2067 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
2068     /* If the signed operation wraps then int_const_binop has done
2069 everything we want. */
2070 ;
2071 else if ((TREE_OVERFLOW (res)
2072 && !TREE_OVERFLOW (val1)
2073 && !TREE_OVERFLOW (val2))
2074 || is_overflow_infinity (val1)
2075 || is_overflow_infinity (val2))
2076 {
2077 /* If the operation overflowed but neither VAL1 nor VAL2 are
2078 overflown, return -INF or +INF depending on the operation
2079 and the combination of signs of the operands. */
2080 int sgn1 = tree_int_cst_sgn (val1);
2081 int sgn2 = tree_int_cst_sgn (val2);
2082
2083 if (needs_overflow_infinity (TREE_TYPE (res))
2084 && !supports_overflow_infinity (TREE_TYPE (res)))
2085 return NULL_TREE;
2086
2087 /* We have to punt on adding infinities of different signs,
2088 since we can't tell what the sign of the result should be.
2089 Likewise for subtracting infinities of the same sign. */
2090 if (((code == PLUS_EXPR && sgn1 != sgn2)
2091 || (code == MINUS_EXPR && sgn1 == sgn2))
2092 && is_overflow_infinity (val1)
2093 && is_overflow_infinity (val2))
2094 return NULL_TREE;
2095
2096 /* Don't try to handle division or shifting of infinities. */
2097 if ((code == TRUNC_DIV_EXPR
2098 || code == FLOOR_DIV_EXPR
2099 || code == CEIL_DIV_EXPR
2100 || code == EXACT_DIV_EXPR
2101 || code == ROUND_DIV_EXPR
2102 || code == RSHIFT_EXPR)
2103 && (is_overflow_infinity (val1)
2104 || is_overflow_infinity (val2)))
2105 return NULL_TREE;
2106
2107 /* Notice that we only need to handle the restricted set of
2108 operations handled by extract_range_from_binary_expr.
2109 Among them, only multiplication, addition and subtraction
2110 can yield overflow without overflown operands because we
2111 are working with integral types only... except in the
2112 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2113 for division too. */
2114
2115 /* For multiplication, the sign of the overflow is given
2116 by the comparison of the signs of the operands. */
2117 if ((code == MULT_EXPR && sgn1 == sgn2)
2118 /* For addition, the operands must be of the same sign
2119 to yield an overflow. Its sign is therefore that
2120 of one of the operands, for example the first. For
2121 infinite operands X + -INF is negative, not positive. */
2122 || (code == PLUS_EXPR
2123 && (sgn1 >= 0
2124 ? !is_negative_overflow_infinity (val2)
2125 : is_positive_overflow_infinity (val2)))
2126 /* For subtraction, non-infinite operands must be of
2127 different signs to yield an overflow. Its sign is
2128 therefore that of the first operand or the opposite of
2129 that of the second operand. A first operand of 0 counts
2130 as positive here, for the corner case 0 - (-INF), which
2131 overflows, but must yield +INF. For infinite operands 0
2132 - INF is negative, not positive. */
2133 || (code == MINUS_EXPR
2134 && (sgn1 >= 0
2135 ? !is_positive_overflow_infinity (val2)
2136 : is_negative_overflow_infinity (val2)))
2137 /* We only get in here with positive shift count, so the
2138 overflow direction is the same as the sign of val1.
2139 Actually rshift does not overflow at all, but we only
2140 handle the case of shifting overflowed -INF and +INF. */
2141 || (code == RSHIFT_EXPR
2142 && sgn1 >= 0)
2143 /* For division, the only case is -INF / -1 = +INF. */
2144 || code == TRUNC_DIV_EXPR
2145 || code == FLOOR_DIV_EXPR
2146 || code == CEIL_DIV_EXPR
2147 || code == EXACT_DIV_EXPR
2148 || code == ROUND_DIV_EXPR)
2149 return (needs_overflow_infinity (TREE_TYPE (res))
2150 ? positive_overflow_infinity (TREE_TYPE (res))
2151 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2152 else
2153 return (needs_overflow_infinity (TREE_TYPE (res))
2154 ? negative_overflow_infinity (TREE_TYPE (res))
2155 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2156 }
2157
2158 return res;
2159 }
2160
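/* A minimal illustrative sketch (hypothetical helpers, not used by the
   pass): the unsigned overflow checks described above, written out on
   plain unsigned integers.  For addition and subtraction the wrapped
   result ends up on the "wrong" side of the first operand; for
   multiplication, dividing the product back by the first operand no
   longer recovers the second one.  */

static bool
unsigned_plus_overflows_sketch (unsigned int val1, unsigned int val2)
{
  /* val1 + val2 wrapped around iff the result is smaller than val1.  */
  return val1 + val2 < val1;
}

static bool
unsigned_minus_overflows_sketch (unsigned int val1, unsigned int val2)
{
  /* val1 - val2 wrapped around iff the result is larger than val1.  */
  return val1 - val2 > val1;
}

static bool
unsigned_mult_overflows_sketch (unsigned int val1, unsigned int val2)
{
  /* VAL1 must be nonzero here, just as in vrp_int_const_binop.  */
  return (val1 * val2) / val1 != val2;
}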
2161
2162 /* For range VR compute two double_int bitmasks.  If a bit is unset
2163    in *MAY_BE_NONZERO, that bit is 0 for every number in the range;
2164    otherwise it might be 0 or 1.  If a bit is set in *MUST_BE_NONZERO,
2165    that bit is 1 for every number in the range; otherwise it might be
2166    0 or 1. */
2167
2168 static bool
2169 zero_nonzero_bits_from_vr (value_range_t *vr,
2170 double_int *may_be_nonzero,
2171 double_int *must_be_nonzero)
2172 {
2173 *may_be_nonzero = double_int_minus_one;
2174 *must_be_nonzero = double_int_zero;
2175 if (!range_int_cst_p (vr))
2176 return false;
2177
2178 if (range_int_cst_singleton_p (vr))
2179 {
2180 *may_be_nonzero = tree_to_double_int (vr->min);
2181 *must_be_nonzero = *may_be_nonzero;
2182 }
2183 else if (tree_int_cst_sgn (vr->min) >= 0
2184 || tree_int_cst_sgn (vr->max) < 0)
2185 {
2186 double_int dmin = tree_to_double_int (vr->min);
2187 double_int dmax = tree_to_double_int (vr->max);
2188 double_int xor_mask = double_int_xor (dmin, dmax);
2189 *may_be_nonzero = double_int_ior (dmin, dmax);
2190 *must_be_nonzero = double_int_and (dmin, dmax);
2191 if (xor_mask.high != 0)
2192 {
2193 unsigned HOST_WIDE_INT mask
2194 = ((unsigned HOST_WIDE_INT) 1
2195 << floor_log2 (xor_mask.high)) - 1;
2196 may_be_nonzero->low = ALL_ONES;
2197 may_be_nonzero->high |= mask;
2198 must_be_nonzero->low = 0;
2199 must_be_nonzero->high &= ~mask;
2200 }
2201 else if (xor_mask.low != 0)
2202 {
2203 unsigned HOST_WIDE_INT mask
2204 = ((unsigned HOST_WIDE_INT) 1
2205 << floor_log2 (xor_mask.low)) - 1;
2206 may_be_nonzero->low |= mask;
2207 must_be_nonzero->low &= ~mask;
2208 }
2209 }
2210
2211 return true;
2212 }
2213
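/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): the same mask computation on a plain unsigned range
   [MIN, MAX] with MIN <= MAX, which corresponds to the same-sign case
   handled above.  For [0x13, 0x24] it yields MAY_BE_NONZERO == 0x3f
   and MUST_BE_NONZERO == 0: every bit above bit 5 is 0 for all values
   in the range, and no bit is set in all of them.  The mask step is
   needed because, for instance, 0x18 lies in the range and has bit 3
   set even though neither bound does, so every bit below the most
   significant differing bit must be treated as unknown.  */

static void
zero_nonzero_bits_sketch (unsigned int min, unsigned int max,
			  unsigned int *may_be_nonzero,
			  unsigned int *must_be_nonzero)
{
  unsigned int xor_mask = min ^ max;

  *may_be_nonzero = min | max;
  *must_be_nonzero = min & max;
  if (xor_mask != 0)
    {
      /* Compute a mask of all bits strictly below the most
	 significant bit where MIN and MAX differ; those bits can take
	 either value somewhere in the range.  */
      unsigned int mask = 1;
      while (mask <= xor_mask / 2)
	mask <<= 1;
      mask -= 1;
      *may_be_nonzero |= mask;
      *must_be_nonzero &= ~mask;
    }
}
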
2214 /* Helper to extract a value-range *VR for a multiplicative operation
2215 *VR0 CODE *VR1. */
2216
2217 static void
2218 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2219 enum tree_code code,
2220 value_range_t *vr0, value_range_t *vr1)
2221 {
2222 enum value_range_type type;
2223 tree val[4];
2224 size_t i;
2225 tree min, max;
2226 bool sop;
2227 int cmp;
2228
2229 /* Multiplications, divisions and shifts are a bit tricky to handle,
2230 depending on the mix of signs we have in the two ranges, we
2231 need to operate on different values to get the minimum and
2232 maximum values for the new range. One approach is to figure
2233 out all the variations of range combinations and do the
2234 operations.
2235
2236 However, this involves several calls to compare_values and it
2237 is pretty convoluted. It's simpler to do the 4 operations
2238      (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2239 MAX1) and then figure the smallest and largest values to form
2240 the new range. */
2241 gcc_assert (code == MULT_EXPR
2242 || code == TRUNC_DIV_EXPR
2243 || code == FLOOR_DIV_EXPR
2244 || code == CEIL_DIV_EXPR
2245 || code == EXACT_DIV_EXPR
2246 || code == ROUND_DIV_EXPR
2247 || code == RSHIFT_EXPR);
2248 gcc_assert ((vr0->type == VR_RANGE
2249 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2250 && vr0->type == vr1->type);
2251
2252 type = vr0->type;
2253
2254 /* Compute the 4 cross operations. */
2255 sop = false;
2256 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2257 if (val[0] == NULL_TREE)
2258 sop = true;
2259
2260 if (vr1->max == vr1->min)
2261 val[1] = NULL_TREE;
2262 else
2263 {
2264 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2265 if (val[1] == NULL_TREE)
2266 sop = true;
2267 }
2268
2269 if (vr0->max == vr0->min)
2270 val[2] = NULL_TREE;
2271 else
2272 {
2273 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2274 if (val[2] == NULL_TREE)
2275 sop = true;
2276 }
2277
2278 if (vr0->min == vr0->max || vr1->min == vr1->max)
2279 val[3] = NULL_TREE;
2280 else
2281 {
2282 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2283 if (val[3] == NULL_TREE)
2284 sop = true;
2285 }
2286
2287 if (sop)
2288 {
2289 set_value_range_to_varying (vr);
2290 return;
2291 }
2292
2293 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2294 of VAL[i]. */
2295 min = val[0];
2296 max = val[0];
2297 for (i = 1; i < 4; i++)
2298 {
2299 if (!is_gimple_min_invariant (min)
2300 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2301 || !is_gimple_min_invariant (max)
2302 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2303 break;
2304
2305 if (val[i])
2306 {
2307 if (!is_gimple_min_invariant (val[i])
2308 || (TREE_OVERFLOW (val[i])
2309 && !is_overflow_infinity (val[i])))
2310 {
2311 /* If we found an overflowed value, set MIN and MAX
2312 to it so that we set the resulting range to
2313 VARYING. */
2314 min = max = val[i];
2315 break;
2316 }
2317
2318 if (compare_values (val[i], min) == -1)
2319 min = val[i];
2320
2321 if (compare_values (val[i], max) == 1)
2322 max = val[i];
2323 }
2324 }
2325
2326 /* If either MIN or MAX overflowed, then set the resulting range to
2327 VARYING. But we do accept an overflow infinity
2328 representation. */
2329 if (min == NULL_TREE
2330 || !is_gimple_min_invariant (min)
2331 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2332 || max == NULL_TREE
2333 || !is_gimple_min_invariant (max)
2334 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2335 {
2336 set_value_range_to_varying (vr);
2337 return;
2338 }
2339
2340 /* We punt if:
2341 1) [-INF, +INF]
2342 2) [-INF, +-INF(OVF)]
2343 3) [+-INF(OVF), +INF]
2344 4) [+-INF(OVF), +-INF(OVF)]
2345 We learn nothing when we have INF and INF(OVF) on both sides.
2346 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2347 overflow. */
2348 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2349 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2350 {
2351 set_value_range_to_varying (vr);
2352 return;
2353 }
2354
2355 cmp = compare_values (min, max);
2356 if (cmp == -2 || cmp == 1)
2357 {
2358 /* If the new range has its limits swapped around (MIN > MAX),
2359 then the operation caused one of them to wrap around, mark
2360 the new range VARYING. */
2361 set_value_range_to_varying (vr);
2362 }
2363 else
2364 set_value_range (vr, type, min, max, NULL);
2365 }
2366
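/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): the cross-product approach above on plain signed integers,
   with overflow ignored.  For [-3, 5] * [2, 4] the four corner
   products are -6, -12, 10 and 20, so the resulting range is
   [-12, 20].  */

static void
cross_product_range_sketch (int min0, int max0, int min1, int max1,
			    int *new_min, int *new_max)
{
  int val[4];
  size_t i;

  /* The four corner products.  */
  val[0] = min0 * min1;
  val[1] = min0 * max1;
  val[2] = max0 * min1;
  val[3] = max0 * max1;

  /* The smallest and largest of them bound the result.  */
  *new_min = val[0];
  *new_max = val[0];
  for (i = 1; i < 4; i++)
    {
      if (val[i] < *new_min)
	*new_min = val[i];
      if (val[i] > *new_max)
	*new_max = val[i];
    }
}
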
2367 /* Extract range information from a binary operation CODE based on
2368 the ranges of each of its operands, *VR0 and *VR1 with resulting
2369 type EXPR_TYPE. The resulting range is stored in *VR. */
2370
2371 static void
2372 extract_range_from_binary_expr_1 (value_range_t *vr,
2373 enum tree_code code, tree expr_type,
2374 value_range_t *vr0_, value_range_t *vr1_)
2375 {
2376 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2377 enum value_range_type type;
2378 tree min = NULL_TREE, max = NULL_TREE;
2379 int cmp;
2380
2381 if (!INTEGRAL_TYPE_P (expr_type)
2382 && !POINTER_TYPE_P (expr_type))
2383 {
2384 set_value_range_to_varying (vr);
2385 return;
2386 }
2387
2388 /* Not all binary expressions can be applied to ranges in a
2389 meaningful way. Handle only arithmetic operations. */
2390 if (code != PLUS_EXPR
2391 && code != MINUS_EXPR
2392 && code != POINTER_PLUS_EXPR
2393 && code != MULT_EXPR
2394 && code != TRUNC_DIV_EXPR
2395 && code != FLOOR_DIV_EXPR
2396 && code != CEIL_DIV_EXPR
2397 && code != EXACT_DIV_EXPR
2398 && code != ROUND_DIV_EXPR
2399 && code != TRUNC_MOD_EXPR
2400 && code != RSHIFT_EXPR
2401 && code != MIN_EXPR
2402 && code != MAX_EXPR
2403 && code != BIT_AND_EXPR
2404 && code != BIT_IOR_EXPR
2405 && code != BIT_XOR_EXPR)
2406 {
2407 set_value_range_to_varying (vr);
2408 return;
2409 }
2410
2411 /* If both ranges are UNDEFINED, so is the result. */
2412 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2413 {
2414 set_value_range_to_undefined (vr);
2415 return;
2416 }
2417 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2418 code. At some point we may want to special-case operations that
2419      have an UNDEFINED result for all or some value-ranges of the non-UNDEFINED
2420 operand. */
2421 else if (vr0.type == VR_UNDEFINED)
2422 set_value_range_to_varying (&vr0);
2423 else if (vr1.type == VR_UNDEFINED)
2424 set_value_range_to_varying (&vr1);
2425
2426 /* The type of the resulting value range defaults to VR0.TYPE. */
2427 type = vr0.type;
2428
2429 /* Refuse to operate on VARYING ranges, ranges of different kinds
2430 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2431 because we may be able to derive a useful range even if one of
2432 the operands is VR_VARYING or symbolic range. Similarly for
2433 divisions. TODO, we may be able to derive anti-ranges in
2434 some cases. */
2435 if (code != BIT_AND_EXPR
2436 && code != BIT_IOR_EXPR
2437 && code != TRUNC_DIV_EXPR
2438 && code != FLOOR_DIV_EXPR
2439 && code != CEIL_DIV_EXPR
2440 && code != EXACT_DIV_EXPR
2441 && code != ROUND_DIV_EXPR
2442 && code != TRUNC_MOD_EXPR
2443 && (vr0.type == VR_VARYING
2444 || vr1.type == VR_VARYING
2445 || vr0.type != vr1.type
2446 || symbolic_range_p (&vr0)
2447 || symbolic_range_p (&vr1)))
2448 {
2449 set_value_range_to_varying (vr);
2450 return;
2451 }
2452
2453 /* Now evaluate the expression to determine the new range. */
2454 if (POINTER_TYPE_P (expr_type))
2455 {
2456 if (code == MIN_EXPR || code == MAX_EXPR)
2457 {
2458 /* For MIN/MAX expressions with pointers, we only care about
2459 	     nullness: if both are non-null, then the result is nonnull.
2460 If both are null, then the result is null. Otherwise they
2461 are varying. */
2462 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2463 set_value_range_to_nonnull (vr, expr_type);
2464 else if (range_is_null (&vr0) && range_is_null (&vr1))
2465 set_value_range_to_null (vr, expr_type);
2466 else
2467 set_value_range_to_varying (vr);
2468 }
2469 else if (code == POINTER_PLUS_EXPR)
2470 {
2471 /* For pointer types, we are really only interested in asserting
2472 whether the expression evaluates to non-NULL. */
2473 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2474 set_value_range_to_nonnull (vr, expr_type);
2475 else if (range_is_null (&vr0) && range_is_null (&vr1))
2476 set_value_range_to_null (vr, expr_type);
2477 else
2478 set_value_range_to_varying (vr);
2479 }
2480 else if (code == BIT_AND_EXPR)
2481 {
2482 /* For pointer types, we are really only interested in asserting
2483 whether the expression evaluates to non-NULL. */
2484 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2485 set_value_range_to_nonnull (vr, expr_type);
2486 else if (range_is_null (&vr0) || range_is_null (&vr1))
2487 set_value_range_to_null (vr, expr_type);
2488 else
2489 set_value_range_to_varying (vr);
2490 }
2491 else
2492 set_value_range_to_varying (vr);
2493
2494 return;
2495 }
2496
2497 /* For integer ranges, apply the operation to each end of the
2498 range and see what we end up with. */
2499 if (code == PLUS_EXPR)
2500 {
2501 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2502 VR_VARYING. It would take more effort to compute a precise
2503 range for such a case. For example, if we have op0 == 1 and
2504 op1 == -1 with their ranges both being ~[0,0], we would have
2505 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2506 Note that we are guaranteed to have vr0.type == vr1.type at
2507 this point. */
2508 if (vr0.type == VR_ANTI_RANGE)
2509 {
2510 set_value_range_to_varying (vr);
2511 return;
2512 }
2513
2514 /* For operations that make the resulting range directly
2515 proportional to the original ranges, apply the operation to
2516 the same end of each range. */
2517 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2518 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2519
2520 /* If both additions overflowed the range kind is still correct.
2521 This happens regularly with subtracting something in unsigned
2522 arithmetic.
2523 ??? See PR30318 for all the cases we do not handle. */
2524 if ((TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2525 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2526 {
2527 min = build_int_cst_wide (TREE_TYPE (min),
2528 TREE_INT_CST_LOW (min),
2529 TREE_INT_CST_HIGH (min));
2530 max = build_int_cst_wide (TREE_TYPE (max),
2531 TREE_INT_CST_LOW (max),
2532 TREE_INT_CST_HIGH (max));
2533 }
2534 }
2535 else if (code == MIN_EXPR
2536 || code == MAX_EXPR)
2537 {
2538 if (vr0.type == VR_ANTI_RANGE)
2539 {
2540 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2541 the resulting VR_ANTI_RANGE is the same - intersection
2542 of the two ranges. */
2543 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2544 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2545 }
2546 else
2547 {
2548 /* For operations that make the resulting range directly
2549 proportional to the original ranges, apply the operation to
2550 the same end of each range. */
2551 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2552 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2553 }
2554 }
2555 else if (code == MULT_EXPR)
2556 {
2557 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2558 drop to VR_VARYING. It would take more effort to compute a
2559 precise range for such a case. For example, if we have
2560 op0 == 65536 and op1 == 65536 with their ranges both being
2561 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2562 we cannot claim that the product is in ~[0,0]. Note that we
2563 are guaranteed to have vr0.type == vr1.type at this
2564 point. */
2565 if (vr0.type == VR_ANTI_RANGE
2566 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2567 {
2568 set_value_range_to_varying (vr);
2569 return;
2570 }
2571
2572 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2573 return;
2574 }
2575 else if (code == RSHIFT_EXPR)
2576 {
2577 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2578 then drop to VR_VARYING. Outside of this range we get undefined
2579 behavior from the shift operation. We cannot even trust
2580 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2581 shifts, and the operation at the tree level may be widened. */
2582 if (vr1.type != VR_RANGE
2583 || !value_range_nonnegative_p (&vr1)
2584 || TREE_CODE (vr1.max) != INTEGER_CST
2585 || compare_tree_int (vr1.max, TYPE_PRECISION (expr_type) - 1) == 1)
2586 {
2587 set_value_range_to_varying (vr);
2588 return;
2589 }
2590
2591 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2592 return;
2593 }
2594 else if (code == TRUNC_DIV_EXPR
2595 || code == FLOOR_DIV_EXPR
2596 || code == CEIL_DIV_EXPR
2597 || code == EXACT_DIV_EXPR
2598 || code == ROUND_DIV_EXPR)
2599 {
2600 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2601 {
2602 /* For division, if op1 has VR_RANGE but op0 does not, something
2603 can be deduced just from that range. Say [min, max] / [4, max]
2604 gives [min / 4, max / 4] range. */
2605 if (vr1.type == VR_RANGE
2606 && !symbolic_range_p (&vr1)
2607 && !range_includes_zero_p (&vr1))
2608 {
2609 vr0.type = type = VR_RANGE;
2610 vr0.min = vrp_val_min (expr_type);
2611 vr0.max = vrp_val_max (expr_type);
2612 }
2613 else
2614 {
2615 set_value_range_to_varying (vr);
2616 return;
2617 }
2618 }
2619
2620 /* For divisions, if flag_non_call_exceptions is true, we must
2621 not eliminate a division by zero. */
2622 if (cfun->can_throw_non_call_exceptions
2623 && (vr1.type != VR_RANGE
2624 || symbolic_range_p (&vr1)
2625 || range_includes_zero_p (&vr1)))
2626 {
2627 set_value_range_to_varying (vr);
2628 return;
2629 }
2630
2631 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2632 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2633 include 0. */
2634 if (vr0.type == VR_RANGE
2635 && (vr1.type != VR_RANGE
2636 || symbolic_range_p (&vr1)
2637 || range_includes_zero_p (&vr1)))
2638 {
2639 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2640 int cmp;
2641
2642 min = NULL_TREE;
2643 max = NULL_TREE;
2644 if (TYPE_UNSIGNED (expr_type)
2645 || value_range_nonnegative_p (&vr1))
2646 {
2647 /* For unsigned division or when divisor is known
2648 to be non-negative, the range has to cover
2649 all numbers from 0 to max for positive max
2650 and all numbers from min to 0 for negative min. */
2651 cmp = compare_values (vr0.max, zero);
2652 if (cmp == -1)
2653 max = zero;
2654 else if (cmp == 0 || cmp == 1)
2655 max = vr0.max;
2656 else
2657 type = VR_VARYING;
2658 cmp = compare_values (vr0.min, zero);
2659 if (cmp == 1)
2660 min = zero;
2661 else if (cmp == 0 || cmp == -1)
2662 min = vr0.min;
2663 else
2664 type = VR_VARYING;
2665 }
2666 else
2667 {
2668 /* Otherwise the range is -max .. max or min .. -min
2669 depending on which bound is bigger in absolute value,
2670 as the division can change the sign. */
2671 abs_extent_range (vr, vr0.min, vr0.max);
2672 return;
2673 }
2674 if (type == VR_VARYING)
2675 {
2676 set_value_range_to_varying (vr);
2677 return;
2678 }
2679 }
2680 else
2681 {
2682 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2683 return;
2684 }
2685 }
2686 else if (code == TRUNC_MOD_EXPR)
2687 {
2688 if (vr1.type != VR_RANGE
2689 || symbolic_range_p (&vr1)
2690 || range_includes_zero_p (&vr1)
2691 || vrp_val_is_min (vr1.min))
2692 {
2693 set_value_range_to_varying (vr);
2694 return;
2695 }
2696 type = VR_RANGE;
2697 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2698 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2699 if (tree_int_cst_lt (max, vr1.max))
2700 max = vr1.max;
2701 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2702 /* If the dividend is non-negative the modulus will be
2703 non-negative as well. */
2704 if (TYPE_UNSIGNED (expr_type)
2705 || value_range_nonnegative_p (&vr0))
2706 min = build_int_cst (TREE_TYPE (max), 0);
2707 else
2708 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2709 }
2710 else if (code == MINUS_EXPR)
2711 {
2712 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2713 VR_VARYING. It would take more effort to compute a precise
2714 range for such a case. For example, if we have op0 == 1 and
2715 op1 == 1 with their ranges both being ~[0,0], we would have
2716 op0 - op1 == 0, so we cannot claim that the difference is in
2717 ~[0,0]. Note that we are guaranteed to have
2718 vr0.type == vr1.type at this point. */
2719 if (vr0.type == VR_ANTI_RANGE)
2720 {
2721 set_value_range_to_varying (vr);
2722 return;
2723 }
2724
2725 /* For MINUS_EXPR, apply the operation to the opposite ends of
2726 each range. */
2727 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2728 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2729 }
2730 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2731 {
2732 bool int_cst_range0, int_cst_range1;
2733 double_int may_be_nonzero0, may_be_nonzero1;
2734 double_int must_be_nonzero0, must_be_nonzero1;
2735
2736 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2737 &must_be_nonzero0);
2738 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2739 &must_be_nonzero1);
2740
2741 type = VR_RANGE;
2742 if (code == BIT_AND_EXPR)
2743 {
2744 double_int dmax;
2745 min = double_int_to_tree (expr_type,
2746 double_int_and (must_be_nonzero0,
2747 must_be_nonzero1));
2748 dmax = double_int_and (may_be_nonzero0, may_be_nonzero1);
2749 /* If both input ranges contain only negative values we can
2750 truncate the result range maximum to the minimum of the
2751 input range maxima. */
2752 if (int_cst_range0 && int_cst_range1
2753 && tree_int_cst_sgn (vr0.max) < 0
2754 && tree_int_cst_sgn (vr1.max) < 0)
2755 {
2756 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2757 TYPE_UNSIGNED (expr_type));
2758 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2759 TYPE_UNSIGNED (expr_type));
2760 }
2761 /* If either input range contains only non-negative values
2762 we can truncate the result range maximum to the respective
2763 maximum of the input range. */
2764 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2765 dmax = double_int_min (dmax, tree_to_double_int (vr0.max),
2766 TYPE_UNSIGNED (expr_type));
2767 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2768 dmax = double_int_min (dmax, tree_to_double_int (vr1.max),
2769 TYPE_UNSIGNED (expr_type));
2770 max = double_int_to_tree (expr_type, dmax);
2771 }
2772 else if (code == BIT_IOR_EXPR)
2773 {
2774 double_int dmin;
2775 max = double_int_to_tree (expr_type,
2776 double_int_ior (may_be_nonzero0,
2777 may_be_nonzero1));
2778 dmin = double_int_ior (must_be_nonzero0, must_be_nonzero1);
2779 /* If the input ranges contain only positive values we can
2780 truncate the minimum of the result range to the maximum
2781 of the input range minima. */
2782 if (int_cst_range0 && int_cst_range1
2783 && tree_int_cst_sgn (vr0.min) >= 0
2784 && tree_int_cst_sgn (vr1.min) >= 0)
2785 {
2786 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2787 TYPE_UNSIGNED (expr_type));
2788 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2789 TYPE_UNSIGNED (expr_type));
2790 }
2791 /* If either input range contains only negative values
2792 we can truncate the minimum of the result range to the
2793 respective minimum range. */
2794 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
2795 dmin = double_int_max (dmin, tree_to_double_int (vr0.min),
2796 TYPE_UNSIGNED (expr_type));
2797 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
2798 dmin = double_int_max (dmin, tree_to_double_int (vr1.min),
2799 TYPE_UNSIGNED (expr_type));
2800 min = double_int_to_tree (expr_type, dmin);
2801 }
2802 else if (code == BIT_XOR_EXPR)
2803 {
2804 double_int result_zero_bits, result_one_bits;
2805 result_zero_bits
2806 = double_int_ior (double_int_and (must_be_nonzero0,
2807 must_be_nonzero1),
2808 double_int_not
2809 (double_int_ior (may_be_nonzero0,
2810 may_be_nonzero1)));
2811 result_one_bits
2812 = double_int_ior (double_int_and
2813 (must_be_nonzero0,
2814 double_int_not (may_be_nonzero1)),
2815 double_int_and
2816 (must_be_nonzero1,
2817 double_int_not (may_be_nonzero0)));
2818 max = double_int_to_tree (expr_type,
2819 double_int_not (result_zero_bits));
2820 min = double_int_to_tree (expr_type, result_one_bits);
2821 /* If the range has all positive or all negative values the
2822 result is better than VARYING. */
2823 if (tree_int_cst_sgn (min) < 0
2824 || tree_int_cst_sgn (max) >= 0)
2825 ;
2826 else
2827 max = min = NULL_TREE;
2828 }
2829 }
2830 else
2831 gcc_unreachable ();
2832
2833 /* If either MIN or MAX overflowed, then set the resulting range to
2834 VARYING. But we do accept an overflow infinity
2835 representation. */
2836 if (min == NULL_TREE
2837 || !is_gimple_min_invariant (min)
2838 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2839 || max == NULL_TREE
2840 || !is_gimple_min_invariant (max)
2841 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2842 {
2843 set_value_range_to_varying (vr);
2844 return;
2845 }
2846
2847 /* We punt if:
2848 1) [-INF, +INF]
2849 2) [-INF, +-INF(OVF)]
2850 3) [+-INF(OVF), +INF]
2851 4) [+-INF(OVF), +-INF(OVF)]
2852 We learn nothing when we have INF and INF(OVF) on both sides.
2853 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2854 overflow. */
2855 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2856 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2857 {
2858 set_value_range_to_varying (vr);
2859 return;
2860 }
2861
2862 cmp = compare_values (min, max);
2863 if (cmp == -2 || cmp == 1)
2864 {
2865 /* If the new range has its limits swapped around (MIN > MAX),
2866 then the operation caused one of them to wrap around, mark
2867 the new range VARYING. */
2868 set_value_range_to_varying (vr);
2869 }
2870 else
2871 set_value_range (vr, type, min, max, NULL);
2872 }
2873
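/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): how PLUS_EXPR and MINUS_EXPR combine the bounds of two plain
   integer ranges in the code above, with overflow ignored.
   [1, 5] + [10, 20] gives [11, 25], while [1, 5] - [10, 20] combines
   opposite ends and gives [-19, -5].  */

static void
plus_minus_range_sketch (bool is_minus, int min0, int max0,
			 int min1, int max1, int *new_min, int *new_max)
{
  if (!is_minus)
    {
      /* Addition is increasing in both operands: combine like ends.  */
      *new_min = min0 + min1;
      *new_max = max0 + max1;
    }
  else
    {
      /* Subtraction is decreasing in the second operand: combine
	 opposite ends.  */
      *new_min = min0 - max1;
      *new_max = max0 - min1;
    }
}
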
2874 /* Extract range information from a binary expression OP0 CODE OP1 based on
2875 the ranges of each of its operands with resulting type EXPR_TYPE.
2876 The resulting range is stored in *VR. */
2877
2878 static void
2879 extract_range_from_binary_expr (value_range_t *vr,
2880 enum tree_code code,
2881 tree expr_type, tree op0, tree op1)
2882 {
2883 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2884 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2885
2886 /* Get value ranges for each operand. For constant operands, create
2887 a new value range with the operand to simplify processing. */
2888 if (TREE_CODE (op0) == SSA_NAME)
2889 vr0 = *(get_value_range (op0));
2890 else if (is_gimple_min_invariant (op0))
2891 set_value_range_to_value (&vr0, op0, NULL);
2892 else
2893 set_value_range_to_varying (&vr0);
2894
2895 if (TREE_CODE (op1) == SSA_NAME)
2896 vr1 = *(get_value_range (op1));
2897 else if (is_gimple_min_invariant (op1))
2898 set_value_range_to_value (&vr1, op1, NULL);
2899 else
2900 set_value_range_to_varying (&vr1);
2901
2902 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
2903 }
2904
2905 /* Extract range information from a unary operation CODE based on
2906    the range of its operand *VR0 with type OP0_TYPE and resulting type TYPE.
2907    The resulting range is stored in *VR. */
2908
2909 static void
2910 extract_range_from_unary_expr_1 (value_range_t *vr,
2911 enum tree_code code, tree type,
2912 value_range_t *vr0_, tree op0_type)
2913 {
2914 value_range_t vr0 = *vr0_;
2915
2916 /* VRP only operates on integral and pointer types. */
2917 if (!(INTEGRAL_TYPE_P (op0_type)
2918 || POINTER_TYPE_P (op0_type))
2919 || !(INTEGRAL_TYPE_P (type)
2920 || POINTER_TYPE_P (type)))
2921 {
2922 set_value_range_to_varying (vr);
2923 return;
2924 }
2925
2926 /* If VR0 is UNDEFINED, so is the result. */
2927 if (vr0.type == VR_UNDEFINED)
2928 {
2929 set_value_range_to_undefined (vr);
2930 return;
2931 }
2932
2933 if (CONVERT_EXPR_CODE_P (code))
2934 {
2935 tree inner_type = op0_type;
2936 tree outer_type = type;
2937
2938 /* If the expression evaluates to a pointer, we are only interested in
2939 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2940 if (POINTER_TYPE_P (type))
2941 {
2942 if (range_is_nonnull (&vr0))
2943 set_value_range_to_nonnull (vr, type);
2944 else if (range_is_null (&vr0))
2945 set_value_range_to_null (vr, type);
2946 else
2947 set_value_range_to_varying (vr);
2948 return;
2949 }
2950
2951 /* If VR0 is varying and we increase the type precision, assume
2952 a full range for the following transformation. */
2953 if (vr0.type == VR_VARYING
2954 && INTEGRAL_TYPE_P (inner_type)
2955 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2956 {
2957 vr0.type = VR_RANGE;
2958 vr0.min = TYPE_MIN_VALUE (inner_type);
2959 vr0.max = TYPE_MAX_VALUE (inner_type);
2960 }
2961
2962 /* If VR0 is a constant range or anti-range and the conversion is
2963 not truncating we can convert the min and max values and
2964 canonicalize the resulting range. Otherwise we can do the
2965 conversion if the size of the range is less than what the
2966 precision of the target type can represent and the range is
2967 not an anti-range. */
2968 if ((vr0.type == VR_RANGE
2969 || vr0.type == VR_ANTI_RANGE)
2970 && TREE_CODE (vr0.min) == INTEGER_CST
2971 && TREE_CODE (vr0.max) == INTEGER_CST
2972 && (!is_overflow_infinity (vr0.min)
2973 || (vr0.type == VR_RANGE
2974 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2975 && needs_overflow_infinity (outer_type)
2976 && supports_overflow_infinity (outer_type)))
2977 && (!is_overflow_infinity (vr0.max)
2978 || (vr0.type == VR_RANGE
2979 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2980 && needs_overflow_infinity (outer_type)
2981 && supports_overflow_infinity (outer_type)))
2982 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2983 || (vr0.type == VR_RANGE
2984 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2985 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2986 size_int (TYPE_PRECISION (outer_type)))))))
2987 {
2988 tree new_min, new_max;
2989 if (is_overflow_infinity (vr0.min))
2990 new_min = negative_overflow_infinity (outer_type);
2991 else
2992 new_min = force_fit_type_double (outer_type,
2993 tree_to_double_int (vr0.min),
2994 0, false);
2995 if (is_overflow_infinity (vr0.max))
2996 new_max = positive_overflow_infinity (outer_type);
2997 else
2998 new_max = force_fit_type_double (outer_type,
2999 tree_to_double_int (vr0.max),
3000 0, false);
3001 set_and_canonicalize_value_range (vr, vr0.type,
3002 new_min, new_max, NULL);
3003 return;
3004 }
3005
3006 set_value_range_to_varying (vr);
3007 return;
3008 }
3009 else if (code == NEGATE_EXPR)
3010 {
3011 /* -X is simply 0 - X, so re-use existing code that also handles
3012 anti-ranges fine. */
3013 value_range_t zero = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3014 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3015 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3016 return;
3017 }
3018 else if (code == ABS_EXPR)
3019 {
3020 tree min, max;
3021 int cmp;
3022
3023 /* Pass through vr0 in the easy cases. */
3024 if (TYPE_UNSIGNED (type)
3025 || value_range_nonnegative_p (&vr0))
3026 {
3027 copy_value_range (vr, &vr0);
3028 return;
3029 }
3030
3031 /* For the remaining varying or symbolic ranges we can't do anything
3032 useful. */
3033 if (vr0.type == VR_VARYING
3034 || symbolic_range_p (&vr0))
3035 {
3036 set_value_range_to_varying (vr);
3037 return;
3038 }
3039
3040 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3041 useful range. */
3042 if (!TYPE_OVERFLOW_UNDEFINED (type)
3043 && ((vr0.type == VR_RANGE
3044 && vrp_val_is_min (vr0.min))
3045 || (vr0.type == VR_ANTI_RANGE
3046 && !vrp_val_is_min (vr0.min))))
3047 {
3048 set_value_range_to_varying (vr);
3049 return;
3050 }
3051
3052 /* ABS_EXPR may flip the range around, if the original range
3053 included negative values. */
3054 if (is_overflow_infinity (vr0.min))
3055 min = positive_overflow_infinity (type);
3056 else if (!vrp_val_is_min (vr0.min))
3057 min = fold_unary_to_constant (code, type, vr0.min);
3058 else if (!needs_overflow_infinity (type))
3059 min = TYPE_MAX_VALUE (type);
3060 else if (supports_overflow_infinity (type))
3061 min = positive_overflow_infinity (type);
3062 else
3063 {
3064 set_value_range_to_varying (vr);
3065 return;
3066 }
3067
3068 if (is_overflow_infinity (vr0.max))
3069 max = positive_overflow_infinity (type);
3070 else if (!vrp_val_is_min (vr0.max))
3071 max = fold_unary_to_constant (code, type, vr0.max);
3072 else if (!needs_overflow_infinity (type))
3073 max = TYPE_MAX_VALUE (type);
3074 else if (supports_overflow_infinity (type)
3075 /* We shouldn't generate [+INF, +INF] as set_value_range
3076 doesn't like this and ICEs. */
3077 && !is_positive_overflow_infinity (min))
3078 max = positive_overflow_infinity (type);
3079 else
3080 {
3081 set_value_range_to_varying (vr);
3082 return;
3083 }
3084
3085 cmp = compare_values (min, max);
3086
3087       /* If a VR_ANTI_RANGE contains zero, then we have
3088 ~[-INF, min(MIN, MAX)]. */
3089 if (vr0.type == VR_ANTI_RANGE)
3090 {
3091 if (range_includes_zero_p (&vr0))
3092 {
3093 /* Take the lower of the two values. */
3094 if (cmp != 1)
3095 max = min;
3096
3097 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3098 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3099 flag_wrapv is set and the original anti-range doesn't include
3100 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3101 if (TYPE_OVERFLOW_WRAPS (type))
3102 {
3103 tree type_min_value = TYPE_MIN_VALUE (type);
3104
3105 min = (vr0.min != type_min_value
3106 ? int_const_binop (PLUS_EXPR, type_min_value,
3107 integer_one_node)
3108 : type_min_value);
3109 }
3110 else
3111 {
3112 if (overflow_infinity_range_p (&vr0))
3113 min = negative_overflow_infinity (type);
3114 else
3115 min = TYPE_MIN_VALUE (type);
3116 }
3117 }
3118 else
3119 {
3120 /* All else has failed, so create the range [0, INF], even for
3121 flag_wrapv since TYPE_MIN_VALUE is in the original
3122 anti-range. */
3123 vr0.type = VR_RANGE;
3124 min = build_int_cst (type, 0);
3125 if (needs_overflow_infinity (type))
3126 {
3127 if (supports_overflow_infinity (type))
3128 max = positive_overflow_infinity (type);
3129 else
3130 {
3131 set_value_range_to_varying (vr);
3132 return;
3133 }
3134 }
3135 else
3136 max = TYPE_MAX_VALUE (type);
3137 }
3138 }
3139
3140 /* If the range contains zero then we know that the minimum value in the
3141 range will be zero. */
3142 else if (range_includes_zero_p (&vr0))
3143 {
3144 if (cmp == 1)
3145 max = min;
3146 min = build_int_cst (type, 0);
3147 }
3148 else
3149 {
3150 /* If the range was reversed, swap MIN and MAX. */
3151 if (cmp == 1)
3152 {
3153 tree t = min;
3154 min = max;
3155 max = t;
3156 }
3157 }
3158
3159 cmp = compare_values (min, max);
3160 if (cmp == -2 || cmp == 1)
3161 {
3162 /* If the new range has its limits swapped around (MIN > MAX),
3163 then the operation caused one of them to wrap around, mark
3164 the new range VARYING. */
3165 set_value_range_to_varying (vr);
3166 }
3167 else
3168 set_value_range (vr, vr0.type, min, max, NULL);
3169 return;
3170 }
3171 else if (code == BIT_NOT_EXPR)
3172 {
3173 /* ~X is simply -1 - X, so re-use existing code that also handles
3174 anti-ranges fine. */
3175 value_range_t minusone = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3176 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3177 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3178 type, &minusone, &vr0);
3179 return;
3180 }
3181 else if (code == PAREN_EXPR)
3182 {
3183 copy_value_range (vr, &vr0);
3184 return;
3185 }
3186
3187 /* For unhandled operations fall back to varying. */
3188 set_value_range_to_varying (vr);
3189 return;
3190 }
3191
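/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): the reductions used above for NEGATE_EXPR and BIT_NOT_EXPR on
   a plain integer range.  -X over [3, 10] is 0 - [3, 10] = [-10, -3];
   ~X over [3, 10] is -1 - [3, 10] = [-11, -4].  */

static void
negate_or_bit_not_range_sketch (bool is_bit_not, int min0, int max0,
				int *new_min, int *new_max)
{
  int c = is_bit_not ? -1 : 0;

  /* C - [MIN0, MAX0] = [C - MAX0, C - MIN0].  */
  *new_min = c - max0;
  *new_max = c - min0;
}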
3192
3193 /* Extract range information from a unary expression CODE OP0 based on
3194 the range of its operand with resulting type TYPE.
3195 The resulting range is stored in *VR. */
3196
3197 static void
3198 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3199 tree type, tree op0)
3200 {
3201 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3202
3203 /* Get value ranges for the operand. For constant operands, create
3204 a new value range with the operand to simplify processing. */
3205 if (TREE_CODE (op0) == SSA_NAME)
3206 vr0 = *(get_value_range (op0));
3207 else if (is_gimple_min_invariant (op0))
3208 set_value_range_to_value (&vr0, op0, NULL);
3209 else
3210 set_value_range_to_varying (&vr0);
3211
3212 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3213 }
3214
3215
3216 /* Extract range information from a conditional expression STMT based on
3217 the ranges of each of its operands and the expression code. */
3218
3219 static void
3220 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3221 {
3222 tree op0, op1;
3223 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3224 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3225
3226 /* Get value ranges for each operand. For constant operands, create
3227 a new value range with the operand to simplify processing. */
3228 op0 = gimple_assign_rhs2 (stmt);
3229 if (TREE_CODE (op0) == SSA_NAME)
3230 vr0 = *(get_value_range (op0));
3231 else if (is_gimple_min_invariant (op0))
3232 set_value_range_to_value (&vr0, op0, NULL);
3233 else
3234 set_value_range_to_varying (&vr0);
3235
3236 op1 = gimple_assign_rhs3 (stmt);
3237 if (TREE_CODE (op1) == SSA_NAME)
3238 vr1 = *(get_value_range (op1));
3239 else if (is_gimple_min_invariant (op1))
3240 set_value_range_to_value (&vr1, op1, NULL);
3241 else
3242 set_value_range_to_varying (&vr1);
3243
3244   /* The resulting value range is the union of the operand ranges. */
3245 vrp_meet (&vr0, &vr1);
3246 copy_value_range (vr, &vr0);
3247 }
3248
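/* A minimal illustrative sketch (hypothetical helper, not used by the
   pass): for a COND_EXPR the result may come from either arm, so the
   best single range is the union of the operand ranges; for example
   [1, 5] and [10, 20] meet to [1, 20].  */

static void
range_union_sketch (int min0, int max0, int min1, int max1,
		    int *new_min, int *new_max)
{
  *new_min = min0 < min1 ? min0 : min1;
  *new_max = max0 > max1 ? max0 : max1;
}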
3249
3250 /* Extract range information from a comparison expression EXPR based
3251 on the range of its operand and the expression code. */
3252
3253 static void
3254 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3255 tree type, tree op0, tree op1)
3256 {
3257 bool sop = false;
3258 tree val;
3259
3260 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3261 NULL);
3262
3263 /* A disadvantage of using a special infinity as an overflow
3264 representation is that we lose the ability to record overflow
3265 when we don't have an infinity. So we have to ignore a result
3266 which relies on overflow. */
3267
3268 if (val && !is_overflow_infinity (val) && !sop)
3269 {
3270 /* Since this expression was found on the RHS of an assignment,
3271 its type may be different from _Bool. Convert VAL to EXPR's
3272 type. */
3273 val = fold_convert (type, val);
3274 if (is_gimple_min_invariant (val))
3275 set_value_range_to_value (vr, val, vr->equiv);
3276 else
3277 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3278 }
3279 else
3280 /* The result of a comparison is always true or false. */
3281 set_value_range_to_truthvalue (vr, type);
3282 }
3283
3284 /* Try to derive a nonnegative or nonzero range out of STMT relying
3285 primarily on generic routines in fold in conjunction with range data.
3286 Store the result in *VR */
3287
3288 static void
3289 extract_range_basic (value_range_t *vr, gimple stmt)
3290 {
3291 bool sop = false;
3292 tree type = gimple_expr_type (stmt);
3293
3294 if (INTEGRAL_TYPE_P (type)
3295 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3296 set_value_range_to_nonnegative (vr, type,
3297 sop || stmt_overflow_infinity (stmt));
3298 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3299 && !sop)
3300 set_value_range_to_nonnull (vr, type);
3301 else
3302 set_value_range_to_varying (vr);
3303 }
3304
3305
3306 /* Try to compute a useful range out of assignment STMT and store it
3307 in *VR. */
3308
3309 static void
3310 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3311 {
3312 enum tree_code code = gimple_assign_rhs_code (stmt);
3313
3314 if (code == ASSERT_EXPR)
3315 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3316 else if (code == SSA_NAME)
3317 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3318 else if (TREE_CODE_CLASS (code) == tcc_binary)
3319 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3320 gimple_expr_type (stmt),
3321 gimple_assign_rhs1 (stmt),
3322 gimple_assign_rhs2 (stmt));
3323 else if (TREE_CODE_CLASS (code) == tcc_unary)
3324 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3325 gimple_expr_type (stmt),
3326 gimple_assign_rhs1 (stmt));
3327 else if (code == COND_EXPR)
3328 extract_range_from_cond_expr (vr, stmt);
3329 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3330 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3331 gimple_expr_type (stmt),
3332 gimple_assign_rhs1 (stmt),
3333 gimple_assign_rhs2 (stmt));
3334 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3335 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3336 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3337 else
3338 set_value_range_to_varying (vr);
3339
3340 if (vr->type == VR_VARYING)
3341 extract_range_basic (vr, stmt);
3342 }
3343
3344 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3345 would be profitable to adjust VR using scalar evolution information
3346 for VAR. If so, update VR with the new limits. */
3347
3348 static void
3349 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3350 gimple stmt, tree var)
3351 {
3352 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3353 enum ev_direction dir;
3354
3355 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3356 better opportunities than a regular range, but I'm not sure. */
3357 if (vr->type == VR_ANTI_RANGE)
3358 return;
3359
3360 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3361
3362 /* Like in PR19590, scev can return a constant function. */
3363 if (is_gimple_min_invariant (chrec))
3364 {
3365 set_value_range_to_value (vr, chrec, vr->equiv);
3366 return;
3367 }
3368
3369 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3370 return;
3371
3372 init = initial_condition_in_loop_num (chrec, loop->num);
3373 tem = op_with_constant_singleton_value_range (init);
3374 if (tem)
3375 init = tem;
3376 step = evolution_part_in_loop_num (chrec, loop->num);
3377 tem = op_with_constant_singleton_value_range (step);
3378 if (tem)
3379 step = tem;
3380
3381 /* If STEP is symbolic, we can't know whether INIT will be the
3382 minimum or maximum value in the range. Also, unless INIT is
3383 a simple expression, compare_values and possibly other functions
3384 in tree-vrp won't be able to handle it. */
3385 if (step == NULL_TREE
3386 || !is_gimple_min_invariant (step)
3387 || !valid_value_p (init))
3388 return;
3389
3390 dir = scev_direction (chrec);
3391 if (/* Do not adjust ranges if we do not know whether the iv increases
3392 or decreases, ... */
3393 dir == EV_DIR_UNKNOWN
3394 /* ... or if it may wrap. */
3395 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3396 true))
3397 return;
3398
3399 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3400 negative_overflow_infinity and positive_overflow_infinity,
3401 because we have concluded that the loop probably does not
3402 wrap. */
3403
3404 type = TREE_TYPE (var);
3405 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3406 tmin = lower_bound_in_type (type, type);
3407 else
3408 tmin = TYPE_MIN_VALUE (type);
3409 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3410 tmax = upper_bound_in_type (type, type);
3411 else
3412 tmax = TYPE_MAX_VALUE (type);
3413
3414 /* Try to use estimated number of iterations for the loop to constrain the
3415 final value in the evolution. */
3416 if (TREE_CODE (step) == INTEGER_CST
3417 && is_gimple_val (init)
3418 && (TREE_CODE (init) != SSA_NAME
3419 || get_value_range (init)->type == VR_RANGE))
3420 {
3421 double_int nit;
3422
3423 if (estimated_loop_iterations (loop, true, &nit))
3424 {
3425 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3426 double_int dtmp;
3427 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3428 int overflow = 0;
3429
3430 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3431 unsigned_p, &overflow);
3432 /* If the multiplication overflowed we can't do a meaningful
3433 adjustment. Likewise if the result doesn't fit in the type
3434 of the induction variable. For a signed type we have to
3435 check whether the result has the expected signedness which
3436 is that of the step as number of iterations is unsigned. */
3437 if (!overflow
3438 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3439 && (unsigned_p
3440 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3441 {
3442 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3443 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3444 TREE_TYPE (init), init, tem);
3445 /* Likewise if the addition did. */
3446 if (maxvr.type == VR_RANGE)
3447 {
3448 tmin = maxvr.min;
3449 tmax = maxvr.max;
3450 }
3451 }
3452 }
3453 }
3454
3455 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3456 {
3457 min = tmin;
3458 max = tmax;
3459
3460 /* For VARYING or UNDEFINED ranges, just about anything we get
3461 from scalar evolutions should be better. */
3462
3463 if (dir == EV_DIR_DECREASES)
3464 max = init;
3465 else
3466 min = init;
3467
3468 /* If we would create an invalid range, then just assume we
3469 know absolutely nothing. This may be over-conservative,
3470 but it's clearly safe, and should happen only in unreachable
3471 parts of code, or for invalid programs. */
3472 if (compare_values (min, max) == 1)
3473 return;
3474
3475 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3476 }
3477 else if (vr->type == VR_RANGE)
3478 {
3479 min = vr->min;
3480 max = vr->max;
3481
3482 if (dir == EV_DIR_DECREASES)
3483 {
3484 /* INIT is the maximum value. If INIT is lower than VR->MAX
3485 but no smaller than VR->MIN, set VR->MAX to INIT. */
3486 if (compare_values (init, max) == -1)
3487 max = init;
3488
3489 /* According to the loop information, the variable does not
3490 overflow. If we think it does, probably because of an
3491 overflow due to arithmetic on a different INF value,
3492 reset now. */
3493 if (is_negative_overflow_infinity (min)
3494 || compare_values (min, tmin) == -1)
3495 min = tmin;
3496
3497 }
3498 else
3499 {
3500 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3501 if (compare_values (init, min) == 1)
3502 min = init;
3503
3504 if (is_positive_overflow_infinity (max)
3505 || compare_values (tmax, max) == -1)
3506 max = tmax;
3507 }
3508
3509 /* If we just created an invalid range with the minimum
3510 greater than the maximum, we fail conservatively.
3511 This should happen only in unreachable
3512 parts of code, or for invalid programs. */
3513 if (compare_values (min, max) == 1)
3514 return;
3515
3516 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3517 }
3518 }
3519
3520 /* Return true if VAR may overflow at STMT. This checks any available
3521 loop information to see if we can determine that VAR does not
3522 overflow. */
3523
3524 static bool
3525 vrp_var_may_overflow (tree var, gimple stmt)
3526 {
3527 struct loop *l;
3528 tree chrec, init, step;
3529
3530 if (current_loops == NULL)
3531 return true;
3532
3533 l = loop_containing_stmt (stmt);
3534 if (l == NULL
3535 || !loop_outer (l))
3536 return true;
3537
3538 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3539 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3540 return true;
3541
3542 init = initial_condition_in_loop_num (chrec, l->num);
3543 step = evolution_part_in_loop_num (chrec, l->num);
3544
3545 if (step == NULL_TREE
3546 || !is_gimple_min_invariant (step)
3547 || !valid_value_p (init))
3548 return true;
3549
3550 /* If we get here, we know something useful about VAR based on the
3551 loop information. If it wraps, it may overflow. */
3552
3553 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3554 true))
3555 return true;
3556
3557 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3558 {
3559 print_generic_expr (dump_file, var, 0);
3560 fprintf (dump_file, ": loop information indicates does not overflow\n");
3561 }
3562
3563 return false;
3564 }
3565
3566
3567 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3568
3569 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3570 all the values in the ranges.
3571
3572 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3573
3574 - Return NULL_TREE if it is not always possible to determine the
3575 value of the comparison.
3576
3577 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3578 overflow infinity was used in the test. */
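/* For illustration (an added example with constant ranges): with
   VR0 = [1, 5] and VR1 = [10, 20], VR0 < VR1 is boolean_true_node and
   VR0 == VR1 is boolean_false_node; with VR1 = [4, 20] instead,
   VR0 < VR1 is NULL_TREE because the result depends on the runtime
   values.  */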
3579
3580
3581 static tree
3582 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3583 bool *strict_overflow_p)
3584 {
3585 /* VARYING or UNDEFINED ranges cannot be compared. */
3586 if (vr0->type == VR_VARYING
3587 || vr0->type == VR_UNDEFINED
3588 || vr1->type == VR_VARYING
3589 || vr1->type == VR_UNDEFINED)
3590 return NULL_TREE;
3591
3592 /* Anti-ranges need to be handled separately. */
3593 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3594 {
3595 /* If both are anti-ranges, then we cannot compute any
3596 comparison. */
3597 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3598 return NULL_TREE;
3599
3600 /* These comparisons are never statically computable. */
3601 if (comp == GT_EXPR
3602 || comp == GE_EXPR
3603 || comp == LT_EXPR
3604 || comp == LE_EXPR)
3605 return NULL_TREE;
3606
3607 /* Equality can be computed only between a range and an
3608 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3609 if (vr0->type == VR_RANGE)
3610 {
3611 /* To simplify processing, make VR0 the anti-range. */
3612 value_range_t *tmp = vr0;
3613 vr0 = vr1;
3614 vr1 = tmp;
3615 }
3616
3617 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3618
3619 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3620 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3621 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3622
3623 return NULL_TREE;
3624 }
3625
3626 if (!usable_range_p (vr0, strict_overflow_p)
3627 || !usable_range_p (vr1, strict_overflow_p))
3628 return NULL_TREE;
3629
3630 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3631 operands around and change the comparison code. */
3632 if (comp == GT_EXPR || comp == GE_EXPR)
3633 {
3634 value_range_t *tmp;
3635 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3636 tmp = vr0;
3637 vr0 = vr1;
3638 vr1 = tmp;
3639 }
3640
3641 if (comp == EQ_EXPR)
3642 {
3643 /* Equality may only be computed if both ranges represent
3644 exactly one value. */
3645 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3646 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3647 {
3648 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3649 strict_overflow_p);
3650 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3651 strict_overflow_p);
3652 if (cmp_min == 0 && cmp_max == 0)
3653 return boolean_true_node;
3654 else if (cmp_min != -2 && cmp_max != -2)
3655 return boolean_false_node;
3656 }
3657 /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
3658 else if (compare_values_warnv (vr0->min, vr1->max,
3659 strict_overflow_p) == 1
3660 || compare_values_warnv (vr1->min, vr0->max,
3661 strict_overflow_p) == 1)
3662 return boolean_false_node;
3663
3664 return NULL_TREE;
3665 }
3666 else if (comp == NE_EXPR)
3667 {
3668 int cmp1, cmp2;
3669
3670 /* If VR0 is completely to the left or completely to the right
3671 of VR1, they are always different. Notice that we need to
3672 make sure that both comparisons yield similar results to
3673 avoid comparing values that cannot be compared at
3674 compile-time. */
3675 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3676 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3677 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3678 return boolean_true_node;
3679
3680 /* If VR0 and VR1 represent a single value and are identical,
3681 return false. */
3682 else if (compare_values_warnv (vr0->min, vr0->max,
3683 strict_overflow_p) == 0
3684 && compare_values_warnv (vr1->min, vr1->max,
3685 strict_overflow_p) == 0
3686 && compare_values_warnv (vr0->min, vr1->min,
3687 strict_overflow_p) == 0
3688 && compare_values_warnv (vr0->max, vr1->max,
3689 strict_overflow_p) == 0)
3690 return boolean_false_node;
3691
3692 /* Otherwise, they may or may not be different. */
3693 else
3694 return NULL_TREE;
3695 }
3696 else if (comp == LT_EXPR || comp == LE_EXPR)
3697 {
3698 int tst;
3699
3700 /* If VR0 is to the left of VR1, return true. */
3701 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3702 if ((comp == LT_EXPR && tst == -1)
3703 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3704 {
3705 if (overflow_infinity_range_p (vr0)
3706 || overflow_infinity_range_p (vr1))
3707 *strict_overflow_p = true;
3708 return boolean_true_node;
3709 }
3710
3711 /* If VR0 is to the right of VR1, return false. */
3712 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3713 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3714 || (comp == LE_EXPR && tst == 1))
3715 {
3716 if (overflow_infinity_range_p (vr0)
3717 || overflow_infinity_range_p (vr1))
3718 *strict_overflow_p = true;
3719 return boolean_false_node;
3720 }
3721
3722 /* Otherwise, we don't know. */
3723 return NULL_TREE;
3724 }
3725
3726 gcc_unreachable ();
3727 }
3728
3729
3730 /* Given a value range VR, a value VAL and a comparison code COMP, return
3731 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3732 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3733 always returns false. Return NULL_TREE if it is not always
3734 possible to determine the value of the comparison. Also set
3735 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3736 infinity was used in the test. */
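/* For illustration (an added example with a constant range): with
   VR = [10, 20], "VR > 5" is boolean_true_node, "VR == 3" is
   boolean_false_node, and "VR <= 15" is NULL_TREE because only part of
   the range satisfies the predicate.  */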
3737
3738 static tree
3739 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3740 bool *strict_overflow_p)
3741 {
3742 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3743 return NULL_TREE;
3744
3745 /* Anti-ranges need to be handled separately. */
3746 if (vr->type == VR_ANTI_RANGE)
3747 {
3748 /* For anti-ranges, the only predicates that we can compute at
3749 compile time are equality and inequality. */
3750 if (comp == GT_EXPR
3751 || comp == GE_EXPR
3752 || comp == LT_EXPR
3753 || comp == LE_EXPR)
3754 return NULL_TREE;
3755
3756 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3757 if (value_inside_range (val, vr) == 1)
3758 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3759
3760 return NULL_TREE;
3761 }
3762
3763 if (!usable_range_p (vr, strict_overflow_p))
3764 return NULL_TREE;
3765
3766 if (comp == EQ_EXPR)
3767 {
3768 /* EQ_EXPR may only be computed if VR represents exactly
3769 one value. */
3770 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3771 {
3772 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3773 if (cmp == 0)
3774 return boolean_true_node;
3775 else if (cmp == -1 || cmp == 1 || cmp == 2)
3776 return boolean_false_node;
3777 }
3778 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3779 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3780 return boolean_false_node;
3781
3782 return NULL_TREE;
3783 }
3784 else if (comp == NE_EXPR)
3785 {
3786 /* If VAL is not inside VR, then they are always different. */
3787 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3788 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3789 return boolean_true_node;
3790
3791 /* If VR represents exactly one value equal to VAL, then return
3792 false. */
3793 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3794 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3795 return boolean_false_node;
3796
3797 /* Otherwise, they may or may not be different. */
3798 return NULL_TREE;
3799 }
3800 else if (comp == LT_EXPR || comp == LE_EXPR)
3801 {
3802 int tst;
3803
3804 /* If VR is to the left of VAL, return true. */
3805 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3806 if ((comp == LT_EXPR && tst == -1)
3807 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3808 {
3809 if (overflow_infinity_range_p (vr))
3810 *strict_overflow_p = true;
3811 return boolean_true_node;
3812 }
3813
3814 /* If VR is to the right of VAL, return false. */
3815 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3816 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3817 || (comp == LE_EXPR && tst == 1))
3818 {
3819 if (overflow_infinity_range_p (vr))
3820 *strict_overflow_p = true;
3821 return boolean_false_node;
3822 }
3823
3824 /* Otherwise, we don't know. */
3825 return NULL_TREE;
3826 }
3827 else if (comp == GT_EXPR || comp == GE_EXPR)
3828 {
3829 int tst;
3830
3831 /* If VR is to the right of VAL, return true. */
3832 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3833 if ((comp == GT_EXPR && tst == 1)
3834 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3835 {
3836 if (overflow_infinity_range_p (vr))
3837 *strict_overflow_p = true;
3838 return boolean_true_node;
3839 }
3840
3841 /* If VR is to the left of VAL, return false. */
3842 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3843 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3844 || (comp == GE_EXPR && tst == -1))
3845 {
3846 if (overflow_infinity_range_p (vr))
3847 *strict_overflow_p = true;
3848 return boolean_false_node;
3849 }
3850
3851 /* Otherwise, we don't know. */
3852 return NULL_TREE;
3853 }
3854
3855 gcc_unreachable ();
3856 }
3857
3858
3859 /* Debugging dumps. */
3860
3861 void dump_value_range (FILE *, value_range_t *);
3862 void debug_value_range (value_range_t *);
3863 void dump_all_value_ranges (FILE *);
3864 void debug_all_value_ranges (void);
3865 void dump_vr_equiv (FILE *, bitmap);
3866 void debug_vr_equiv (bitmap);
3867
3868
3869 /* Dump value range VR to FILE. */
3870
3871 void
3872 dump_value_range (FILE *file, value_range_t *vr)
3873 {
3874 if (vr == NULL)
3875 fprintf (file, "[]");
3876 else if (vr->type == VR_UNDEFINED)
3877 fprintf (file, "UNDEFINED");
3878 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3879 {
3880 tree type = TREE_TYPE (vr->min);
3881
3882 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3883
3884 if (is_negative_overflow_infinity (vr->min))
3885 fprintf (file, "-INF(OVF)");
3886 else if (INTEGRAL_TYPE_P (type)
3887 && !TYPE_UNSIGNED (type)
3888 && vrp_val_is_min (vr->min))
3889 fprintf (file, "-INF");
3890 else
3891 print_generic_expr (file, vr->min, 0);
3892
3893 fprintf (file, ", ");
3894
3895 if (is_positive_overflow_infinity (vr->max))
3896 fprintf (file, "+INF(OVF)");
3897 else if (INTEGRAL_TYPE_P (type)
3898 && vrp_val_is_max (vr->max))
3899 fprintf (file, "+INF");
3900 else
3901 print_generic_expr (file, vr->max, 0);
3902
3903 fprintf (file, "]");
3904
3905 if (vr->equiv)
3906 {
3907 bitmap_iterator bi;
3908 unsigned i, c = 0;
3909
3910 fprintf (file, " EQUIVALENCES: { ");
3911
3912 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3913 {
3914 print_generic_expr (file, ssa_name (i), 0);
3915 fprintf (file, " ");
3916 c++;
3917 }
3918
3919 fprintf (file, "} (%u elements)", c);
3920 }
3921 }
3922 else if (vr->type == VR_VARYING)
3923 fprintf (file, "VARYING");
3924 else
3925 fprintf (file, "INVALID RANGE");
3926 }
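/* For reference, the printed form produced above looks roughly like

     [0, 42]          a plain range
     ~[0, 0]          an anti-range (value known to be nonzero)
     [-INF, +INF]     the extremes of the type
     [1, +INF(OVF)]   an overflow infinity used as a bound

   optionally followed by " EQUIVALENCES: { x_5 ... } (n elements)"
   (SSA names here are made up for the example).  */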
3927
3928
3929 /* Dump value range VR to stderr. */
3930
3931 DEBUG_FUNCTION void
3932 debug_value_range (value_range_t *vr)
3933 {
3934 dump_value_range (stderr, vr);
3935 fprintf (stderr, "\n");
3936 }
3937
3938
3939 /* Dump value ranges of all SSA_NAMEs to FILE. */
3940
3941 void
3942 dump_all_value_ranges (FILE *file)
3943 {
3944 size_t i;
3945
3946 for (i = 0; i < num_vr_values; i++)
3947 {
3948 if (vr_value[i])
3949 {
3950 print_generic_expr (file, ssa_name (i), 0);
3951 fprintf (file, ": ");
3952 dump_value_range (file, vr_value[i]);
3953 fprintf (file, "\n");
3954 }
3955 }
3956
3957 fprintf (file, "\n");
3958 }
3959
3960
3961 /* Dump all value ranges to stderr. */
3962
3963 DEBUG_FUNCTION void
3964 debug_all_value_ranges (void)
3965 {
3966 dump_all_value_ranges (stderr);
3967 }
3968
3969
3970 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3971 create a new SSA name N and return the assertion assignment
3972 'V = ASSERT_EXPR <V, V OP W>'. */
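/* For illustration (added example, with made-up SSA names): for V = x_3
   and COND = "x_3 > 10", the statement built is roughly

     x_4 = ASSERT_EXPR <x_3, x_3 > 10>;

   where x_4 is the duplicated SSA name N whose defining statement is the
   new assignment.  */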
3973
3974 static gimple
3975 build_assert_expr_for (tree cond, tree v)
3976 {
3977 tree n;
3978 gimple assertion;
3979
3980 gcc_assert (TREE_CODE (v) == SSA_NAME);
3981 n = duplicate_ssa_name (v, NULL);
3982
3983 if (COMPARISON_CLASS_P (cond))
3984 {
3985 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3986 assertion = gimple_build_assign (n, a);
3987 }
3988 else if (TREE_CODE (cond) == SSA_NAME)
3989 {
3990 /* Given V, build the assignment N = true. */
3991 gcc_assert (v == cond);
3992 assertion = gimple_build_assign (n, boolean_true_node);
3993 }
3994 else
3995 gcc_unreachable ();
3996
3997 SSA_NAME_DEF_STMT (n) = assertion;
3998
3999 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4000 operand of the ASSERT_EXPR. Register the new name and the old one
4001 in the replacement table so that we can fix the SSA web after
4002 adding all the ASSERT_EXPRs. */
4003 register_new_name_mapping (n, v);
4004
4005 return assertion;
4006 }
4007
4008
4009 /* Return true if the comparison in the conditional jump STMT involves
4010 floating point values. */
4011
4012 static inline bool
4013 fp_predicate (gimple stmt)
4014 {
4015 GIMPLE_CHECK (stmt, GIMPLE_COND);
4016
4017 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4018 }
4019
4020
4021 /* If the range of values taken by OP can be inferred after STMT executes,
4022 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4023 describe the inferred range. Return true if a range could be
4024 inferred. */
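/* For illustration (added example, with a made-up SSA name): for the
   statement "*p_1 = 4" and OP = p_1, with -fdelete-null-pointer-checks
   enabled the dereference lets us infer p_1 != 0, returned as
   *COMP_CODE_P = NE_EXPR and *VAL_P = a zero constant of p_1's type.  */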
4025
4026 static bool
4027 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4028 {
4029 *val_p = NULL_TREE;
4030 *comp_code_p = ERROR_MARK;
4031
4032 /* Do not attempt to infer anything in names that flow through
4033 abnormal edges. */
4034 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4035 return false;
4036
4037 /* Similarly, don't infer anything from statements that may throw
4038 exceptions. */
4039 if (stmt_could_throw_p (stmt))
4040 return false;
4041
4042 /* If STMT is the last statement of a basic block with no
4043 successors, there is no point inferring anything about any of its
4044 operands. We would not be able to find a proper insertion point
4045 for the assertion, anyway. */
4046 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4047 return false;
4048
4049 /* We can only assume that a pointer dereference will yield
4050 non-NULL if -fdelete-null-pointer-checks is enabled. */
4051 if (flag_delete_null_pointer_checks
4052 && POINTER_TYPE_P (TREE_TYPE (op))
4053 && gimple_code (stmt) != GIMPLE_ASM)
4054 {
4055 unsigned num_uses, num_loads, num_stores;
4056
4057 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4058 if (num_loads + num_stores > 0)
4059 {
4060 *val_p = build_int_cst (TREE_TYPE (op), 0);
4061 *comp_code_p = NE_EXPR;
4062 return true;
4063 }
4064 }
4065
4066 return false;
4067 }
4068
4069
4070 void dump_asserts_for (FILE *, tree);
4071 void debug_asserts_for (tree);
4072 void dump_all_asserts (FILE *);
4073 void debug_all_asserts (void);
4074
4075 /* Dump all the registered assertions for NAME to FILE. */
4076
4077 void
4078 dump_asserts_for (FILE *file, tree name)
4079 {
4080 assert_locus_t loc;
4081
4082 fprintf (file, "Assertions to be inserted for ");
4083 print_generic_expr (file, name, 0);
4084 fprintf (file, "\n");
4085
4086 loc = asserts_for[SSA_NAME_VERSION (name)];
4087 while (loc)
4088 {
4089 fprintf (file, "\t");
4090 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4091 fprintf (file, "\n\tBB #%d", loc->bb->index);
4092 if (loc->e)
4093 {
4094 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4095 loc->e->dest->index);
4096 dump_edge_info (file, loc->e, 0);
4097 }
4098 fprintf (file, "\n\tPREDICATE: ");
4099 print_generic_expr (file, name, 0);
4100 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4101 print_generic_expr (file, loc->val, 0);
4102 fprintf (file, "\n\n");
4103 loc = loc->next;
4104 }
4105
4106 fprintf (file, "\n");
4107 }
4108
4109
4110 /* Dump all the registered assertions for NAME to stderr. */
4111
4112 DEBUG_FUNCTION void
4113 debug_asserts_for (tree name)
4114 {
4115 dump_asserts_for (stderr, name);
4116 }
4117
4118
4119 /* Dump all the registered assertions for all the names to FILE. */
4120
4121 void
4122 dump_all_asserts (FILE *file)
4123 {
4124 unsigned i;
4125 bitmap_iterator bi;
4126
4127 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4128 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4129 dump_asserts_for (file, ssa_name (i));
4130 fprintf (file, "\n");
4131 }
4132
4133
4134 /* Dump all the registered assertions for all the names to stderr. */
4135
4136 DEBUG_FUNCTION void
4137 debug_all_asserts (void)
4138 {
4139 dump_all_asserts (stderr);
4140 }
4141
4142
4143 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4144 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4145 E->DEST, then register this location as a possible insertion point
4146 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4147
4148 BB, E and SI provide the exact insertion point for the new
4149 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4150 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4151 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4152 must not be NULL. */
4153
4154 static void
4155 register_new_assert_for (tree name, tree expr,
4156 enum tree_code comp_code,
4157 tree val,
4158 basic_block bb,
4159 edge e,
4160 gimple_stmt_iterator si)
4161 {
4162 assert_locus_t n, loc, last_loc;
4163 basic_block dest_bb;
4164
4165 gcc_checking_assert (bb == NULL || e == NULL);
4166
4167 if (e == NULL)
4168 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4169 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4170
4171 /* Never build an assert comparing against an integer constant with
4172 TREE_OVERFLOW set. This confuses our undefined overflow warning
4173 machinery. */
4174 if (TREE_CODE (val) == INTEGER_CST
4175 && TREE_OVERFLOW (val))
4176 val = build_int_cst_wide (TREE_TYPE (val),
4177 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4178
4179 /* The new assertion A will be inserted at BB or E. We need to
4180 determine if the new location is dominated by a previously
4181 registered location for A. If we are doing an edge insertion,
4182 assume that A will be inserted at E->DEST. Note that this is not
4183 necessarily true.
4184
4185 If E is a critical edge, it will be split. But even if E is
4186 split, the new block will dominate the same set of blocks that
4187 E->DEST dominates.
4188
4189 The reverse, however, is not true: blocks dominated by E->DEST
4190 will not be dominated by the new block created to split E. So,
4191 if the insertion location is on a critical edge, we will not use
4192 the new location to move another assertion previously registered
4193 at a block dominated by E->DEST. */
4194 dest_bb = (bb) ? bb : e->dest;
4195
4196 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4197 VAL at a block dominating DEST_BB, then we don't need to insert a new
4198 one. Similarly, if the same assertion already exists at a block
4199 dominated by DEST_BB and the new location is not on a critical
4200 edge, then update the existing location for the assertion (i.e.,
4201 move the assertion up in the dominance tree).
4202
4203 Note, this is implemented as a simple linked list because there
4204 should not be more than a handful of assertions registered per
4205 name. If this becomes a performance problem, a table hashed by
4206 COMP_CODE and VAL could be implemented. */
4207 loc = asserts_for[SSA_NAME_VERSION (name)];
4208 last_loc = loc;
4209 while (loc)
4210 {
4211 if (loc->comp_code == comp_code
4212 && (loc->val == val
4213 || operand_equal_p (loc->val, val, 0))
4214 && (loc->expr == expr
4215 || operand_equal_p (loc->expr, expr, 0)))
4216 {
4217 /* If the assertion NAME COMP_CODE VAL has already been
4218 registered at a basic block that dominates DEST_BB, then
4219 we don't need to insert the same assertion again. Note
4220 that we don't check strict dominance here to avoid
4221 replicating the same assertion inside the same basic
4222 block more than once (e.g., when a pointer is
4223 dereferenced several times inside a block).
4224
4225 An exception to this rule is edge insertions. If the
4226 new assertion is to be inserted on edge E, then it will
4227 dominate all the other insertions that we may want to
4228 insert in DEST_BB. So, if we are doing an edge
4229 insertion, don't do this dominance check. */
4230 if (e == NULL
4231 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4232 return;
4233
4234 /* Otherwise, if E is not a critical edge and DEST_BB
4235 dominates the existing location for the assertion, move
4236 the assertion up in the dominance tree by updating its
4237 location information. */
4238 if ((e == NULL || !EDGE_CRITICAL_P (e))
4239 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4240 {
4241 loc->bb = dest_bb;
4242 loc->e = e;
4243 loc->si = si;
4244 return;
4245 }
4246 }
4247
4248 /* Update the last node of the list and move to the next one. */
4249 last_loc = loc;
4250 loc = loc->next;
4251 }
4252
4253 /* If we didn't find an assertion already registered for
4254 NAME COMP_CODE VAL, add a new one at the end of the list of
4255 assertions associated with NAME. */
4256 n = XNEW (struct assert_locus_d);
4257 n->bb = dest_bb;
4258 n->e = e;
4259 n->si = si;
4260 n->comp_code = comp_code;
4261 n->val = val;
4262 n->expr = expr;
4263 n->next = NULL;
4264
4265 if (last_loc)
4266 last_loc->next = n;
4267 else
4268 asserts_for[SSA_NAME_VERSION (name)] = n;
4269
4270 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4271 }
4272
4273 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4274 Extract a suitable test code and value and store them into *CODE_P and
4275 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4276
4277 If no extraction was possible, return FALSE, otherwise return TRUE.
4278
4279 If INVERT is true, then we invert the result stored into *CODE_P. */
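/* For illustration (added example, with a made-up SSA name): for
   NAME = x_2 and the predicate "5 < x_2" (COND_OP0 = 5, COND_OP1 = x_2,
   COND_CODE = LT_EXPR), the comparison is flipped to "x_2 > 5"; if
   INVERT is true (e.g. on the else edge) it becomes "x_2 <= 5", so
   *CODE_P = LE_EXPR and *VAL_P = 5.  */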
4280
4281 static bool
4282 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4283 tree cond_op0, tree cond_op1,
4284 bool invert, enum tree_code *code_p,
4285 tree *val_p)
4286 {
4287 enum tree_code comp_code;
4288 tree val;
4289
4290 /* We have a comparison of the form NAME COMP VAL
4291 or VAL COMP NAME. */
4292 if (name == cond_op1)
4293 {
4294 /* If the predicate is of the form VAL COMP NAME, flip
4295 COMP around because we need to register NAME as the
4296 first operand in the predicate. */
4297 comp_code = swap_tree_comparison (cond_code);
4298 val = cond_op0;
4299 }
4300 else
4301 {
4302 /* The comparison is of the form NAME COMP VAL, so the
4303 comparison code remains unchanged. */
4304 comp_code = cond_code;
4305 val = cond_op1;
4306 }
4307
4308 /* Invert the comparison code as necessary. */
4309 if (invert)
4310 comp_code = invert_tree_comparison (comp_code, 0);
4311
4312 /* VRP does not handle float types. */
4313 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4314 return false;
4315
4316 /* Do not register always-false predicates.
4317 FIXME: this works around a limitation in fold() when dealing with
4318 enumerations. Given 'enum { N1, N2 } x;', fold will not
4319 fold 'if (x > N2)' to 'if (0)'. */
4320 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4321 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4322 {
4323 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4324 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4325
4326 if (comp_code == GT_EXPR
4327 && (!max
4328 || compare_values (val, max) == 0))
4329 return false;
4330
4331 if (comp_code == LT_EXPR
4332 && (!min
4333 || compare_values (val, min) == 0))
4334 return false;
4335 }
4336 *code_p = comp_code;
4337 *val_p = val;
4338 return true;
4339 }
4340
4341 /* Try to register an edge assertion for SSA name NAME on edge E for
4342 the condition COND contributing to the conditional jump pointed to by BSI.
4343 Invert the condition COND if INVERT is true.
4344 Return true if an assertion for NAME could be registered. */
4345
4346 static bool
4347 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4348 enum tree_code cond_code,
4349 tree cond_op0, tree cond_op1, bool invert)
4350 {
4351 tree val;
4352 enum tree_code comp_code;
4353 bool retval = false;
4354
4355 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4356 cond_op0,
4357 cond_op1,
4358 invert, &comp_code, &val))
4359 return false;
4360
4361 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4362 reachable from E. */
4363 if (live_on_edge (e, name)
4364 && !has_single_use (name))
4365 {
4366 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4367 retval = true;
4368 }
4369
4370 /* In the case of NAME <= CST and NAME being defined as
4371 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4372 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4373 This catches range and anti-range tests. */
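/* For illustration (added example): with a signed name2 and

     name = (unsigned int) name2 + 10;
     if (name <= 20) ...

   the true edge lets us assert name2 >= -10 and name2 <= 10, the
   familiar range-test idiom (unsigned)(x - low) <= high - low.  */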
4374 if ((comp_code == LE_EXPR
4375 || comp_code == GT_EXPR)
4376 && TREE_CODE (val) == INTEGER_CST
4377 && TYPE_UNSIGNED (TREE_TYPE (val)))
4378 {
4379 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4380 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4381
4382 /* Extract CST2 from the (optional) addition. */
4383 if (is_gimple_assign (def_stmt)
4384 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4385 {
4386 name2 = gimple_assign_rhs1 (def_stmt);
4387 cst2 = gimple_assign_rhs2 (def_stmt);
4388 if (TREE_CODE (name2) == SSA_NAME
4389 && TREE_CODE (cst2) == INTEGER_CST)
4390 def_stmt = SSA_NAME_DEF_STMT (name2);
4391 }
4392
4393 /* Extract NAME2 from the (optional) sign-changing cast. */
4394 if (gimple_assign_cast_p (def_stmt))
4395 {
4396 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4397 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4398 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4399 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4400 name3 = gimple_assign_rhs1 (def_stmt);
4401 }
4402
4403 /* If name3 is used later, create an ASSERT_EXPR for it. */
4404 if (name3 != NULL_TREE
4405 && TREE_CODE (name3) == SSA_NAME
4406 && (cst2 == NULL_TREE
4407 || TREE_CODE (cst2) == INTEGER_CST)
4408 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4409 && live_on_edge (e, name3)
4410 && !has_single_use (name3))
4411 {
4412 tree tmp;
4413
4414 /* Build an expression for the range test. */
4415 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4416 if (cst2 != NULL_TREE)
4417 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4418
4419 if (dump_file)
4420 {
4421 fprintf (dump_file, "Adding assert for ");
4422 print_generic_expr (dump_file, name3, 0);
4423 fprintf (dump_file, " from ");
4424 print_generic_expr (dump_file, tmp, 0);
4425 fprintf (dump_file, "\n");
4426 }
4427
4428 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4429
4430 retval = true;
4431 }
4432
4433 /* If name2 is used later, create an ASSERT_EXPR for it. */
4434 if (name2 != NULL_TREE
4435 && TREE_CODE (name2) == SSA_NAME
4436 && TREE_CODE (cst2) == INTEGER_CST
4437 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4438 && live_on_edge (e, name2)
4439 && !has_single_use (name2))
4440 {
4441 tree tmp;
4442
4443 /* Build an expression for the range test. */
4444 tmp = name2;
4445 if (TREE_TYPE (name) != TREE_TYPE (name2))
4446 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4447 if (cst2 != NULL_TREE)
4448 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4449
4450 if (dump_file)
4451 {
4452 fprintf (dump_file, "Adding assert for ");
4453 print_generic_expr (dump_file, name2, 0);
4454 fprintf (dump_file, " from ");
4455 print_generic_expr (dump_file, tmp, 0);
4456 fprintf (dump_file, "\n");
4457 }
4458
4459 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4460
4461 retval = true;
4462 }
4463 }
4464
4465 /* Similarly add asserts for NAME == CST and NAME being defined as
4466 NAME = NAME2 >> CST2. */
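/* For illustration (added example): with

     name = name2 >> 4;
     if (name == 3) ...

   the true edge implies name2 - (3 << 4) fits in the low 4 bits, i.e.
   name2 is in [48, 63]; that is the form of assertion built below.  */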
4467 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4468 && TREE_CODE (val) == INTEGER_CST)
4469 {
4470 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4471 tree name2 = NULL_TREE, cst2 = NULL_TREE;
4472 tree val2 = NULL_TREE;
4473 unsigned HOST_WIDE_INT mask[2] = { 0, 0 };
4474
4475 /* Extract CST2 from the right shift. */
4476 if (is_gimple_assign (def_stmt)
4477 && gimple_assign_rhs_code (def_stmt) == RSHIFT_EXPR)
4478 {
4479 name2 = gimple_assign_rhs1 (def_stmt);
4480 cst2 = gimple_assign_rhs2 (def_stmt);
4481 if (TREE_CODE (name2) == SSA_NAME
4482 && host_integerp (cst2, 1)
4483 && (unsigned HOST_WIDE_INT) tree_low_cst (cst2, 1)
4484 < 2 * HOST_BITS_PER_WIDE_INT
4485 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4486 && live_on_edge (e, name2)
4487 && !has_single_use (name2))
4488 {
4489 if ((unsigned HOST_WIDE_INT) tree_low_cst (cst2, 1)
4490 < HOST_BITS_PER_WIDE_INT)
4491 mask[0] = ((unsigned HOST_WIDE_INT) 1
4492 << tree_low_cst (cst2, 1)) - 1;
4493 else
4494 {
4495 mask[1] = ((unsigned HOST_WIDE_INT) 1
4496 << (tree_low_cst (cst2, 1)
4497 - HOST_BITS_PER_WIDE_INT)) - 1;
4498 mask[0] = -1;
4499 }
4500 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
4501 }
4502 }
4503
4504 if (val2 != NULL_TREE
4505 && TREE_CODE (val2) == INTEGER_CST
4506 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
4507 TREE_TYPE (val),
4508 val2, cst2), val))
4509 {
4510 enum tree_code new_comp_code = comp_code;
4511 tree tmp, new_val;
4512
4513 tmp = name2;
4514 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
4515 {
4516 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
4517 {
4518 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4519 tree type = build_nonstandard_integer_type (prec, 1);
4520 tmp = build1 (NOP_EXPR, type, name2);
4521 val2 = fold_convert (type, val2);
4522 }
4523 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
4524 new_val = build_int_cst_wide (TREE_TYPE (tmp), mask[0], mask[1]);
4525 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
4526 }
4527 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4528 new_val = val2;
4529 else
4530 {
4531 new_val = build_int_cst_wide (TREE_TYPE (val2),
4532 mask[0], mask[1]);
4533 new_val = fold_binary (BIT_IOR_EXPR, TREE_TYPE (val2),
4534 val2, new_val);
4535 }
4536
4537 if (dump_file)
4538 {
4539 fprintf (dump_file, "Adding assert for ");
4540 print_generic_expr (dump_file, name2, 0);
4541 fprintf (dump_file, " from ");
4542 print_generic_expr (dump_file, tmp, 0);
4543 fprintf (dump_file, "\n");
4544 }
4545
4546 register_new_assert_for (name2, tmp, new_comp_code, new_val,
4547 NULL, e, bsi);
4548 retval = true;
4549 }
4550 }
4551
4552 return retval;
4553 }
4554
4555 /* OP is an operand of a truth value expression which is known to have
4556 a particular value. Register any asserts for OP and for any
4557 operands in OP's defining statement.
4558
4559 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4560 if CODE is NE_EXPR, that OP is nonzero (true). */
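/* For illustration (added example, with made-up SSA names): if OP is
   defined as "op_5 = a_1 > 10" and CODE is NE_EXPR (OP known nonzero),
   we may also register "a_1 > 10" on the edge; if OP is defined as
   "op_5 = b_2 & c_3" and CODE is NE_EXPR, both b_2 and c_3 can be
   asserted nonzero.  */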
4561
4562 static bool
4563 register_edge_assert_for_1 (tree op, enum tree_code code,
4564 edge e, gimple_stmt_iterator bsi)
4565 {
4566 bool retval = false;
4567 gimple op_def;
4568 tree val;
4569 enum tree_code rhs_code;
4570
4571 /* We only care about SSA_NAMEs. */
4572 if (TREE_CODE (op) != SSA_NAME)
4573 return false;
4574
4575 /* We know that OP will have a zero or nonzero value. If OP is used
4576 more than once go ahead and register an assert for OP.
4577
4578 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4579 it will always be set for OP (because OP is used in a COND_EXPR in
4580 the subgraph). */
4581 if (!has_single_use (op))
4582 {
4583 val = build_int_cst (TREE_TYPE (op), 0);
4584 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4585 retval = true;
4586 }
4587
4588 /* Now look at how OP is set. If it's set from a comparison,
4589 a truth operation or some bit operations, then we may be able
4590 to register information about the operands of that assignment. */
4591 op_def = SSA_NAME_DEF_STMT (op);
4592 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4593 return retval;
4594
4595 rhs_code = gimple_assign_rhs_code (op_def);
4596
4597 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4598 {
4599 bool invert = (code == EQ_EXPR ? true : false);
4600 tree op0 = gimple_assign_rhs1 (op_def);
4601 tree op1 = gimple_assign_rhs2 (op_def);
4602
4603 if (TREE_CODE (op0) == SSA_NAME)
4604 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4605 invert);
4606 if (TREE_CODE (op1) == SSA_NAME)
4607 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4608 invert);
4609 }
4610 else if ((code == NE_EXPR
4611 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
4612 || (code == EQ_EXPR
4613 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
4614 {
4615 /* Recurse on each operand. */
4616 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4617 code, e, bsi);
4618 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4619 code, e, bsi);
4620 }
4621 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
4622 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
4623 {
4624 /* Recurse, flipping CODE. */
4625 code = invert_tree_comparison (code, false);
4626 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4627 code, e, bsi);
4628 }
4629 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4630 {
4631 /* Recurse through the copy. */
4632 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4633 code, e, bsi);
4634 }
4635 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4636 {
4637 /* Recurse through the type conversion. */
4638 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4639 code, e, bsi);
4640 }
4641
4642 return retval;
4643 }
4644
4645 /* Try to register an edge assertion for SSA name NAME on edge E for
4646 the condition COND contributing to the conditional jump pointed to by SI.
4647 Return true if an assertion for NAME could be registered. */
4648
4649 static bool
4650 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4651 enum tree_code cond_code, tree cond_op0,
4652 tree cond_op1)
4653 {
4654 tree val;
4655 enum tree_code comp_code;
4656 bool retval = false;
4657 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4658
4659 /* Do not attempt to infer anything in names that flow through
4660 abnormal edges. */
4661 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4662 return false;
4663
4664 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4665 cond_op0, cond_op1,
4666 is_else_edge,
4667 &comp_code, &val))
4668 return false;
4669
4670 /* Register ASSERT_EXPRs for name. */
4671 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4672 cond_op1, is_else_edge);
4673
4674
4675 /* If COND is effectively an equality test of an SSA_NAME against
4676 the value zero or one, then we may be able to assert values
4677 for SSA_NAMEs which flow into COND. */
4678
4679 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
4680 statement of NAME we can assert both operands of the BIT_AND_EXPR
4681 have nonzero value. */
4682 if (((comp_code == EQ_EXPR && integer_onep (val))
4683 || (comp_code == NE_EXPR && integer_zerop (val))))
4684 {
4685 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4686
4687 if (is_gimple_assign (def_stmt)
4688 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
4689 {
4690 tree op0 = gimple_assign_rhs1 (def_stmt);
4691 tree op1 = gimple_assign_rhs2 (def_stmt);
4692 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4693 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4694 }
4695 }
4696
4697 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
4698 statement of NAME we can assert both operands of the BIT_IOR_EXPR
4699 have zero value. */
4700 if (((comp_code == EQ_EXPR && integer_zerop (val))
4701 || (comp_code == NE_EXPR && integer_onep (val))))
4702 {
4703 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4704
4705 /* For BIT_IOR_EXPR, both operands are necessarily zero only if
4706 NAME == 0, or if the type precision is one. */
4707 if (is_gimple_assign (def_stmt)
4708 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
4709 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
4710 || comp_code == EQ_EXPR)))
4711 {
4712 tree op0 = gimple_assign_rhs1 (def_stmt);
4713 tree op1 = gimple_assign_rhs2 (def_stmt);
4714 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4715 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4716 }
4717 }
4718
4719 return retval;
4720 }
4721
4722
4723 /* Determine whether the outgoing edges of BB should receive an
4724 ASSERT_EXPR for each of the operands of BB's LAST statement.
4725 The last statement of BB must be a COND_EXPR.
4726
4727 If any of the sub-graphs rooted at BB have an interesting use of
4728 the predicate operands, an assert location node is added to the
4729 list of assertions for the corresponding operands. */
4730
4731 static bool
4732 find_conditional_asserts (basic_block bb, gimple last)
4733 {
4734 bool need_assert;
4735 gimple_stmt_iterator bsi;
4736 tree op;
4737 edge_iterator ei;
4738 edge e;
4739 ssa_op_iter iter;
4740
4741 need_assert = false;
4742 bsi = gsi_for_stmt (last);
4743
4744 /* Look for uses of the operands in each of the sub-graphs
4745 rooted at BB. We need to check each of the outgoing edges
4746 separately, so that we know what kind of ASSERT_EXPR to
4747 insert. */
4748 FOR_EACH_EDGE (e, ei, bb->succs)
4749 {
4750 if (e->dest == bb)
4751 continue;
4752
4753 /* Register the necessary assertions for each operand in the
4754 conditional predicate. */
4755 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4756 {
4757 need_assert |= register_edge_assert_for (op, e, bsi,
4758 gimple_cond_code (last),
4759 gimple_cond_lhs (last),
4760 gimple_cond_rhs (last));
4761 }
4762 }
4763
4764 return need_assert;
4765 }
4766
4767 struct case_info
4768 {
4769 tree expr;
4770 basic_block bb;
4771 };
4772
4773 /* Compare two case labels, sorting first by the destination bb index
4774 and then by the case value. */
4775
4776 static int
4777 compare_case_labels (const void *p1, const void *p2)
4778 {
4779 const struct case_info *ci1 = (const struct case_info *) p1;
4780 const struct case_info *ci2 = (const struct case_info *) p2;
4781 int idx1 = ci1->bb->index;
4782 int idx2 = ci2->bb->index;
4783
4784 if (idx1 < idx2)
4785 return -1;
4786 else if (idx1 == idx2)
4787 {
4788 /* Make sure the default label is first in a group. */
4789 if (!CASE_LOW (ci1->expr))
4790 return -1;
4791 else if (!CASE_LOW (ci2->expr))
4792 return 1;
4793 else
4794 return tree_int_cst_compare (CASE_LOW (ci1->expr),
4795 CASE_LOW (ci2->expr));
4796 }
4797 else
4798 return 1;
4799 }
4800
4801 /* Determine whether the outgoing edges of BB should receive an
4802 ASSERT_EXPR for each of the operands of BB's LAST statement.
4803 The last statement of BB must be a SWITCH_EXPR.
4804
4805 If any of the sub-graphs rooted at BB have an interesting use of
4806 the predicate operands, an assert location node is added to the
4807 list of assertions for the corresponding operands. */
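/* For illustration (added example, with made-up names): for

     switch (x_1) { case 3: case 4: case 5: goto L10; default: goto L20; }

   the three labels share a destination, so the assertions registered on
   the edge to L10 amount to x_1 >= 3 and x_1 <= 5, giving that edge the
   single range [3, 5].  */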
4808
4809 static bool
4810 find_switch_asserts (basic_block bb, gimple last)
4811 {
4812 bool need_assert;
4813 gimple_stmt_iterator bsi;
4814 tree op;
4815 edge e;
4816 struct case_info *ci;
4817 size_t n = gimple_switch_num_labels (last);
4818 #if GCC_VERSION >= 4000
4819 unsigned int idx;
4820 #else
4821 /* Work around GCC 3.4 bug (PR 37086). */
4822 volatile unsigned int idx;
4823 #endif
4824
4825 need_assert = false;
4826 bsi = gsi_for_stmt (last);
4827 op = gimple_switch_index (last);
4828 if (TREE_CODE (op) != SSA_NAME)
4829 return false;
4830
4831 /* Build a vector of case labels sorted by destination label. */
4832 ci = XNEWVEC (struct case_info, n);
4833 for (idx = 0; idx < n; ++idx)
4834 {
4835 ci[idx].expr = gimple_switch_label (last, idx);
4836 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4837 }
4838 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4839
4840 for (idx = 0; idx < n; ++idx)
4841 {
4842 tree min, max;
4843 tree cl = ci[idx].expr;
4844 basic_block cbb = ci[idx].bb;
4845
4846 min = CASE_LOW (cl);
4847 max = CASE_HIGH (cl);
4848
4849 /* If there are multiple case labels with the same destination
4850 we need to combine them to a single value range for the edge. */
4851 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4852 {
4853 /* Skip labels until the last of the group. */
4854 do {
4855 ++idx;
4856 } while (idx < n && cbb == ci[idx].bb);
4857 --idx;
4858
4859 /* Pick up the maximum of the case label range. */
4860 if (CASE_HIGH (ci[idx].expr))
4861 max = CASE_HIGH (ci[idx].expr);
4862 else
4863 max = CASE_LOW (ci[idx].expr);
4864 }
4865
4866 /* Nothing to do if the range includes the default label until we
4867 can register anti-ranges. */
4868 if (min == NULL_TREE)
4869 continue;
4870
4871 /* Find the edge to register the assert expr on. */
4872 e = find_edge (bb, cbb);
4873
4874 /* Register the necessary assertions for the operand in the
4875 SWITCH_EXPR. */
4876 need_assert |= register_edge_assert_for (op, e, bsi,
4877 max ? GE_EXPR : EQ_EXPR,
4878 op,
4879 fold_convert (TREE_TYPE (op),
4880 min));
4881 if (max)
4882 {
4883 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4884 op,
4885 fold_convert (TREE_TYPE (op),
4886 max));
4887 }
4888 }
4889
4890 XDELETEVEC (ci);
4891 return need_assert;
4892 }
4893
4894
4895 /* Traverse all the statements in block BB looking for statements that
4896 may generate useful assertions for the SSA names in their operand.
4897 If a statement produces a useful assertion A for name N_i, then the
4898 list of assertions already generated for N_i is scanned to
4899 determine if A is actually needed.
4900
4901 If N_i already had the assertion A at a location dominating the
4902 current location, then nothing needs to be done. Otherwise, the
4903 new location for A is recorded instead.
4904
4905 1- For every statement S in BB, all the variables used by S are
4906 added to bitmap FOUND_IN_SUBGRAPH.
4907
4908 2- If statement S uses an operand N in a way that exposes a known
4909 value range for N, then if N was not already generated by an
4910 ASSERT_EXPR, create a new assert location for N. For instance,
4911 if N is a pointer and the statement dereferences it, we can
4912 assume that N is not NULL.
4913
4914 3- COND_EXPRs are a special case of #2. We can derive range
4915 information from the predicate but need to insert different
4916 ASSERT_EXPRs for each of the sub-graphs rooted at the
4917 conditional block. If the last statement of BB is a conditional
4918 expression of the form 'X op Y', then
4919
4920 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4921
4922 b) If the conditional is the only entry point to the sub-graph
4923 corresponding to the THEN_CLAUSE, recurse into it. On
4924 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4925 an ASSERT_EXPR is added for the corresponding variable.
4926
4927 c) Repeat step (b) on the ELSE_CLAUSE.
4928
4929 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4930
4931 For instance,
4932
4933 if (a == 9)
4934 b = a;
4935 else
4936 b = c + 1;
4937
4938 In this case, an assertion on the THEN clause is useful to
4939 determine that 'a' is always 9 on that edge. However, an assertion
4940 on the ELSE clause would be unnecessary.
4941
4942 4- If BB does not end in a conditional expression, then we recurse
4943 into BB's dominator children.
4944
4945 At the end of the recursive traversal, every SSA name will have a
4946 list of locations where ASSERT_EXPRs should be added. When a new
4947 location for name N is found, it is registered by calling
4948 register_new_assert_for. That function keeps track of all the
4949 registered assertions to prevent adding unnecessary assertions.
4950 For instance, if a pointer P_4 is dereferenced more than once in a
4951 dominator tree, only the location dominating all the dereferences of
4952 P_4 will receive an ASSERT_EXPR.
4953
4954 If this function returns true, then it means that there are names
4955 for which we need to generate ASSERT_EXPRs. Those assertions are
4956 inserted by process_assert_insertions. */
4957
4958 static bool
4959 find_assert_locations_1 (basic_block bb, sbitmap live)
4960 {
4961 gimple_stmt_iterator si;
4962 gimple last;
4963 gimple phi;
4964 bool need_assert;
4965
4966 need_assert = false;
4967 last = last_stmt (bb);
4968
4969 /* If BB's last statement is a conditional statement involving integer
4970 operands, determine if we need to add ASSERT_EXPRs. */
4971 if (last
4972 && gimple_code (last) == GIMPLE_COND
4973 && !fp_predicate (last)
4974 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4975 need_assert |= find_conditional_asserts (bb, last);
4976
4977 /* If BB's last statement is a switch statement involving integer
4978 operands, determine if we need to add ASSERT_EXPRs. */
4979 if (last
4980 && gimple_code (last) == GIMPLE_SWITCH
4981 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4982 need_assert |= find_switch_asserts (bb, last);
4983
4984 /* Traverse all the statements in BB marking used names and looking
4985 for statements that may infer assertions for their used operands. */
4986 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4987 {
4988 gimple stmt;
4989 tree op;
4990 ssa_op_iter i;
4991
4992 stmt = gsi_stmt (si);
4993
4994 if (is_gimple_debug (stmt))
4995 continue;
4996
4997 /* See if we can derive an assertion for any of STMT's operands. */
4998 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4999 {
5000 tree value;
5001 enum tree_code comp_code;
5002
5003 /* Mark OP in our live bitmap. */
5004 SET_BIT (live, SSA_NAME_VERSION (op));
5005
5006 /* If OP is used in such a way that we can infer a value
5007 range for it, and we don't find a previous assertion for
5008 it, create a new assertion location node for OP. */
5009 if (infer_value_range (stmt, op, &comp_code, &value))
5010 {
5011 /* If we are able to infer a nonzero value range for OP,
5012 then walk backwards through the use-def chain to see if OP
5013 was set via a typecast.
5014
5015 If so, then we can also infer a nonzero value range
5016 for the operand of the NOP_EXPR. */
5017 if (comp_code == NE_EXPR && integer_zerop (value))
5018 {
5019 tree t = op;
5020 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5021
5022 while (is_gimple_assign (def_stmt)
5023 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5024 && TREE_CODE
5025 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5026 && POINTER_TYPE_P
5027 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5028 {
5029 t = gimple_assign_rhs1 (def_stmt);
5030 def_stmt = SSA_NAME_DEF_STMT (t);
5031
5032 /* Note we want to register the assert for the
5033 operand of the NOP_EXPR after SI, not after the
5034 conversion. */
5035 if (! has_single_use (t))
5036 {
5037 register_new_assert_for (t, t, comp_code, value,
5038 bb, NULL, si);
5039 need_assert = true;
5040 }
5041 }
5042 }
5043
5044 /* If OP is used only once, namely in this STMT, don't
5045 bother creating an ASSERT_EXPR for it. Such an
5046 ASSERT_EXPR would do nothing but increase compile time. */
5047 if (!has_single_use (op))
5048 {
5049 register_new_assert_for (op, op, comp_code, value,
5050 bb, NULL, si);
5051 need_assert = true;
5052 }
5053 }
5054 }
5055 }
5056
5057 /* Traverse all PHI nodes in BB marking used operands. */
5058 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
5059 {
5060 use_operand_p arg_p;
5061 ssa_op_iter i;
5062 phi = gsi_stmt (si);
5063
5064 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5065 {
5066 tree arg = USE_FROM_PTR (arg_p);
5067 if (TREE_CODE (arg) == SSA_NAME)
5068 SET_BIT (live, SSA_NAME_VERSION (arg));
5069 }
5070 }
5071
5072 return need_assert;
5073 }
5074
5075 /* Do an RPO walk over the function computing SSA name liveness
5076 on-the-fly and deciding on assert expressions to insert.
5077 Returns true if there are assert expressions to be inserted. */
5078
5079 static bool
5080 find_assert_locations (void)
5081 {
5082 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5083 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5084 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
5085 int rpo_cnt, i;
5086 bool need_asserts;
5087
5088 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
5089 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5090 for (i = 0; i < rpo_cnt; ++i)
5091 bb_rpo[rpo[i]] = i;
5092
5093 need_asserts = false;
5094 for (i = rpo_cnt-1; i >= 0; --i)
5095 {
5096 basic_block bb = BASIC_BLOCK (rpo[i]);
5097 edge e;
5098 edge_iterator ei;
5099
5100 if (!live[rpo[i]])
5101 {
5102 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5103 sbitmap_zero (live[rpo[i]]);
5104 }
5105
5106 /* Process BB and update the live information with uses in
5107 this block. */
5108 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5109
5110 /* Merge liveness into the predecessor blocks and free it. */
5111 if (!sbitmap_empty_p (live[rpo[i]]))
5112 {
5113 int pred_rpo = i;
5114 FOR_EACH_EDGE (e, ei, bb->preds)
5115 {
5116 int pred = e->src->index;
5117 if (e->flags & EDGE_DFS_BACK)
5118 continue;
5119
5120 if (!live[pred])
5121 {
5122 live[pred] = sbitmap_alloc (num_ssa_names);
5123 sbitmap_zero (live[pred]);
5124 }
5125 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5126
5127 if (bb_rpo[pred] < pred_rpo)
5128 pred_rpo = bb_rpo[pred];
5129 }
5130
5131 /* Record the RPO number of the last visited block that needs
5132 live information from this block. */
5133 last_rpo[rpo[i]] = pred_rpo;
5134 }
5135 else
5136 {
5137 sbitmap_free (live[rpo[i]]);
5138 live[rpo[i]] = NULL;
5139 }
5140
5141 /* We can free all successors' live bitmaps if all their
5142 predecessors have been visited already. */
5143 FOR_EACH_EDGE (e, ei, bb->succs)
5144 if (last_rpo[e->dest->index] == i
5145 && live[e->dest->index])
5146 {
5147 sbitmap_free (live[e->dest->index]);
5148 live[e->dest->index] = NULL;
5149 }
5150 }
5151
5152 XDELETEVEC (rpo);
5153 XDELETEVEC (bb_rpo);
5154 XDELETEVEC (last_rpo);
5155 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5156 if (live[i])
5157 sbitmap_free (live[i]);
5158 XDELETEVEC (live);
5159
5160 return need_asserts;
5161 }
5162
5163 /* Create an ASSERT_EXPR for NAME and insert it in the location
5164 indicated by LOC. Return true if we made any edge insertions. */
5165
5166 static bool
5167 process_assert_insertions_for (tree name, assert_locus_t loc)
5168 {
5169 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5170 gimple stmt;
5171 tree cond;
5172 gimple assert_stmt;
5173 edge_iterator ei;
5174 edge e;
5175
5176 /* If we have X <=> X do not insert an assert expr for that. */
5177 if (loc->expr == loc->val)
5178 return false;
5179
5180 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5181 assert_stmt = build_assert_expr_for (cond, name);
5182 if (loc->e)
5183 {
5184 /* We have been asked to insert the assertion on an edge. This
5185 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5186 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5187 || (gimple_code (gsi_stmt (loc->si))
5188 == GIMPLE_SWITCH));
5189
5190 gsi_insert_on_edge (loc->e, assert_stmt);
5191 return true;
5192 }
5193
5194 /* Otherwise, we can insert right after LOC->SI, provided the
5195 statement is not the last statement in the block. */
5196 stmt = gsi_stmt (loc->si);
5197 if (!stmt_ends_bb_p (stmt))
5198 {
5199 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5200 return false;
5201 }
5202
5203 /* If STMT must be the last statement in BB, we can only insert new
5204 assertions on the non-abnormal edge out of BB. Note that since
5205 STMT is not control flow, there may only be one non-abnormal edge
5206 out of BB. */
5207 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5208 if (!(e->flags & EDGE_ABNORMAL))
5209 {
5210 gsi_insert_on_edge (e, assert_stmt);
5211 return true;
5212 }
5213
5214 gcc_unreachable ();
5215 }
5216
5217
5218 /* Process all the insertions registered for every name N_i registered
5219 in NEED_ASSERT_FOR. The list of assertions to be inserted are
5220 found in ASSERTS_FOR[i]. */
5221
5222 static void
5223 process_assert_insertions (void)
5224 {
5225 unsigned i;
5226 bitmap_iterator bi;
5227 bool update_edges_p = false;
5228 int num_asserts = 0;
5229
5230 if (dump_file && (dump_flags & TDF_DETAILS))
5231 dump_all_asserts (dump_file);
5232
5233 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5234 {
5235 assert_locus_t loc = asserts_for[i];
5236 gcc_assert (loc);
5237
5238 while (loc)
5239 {
5240 assert_locus_t next = loc->next;
5241 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5242 free (loc);
5243 loc = next;
5244 num_asserts++;
5245 }
5246 }
5247
5248 if (update_edges_p)
5249 gsi_commit_edge_inserts ();
5250
5251 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5252 num_asserts);
5253 }
5254
5255
5256 /* Traverse the flowgraph looking for conditional jumps to insert range
5257 expressions. These range expressions are meant to provide information
5258 to optimizations that need to reason in terms of value ranges. They
5259 will not be expanded into RTL. For instance, given:
5260
5261 x = ...
5262 y = ...
5263 if (x < y)
5264 y = x - 2;
5265 else
5266 x = y + 3;
5267
5268 this pass will transform the code into:
5269
5270 x = ...
5271 y = ...
5272 if (x < y)
5273 {
5274 x = ASSERT_EXPR <x, x < y>
5275 y = x - 2
5276 }
5277 else
5278 {
5279 y = ASSERT_EXPR <y, x <= y>
5280 x = y + 3
5281 }
5282
5283 The idea is that once copy and constant propagation have run, other
5284 optimizations will be able to determine what ranges of values can 'x'
5285 take in different paths of the code, simply by checking the reaching
5286 definition of 'x'. */
5287
5288 static void
5289 insert_range_assertions (void)
5290 {
5291 need_assert_for = BITMAP_ALLOC (NULL);
5292 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5293
5294 calculate_dominance_info (CDI_DOMINATORS);
5295
5296 if (find_assert_locations ())
5297 {
5298 process_assert_insertions ();
5299 update_ssa (TODO_update_ssa_no_phi);
5300 }
5301
5302 if (dump_file && (dump_flags & TDF_DETAILS))
5303 {
5304 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5305 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5306 }
5307
5308 free (asserts_for);
5309 BITMAP_FREE (need_assert_for);
5310 }
5311
5312 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
5313 and "struct" hacks. If VRP can determine that the
5314 array subscript is a constant, check if it is outside the valid
5315 range. If the array subscript is a RANGE, warn if it is
5316 non-overlapping with the valid range.
5317 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
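/* For illustration (added example, with a made-up SSA name): for

     int a[10];
     ... a[i_2] ...

   with the range of i_2 known to be [10, 20], even the smallest possible
   subscript is above the upper bound 9, so -Warray-bounds fires; for
   &a[10] the off-by-one address is accepted because IGNORE_OFF_BY_ONE
   is true in that case.  */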
5318
5319 static void
5320 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5321 {
5322 value_range_t* vr = NULL;
5323 tree low_sub, up_sub;
5324 tree low_bound, up_bound, up_bound_p1;
5325 tree base;
5326
5327 if (TREE_NO_WARNING (ref))
5328 return;
5329
5330 low_sub = up_sub = TREE_OPERAND (ref, 1);
5331 up_bound = array_ref_up_bound (ref);
5332
5333 /* Cannot check flexible arrays. */
5334 if (!up_bound
5335 || TREE_CODE (up_bound) != INTEGER_CST)
5336 return;
5337
5338 /* Accesses to trailing arrays via pointers may access storage
5339 beyond the type's array bounds. */
5340 base = get_base_address (ref);
5341 if (base && TREE_CODE (base) == MEM_REF)
5342 {
5343 tree cref, next = NULL_TREE;
5344
5345 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5346 return;
5347
5348 cref = TREE_OPERAND (ref, 0);
5349 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5350 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5351 next && TREE_CODE (next) != FIELD_DECL;
5352 next = DECL_CHAIN (next))
5353 ;
5354
5355 /* If this is the last field in a struct type or a field in a
5356 union type do not warn. */
5357 if (!next)
5358 return;
5359 }
5360
5361 low_bound = array_ref_low_bound (ref);
5362 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5363
5364 if (TREE_CODE (low_sub) == SSA_NAME)
5365 {
5366 vr = get_value_range (low_sub);
5367 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5368 {
5369 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5370 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5371 }
5372 }
5373
5374 if (vr && vr->type == VR_ANTI_RANGE)
5375 {
5376 if (TREE_CODE (up_sub) == INTEGER_CST
5377 && tree_int_cst_lt (up_bound, up_sub)
5378 && TREE_CODE (low_sub) == INTEGER_CST
5379 && tree_int_cst_lt (low_sub, low_bound))
5380 {
5381 warning_at (location, OPT_Warray_bounds,
5382 "array subscript is outside array bounds");
5383 TREE_NO_WARNING (ref) = 1;
5384 }
5385 }
5386 else if (TREE_CODE (up_sub) == INTEGER_CST
5387 && (ignore_off_by_one
5388 ? (tree_int_cst_lt (up_bound, up_sub)
5389 && !tree_int_cst_equal (up_bound_p1, up_sub))
5390 : (tree_int_cst_lt (up_bound, up_sub)
5391 || tree_int_cst_equal (up_bound_p1, up_sub))))
5392 {
5393 warning_at (location, OPT_Warray_bounds,
5394 "array subscript is above array bounds");
5395 TREE_NO_WARNING (ref) = 1;
5396 }
5397 else if (TREE_CODE (low_sub) == INTEGER_CST
5398 && tree_int_cst_lt (low_sub, low_bound))
5399 {
5400 warning_at (location, OPT_Warray_bounds,
5401 "array subscript is below array bounds");
5402 TREE_NO_WARNING (ref) = 1;
5403 }
5404 }
5405
5406 /* Check whether the expression T, located at LOCATION, computes the
5407 address of an ARRAY_REF, and call check_array_ref on it. */
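/* Illustrative example: for

     int a[10];
     p_1 = &a[12];

   the ARRAY_REF is found behind the ADDR_EXPR and, as 12 is more than
   one past the upper bound 9, "array subscript is above array bounds"
   is reported.  Taking the address one past the end, &a[10], is
   accepted because IGNORE_OFF_BY_ONE is passed as true here.  */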
5408
5409 static void
5410 search_for_addr_array (tree t, location_t location)
5411 {
5412 while (TREE_CODE (t) == SSA_NAME)
5413 {
5414 gimple g = SSA_NAME_DEF_STMT (t);
5415
5416 if (gimple_code (g) != GIMPLE_ASSIGN)
5417 return;
5418
5419 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5420 != GIMPLE_SINGLE_RHS)
5421 return;
5422
5423 t = gimple_assign_rhs1 (g);
5424 }
5425
5426
5427 /* We are only interested in addresses of ARRAY_REFs. */
5428 if (TREE_CODE (t) != ADDR_EXPR)
5429 return;
5430
5431 /* Check each ARRAY_REF in the reference chain. */
5432 do
5433 {
5434 if (TREE_CODE (t) == ARRAY_REF)
5435 check_array_ref (location, t, true /*ignore_off_by_one*/);
5436
5437 t = TREE_OPERAND (t, 0);
5438 }
5439 while (handled_component_p (t));
5440
5441 if (TREE_CODE (t) == MEM_REF
5442 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5443 && !TREE_NO_WARNING (t))
5444 {
5445 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5446 tree low_bound, up_bound, el_sz;
5447 double_int idx;
5448 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5449 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5450 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5451 return;
5452
5453 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5454 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5455 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5456 if (!low_bound
5457 || TREE_CODE (low_bound) != INTEGER_CST
5458 || !up_bound
5459 || TREE_CODE (up_bound) != INTEGER_CST
5460 || !el_sz
5461 || TREE_CODE (el_sz) != INTEGER_CST)
5462 return;
5463
5464 idx = mem_ref_offset (t);
5465 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5466 if (double_int_scmp (idx, double_int_zero) < 0)
5467 {
5468 warning_at (location, OPT_Warray_bounds,
5469 "array subscript is below array bounds");
5470 TREE_NO_WARNING (t) = 1;
5471 }
5472 else if (double_int_scmp (idx,
5473 double_int_add
5474 (double_int_add
5475 (tree_to_double_int (up_bound),
5476 double_int_neg
5477 (tree_to_double_int (low_bound))),
5478 double_int_one)) > 0)
5479 {
5480 warning_at (location, OPT_Warray_bounds,
5481 "array subscript is above array bounds");
5482 TREE_NO_WARNING (t) = 1;
5483 }
5484 }
5485 }
5486
5487 /* walk_tree() callback that checks if *TP is
5488 an ARRAY_REF, or computes the address of an ARRAY_REF (in which
5489 case a subscript one past the end of the array is allowed). Call
5490 check_array_ref for each ARRAY_REF found. The location is
5491 passed in DATA. */
5492
5493 static tree
5494 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5495 {
5496 tree t = *tp;
5497 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5498 location_t location;
5499
5500 if (EXPR_HAS_LOCATION (t))
5501 location = EXPR_LOCATION (t);
5502 else
5503 {
5504 location_t *locp = (location_t *) wi->info;
5505 location = *locp;
5506 }
5507
5508 *walk_subtree = TRUE;
5509
5510 if (TREE_CODE (t) == ARRAY_REF)
5511 check_array_ref (location, t, false /*ignore_off_by_one*/);
5512
5513 if (TREE_CODE (t) == MEM_REF
5514 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5515 search_for_addr_array (TREE_OPERAND (t, 0), location);
5516
5517 if (TREE_CODE (t) == ADDR_EXPR)
5518 *walk_subtree = FALSE;
5519
5520 return NULL_TREE;
5521 }
5522
5523 /* Walk over all statements of all reachable BBs and call check_array_bounds
5524 on them. */
5525
5526 static void
5527 check_all_array_refs (void)
5528 {
5529 basic_block bb;
5530 gimple_stmt_iterator si;
5531
5532 FOR_EACH_BB (bb)
5533 {
5534 edge_iterator ei;
5535 edge e;
5536 bool executable = false;
5537
5538 /* Skip blocks that were found to be unreachable. */
5539 FOR_EACH_EDGE (e, ei, bb->preds)
5540 executable |= !!(e->flags & EDGE_EXECUTABLE);
5541 if (!executable)
5542 continue;
5543
5544 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5545 {
5546 gimple stmt = gsi_stmt (si);
5547 struct walk_stmt_info wi;
5548 if (!gimple_has_location (stmt))
5549 continue;
5550
5551 if (is_gimple_call (stmt))
5552 {
5553 size_t i;
5554 size_t n = gimple_call_num_args (stmt);
5555 for (i = 0; i < n; i++)
5556 {
5557 tree arg = gimple_call_arg (stmt, i);
5558 search_for_addr_array (arg, gimple_location (stmt));
5559 }
5560 }
5561 else
5562 {
5563 memset (&wi, 0, sizeof (wi));
5564 wi.info = CONST_CAST (void *, (const void *)
5565 gimple_location_ptr (stmt));
5566
5567 walk_gimple_op (gsi_stmt (si),
5568 check_array_bounds,
5569 &wi);
5570 }
5571 }
5572 }
5573 }
5574
5575 /* Convert range assertion expressions into the implied copies and
5576 copy propagate away the copies. Doing the trivial copy propagation
5577 here avoids the need to run the full copy propagation pass after
5578 VRP.
5579
5580 FIXME, this will eventually lead to copy propagation removing the
5581 names that had useful range information attached to them. For
5582 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5583 then N_i will have the range [3, +INF].
5584
5585 However, by converting the assertion into the implied copy
5586 operation N_i = N_j, we will then copy-propagate N_j into the uses
5587 of N_i and lose the range information. We may want to hold on to
5588 ASSERT_EXPRs a little while longer as the ranges could be used in
5589 things like jump threading.
5590
5591 The problem with keeping ASSERT_EXPRs around is that passes after
5592 VRP need to handle them appropriately.
5593
5594 Another approach would be to make the range information a first
5595 class property of the SSA_NAME so that it can be queried from
5596 any pass. This is made somewhat more complex by the need for
5597 multiple ranges to be associated with one SSA_NAME. */
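/* As an illustrative sketch, an assertion such as

     x_3 = ASSERT_EXPR <x_1, x_1 > 4>;
     ... uses of x_3 ...

   is removed by first replacing every use of x_3 with x_1 and then
   deleting the assertion itself, which at this point is nothing more
   than the copy x_3 = x_1.  */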
5598
5599 static void
5600 remove_range_assertions (void)
5601 {
5602 basic_block bb;
5603 gimple_stmt_iterator si;
5604
5605 /* Note that the GSI iterator bump happens at the bottom of the
5606 loop and no bump is necessary if we're removing the statement
5607 referenced by the current GSI. */
5608 FOR_EACH_BB (bb)
5609 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5610 {
5611 gimple stmt = gsi_stmt (si);
5612 gimple use_stmt;
5613
5614 if (is_gimple_assign (stmt)
5615 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5616 {
5617 tree rhs = gimple_assign_rhs1 (stmt);
5618 tree var;
5619 tree cond = fold (ASSERT_EXPR_COND (rhs));
5620 use_operand_p use_p;
5621 imm_use_iterator iter;
5622
5623 gcc_assert (cond != boolean_false_node);
5624
5625 /* Propagate the RHS into every use of the LHS. */
5626 var = ASSERT_EXPR_VAR (rhs);
5627 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5628 gimple_assign_lhs (stmt))
5629 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5630 {
5631 SET_USE (use_p, var);
5632 gcc_assert (TREE_CODE (var) == SSA_NAME);
5633 }
5634
5635 /* And finally, remove the copy, it is not needed. */
5636 gsi_remove (&si, true);
5637 release_defs (stmt);
5638 }
5639 else
5640 gsi_next (&si);
5641 }
5642 }
5643
5644
5645 /* Return true if STMT is interesting for VRP. */
5646
5647 static bool
5648 stmt_interesting_for_vrp (gimple stmt)
5649 {
5650 if (gimple_code (stmt) == GIMPLE_PHI
5651 && is_gimple_reg (gimple_phi_result (stmt))
5652 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5653 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5654 return true;
5655 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5656 {
5657 tree lhs = gimple_get_lhs (stmt);
5658
5659 /* In general, assignments with virtual operands are not useful
5660 for deriving ranges, with the obvious exception of calls to
5661 builtin functions. */
5662 if (lhs && TREE_CODE (lhs) == SSA_NAME
5663 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5664 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5665 && ((is_gimple_call (stmt)
5666 && gimple_call_fndecl (stmt) != NULL_TREE
5667 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
5668 || !gimple_vuse (stmt)))
5669 return true;
5670 }
5671 else if (gimple_code (stmt) == GIMPLE_COND
5672 || gimple_code (stmt) == GIMPLE_SWITCH)
5673 return true;
5674
5675 return false;
5676 }
5677
5678
5679 /* Initialize local data structures for VRP. */
5680
5681 static void
5682 vrp_initialize (void)
5683 {
5684 basic_block bb;
5685
5686 values_propagated = false;
5687 num_vr_values = num_ssa_names;
5688 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
5689 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5690
5691 FOR_EACH_BB (bb)
5692 {
5693 gimple_stmt_iterator si;
5694
5695 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5696 {
5697 gimple phi = gsi_stmt (si);
5698 if (!stmt_interesting_for_vrp (phi))
5699 {
5700 tree lhs = PHI_RESULT (phi);
5701 set_value_range_to_varying (get_value_range (lhs));
5702 prop_set_simulate_again (phi, false);
5703 }
5704 else
5705 prop_set_simulate_again (phi, true);
5706 }
5707
5708 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5709 {
5710 gimple stmt = gsi_stmt (si);
5711
5712 /* If the statement is a control statement, then we must
5713 simulate it at least once. Failure to do so means that
5714 its outgoing edges will never get added. */
5715 if (stmt_ends_bb_p (stmt))
5716 prop_set_simulate_again (stmt, true);
5717 else if (!stmt_interesting_for_vrp (stmt))
5718 {
5719 ssa_op_iter i;
5720 tree def;
5721 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5722 set_value_range_to_varying (get_value_range (def));
5723 prop_set_simulate_again (stmt, false);
5724 }
5725 else
5726 prop_set_simulate_again (stmt, true);
5727 }
5728 }
5729 }
5730
5731 /* Return the singleton value of NAME's value range if it has one, or NAME itself otherwise. */
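/* For instance, if x_5 has the singleton range [7, 7], folding a
   statement that uses x_5 through this valueization hook sees the
   constant 7 instead.  */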
5732
5733 static inline tree
5734 vrp_valueize (tree name)
5735 {
5736 if (TREE_CODE (name) == SSA_NAME)
5737 {
5738 value_range_t *vr = get_value_range (name);
5739 if (vr->type == VR_RANGE
5740 && (vr->min == vr->max
5741 || operand_equal_p (vr->min, vr->max, 0)))
5742 return vr->min;
5743 }
5744 return name;
5745 }
5746
5747 /* Visit assignment STMT. If it produces an interesting range, record
5748 the SSA name in *OUTPUT_P. */
5749
5750 static enum ssa_prop_result
5751 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5752 {
5753 tree def, lhs;
5754 ssa_op_iter iter;
5755 enum gimple_code code = gimple_code (stmt);
5756 lhs = gimple_get_lhs (stmt);
5757
5758 /* We only keep track of ranges in integral and pointer types. */
5759 if (TREE_CODE (lhs) == SSA_NAME
5760 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5761 /* It is valid to have NULL MIN/MAX values on a type. See
5762 build_range_type. */
5763 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5764 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5765 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5766 {
5767 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5768
5769 /* Try folding the statement to a constant first. */
5770 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
5771 if (tem && !is_overflow_infinity (tem))
5772 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
5773 /* Then dispatch to value-range extracting functions. */
5774 else if (code == GIMPLE_CALL)
5775 extract_range_basic (&new_vr, stmt);
5776 else
5777 extract_range_from_assignment (&new_vr, stmt);
5778
5779 if (update_value_range (lhs, &new_vr))
5780 {
5781 *output_p = lhs;
5782
5783 if (dump_file && (dump_flags & TDF_DETAILS))
5784 {
5785 fprintf (dump_file, "Found new range for ");
5786 print_generic_expr (dump_file, lhs, 0);
5787 fprintf (dump_file, ": ");
5788 dump_value_range (dump_file, &new_vr);
5789 fprintf (dump_file, "\n\n");
5790 }
5791
5792 if (new_vr.type == VR_VARYING)
5793 return SSA_PROP_VARYING;
5794
5795 return SSA_PROP_INTERESTING;
5796 }
5797
5798 return SSA_PROP_NOT_INTERESTING;
5799 }
5800
5801 /* Every other statement produces no useful ranges. */
5802 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5803 set_value_range_to_varying (get_value_range (def));
5804
5805 return SSA_PROP_VARYING;
5806 }
5807
5808 /* Helper that returns the value range of the SSA_NAME with version I,
5809 or a symbolic range containing just that SSA_NAME if its value range
5810 is varying or undefined. */
5811
5812 static inline value_range_t
5813 get_vr_for_comparison (int i)
5814 {
5815 value_range_t vr = *get_value_range (ssa_name (i));
5816
5817 /* If name N_i does not have a valid range, use N_i as its own
5818 range. This allows us to compare against names that may
5819 have N_i in their ranges. */
5820 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5821 {
5822 vr.type = VR_RANGE;
5823 vr.min = ssa_name (i);
5824 vr.max = ssa_name (i);
5825 }
5826
5827 return vr;
5828 }
5829
5830 /* Compare all the value ranges for names equivalent to VAR with VAL
5831 using comparison code COMP. Return the same value returned by
5832 compare_range_with_value, including the setting of
5833 *STRICT_OVERFLOW_P. */
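/* Illustrative example: if x_2 has the range [0, 10] and its
   equivalence set contains y_7 with the range [0, 5], evaluating
   x_2 < 20 checks both ranges; both yield true, so boolean_true_node
   is returned.  Had the members disagreed, NULL_TREE would be
   returned instead.  */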
5834
5835 static tree
5836 compare_name_with_value (enum tree_code comp, tree var, tree val,
5837 bool *strict_overflow_p)
5838 {
5839 bitmap_iterator bi;
5840 unsigned i;
5841 bitmap e;
5842 tree retval, t;
5843 int used_strict_overflow;
5844 bool sop;
5845 value_range_t equiv_vr;
5846
5847 /* Get the set of equivalences for VAR. */
5848 e = get_value_range (var)->equiv;
5849
5850 /* Start at -1. Set it to 0 if we do a comparison without relying
5851 on overflow, or 1 if all comparisons rely on overflow. */
5852 used_strict_overflow = -1;
5853
5854 /* Compare VAR's value range with VAL. */
5855 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5856 sop = false;
5857 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5858 if (retval)
5859 used_strict_overflow = sop ? 1 : 0;
5860
5861 /* If the equiv set is empty we have done all work we need to do. */
5862 if (e == NULL)
5863 {
5864 if (retval
5865 && used_strict_overflow > 0)
5866 *strict_overflow_p = true;
5867 return retval;
5868 }
5869
5870 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5871 {
5872 equiv_vr = get_vr_for_comparison (i);
5873 sop = false;
5874 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5875 if (t)
5876 {
5877 /* If we get different answers from different members
5878 of the equivalence set this check must be in a dead
5879 code region. Folding it to a trap representation
5880 would be correct here. For now just return don't-know. */
5881 if (retval != NULL
5882 && t != retval)
5883 {
5884 retval = NULL_TREE;
5885 break;
5886 }
5887 retval = t;
5888
5889 if (!sop)
5890 used_strict_overflow = 0;
5891 else if (used_strict_overflow < 0)
5892 used_strict_overflow = 1;
5893 }
5894 }
5895
5896 if (retval
5897 && used_strict_overflow > 0)
5898 *strict_overflow_p = true;
5899
5900 return retval;
5901 }
5902
5903
5904 /* Given a comparison code COMP and names N1 and N2, compare all the
5905 ranges equivalent to N1 against all the ranges equivalent to N2
5906 to determine the value of N1 COMP N2. Return the same value
5907 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5908 whether we relied on an overflow infinity in the comparison. */
5909
5910
5911 static tree
5912 compare_names (enum tree_code comp, tree n1, tree n2,
5913 bool *strict_overflow_p)
5914 {
5915 tree t, retval;
5916 bitmap e1, e2;
5917 bitmap_iterator bi1, bi2;
5918 unsigned i1, i2;
5919 int used_strict_overflow;
5920 static bitmap_obstack *s_obstack = NULL;
5921 static bitmap s_e1 = NULL, s_e2 = NULL;
5922
5923 /* Compare the ranges of every name equivalent to N1 against the
5924 ranges of every name equivalent to N2. */
5925 e1 = get_value_range (n1)->equiv;
5926 e2 = get_value_range (n2)->equiv;
5927
5928 /* Use the fake bitmaps if e1 or e2 are not available. */
5929 if (s_obstack == NULL)
5930 {
5931 s_obstack = XNEW (bitmap_obstack);
5932 bitmap_obstack_initialize (s_obstack);
5933 s_e1 = BITMAP_ALLOC (s_obstack);
5934 s_e2 = BITMAP_ALLOC (s_obstack);
5935 }
5936 if (e1 == NULL)
5937 e1 = s_e1;
5938 if (e2 == NULL)
5939 e2 = s_e2;
5940
5941 /* Add N1 and N2 to their own set of equivalences to avoid
5942 duplicating the body of the loop just to check N1 and N2
5943 ranges. */
5944 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5945 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5946
5947 /* If the equivalence sets have a common intersection, then the two
5948 names can be compared without checking their ranges. */
5949 if (bitmap_intersect_p (e1, e2))
5950 {
5951 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5952 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5953
5954 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5955 ? boolean_true_node
5956 : boolean_false_node;
5957 }
5958
5959 /* Start at -1. Set it to 0 if we do a comparison without relying
5960 on overflow, or 1 if all comparisons rely on overflow. */
5961 used_strict_overflow = -1;
5962
5963 /* Otherwise, compare all the equivalent ranges. N1 and N2 have
5964 already been added to their own equivalence sets above, so the
5965 loops below also cover their ranges. */
5966 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5967 {
5968 value_range_t vr1 = get_vr_for_comparison (i1);
5969
5970 t = retval = NULL_TREE;
5971 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5972 {
5973 bool sop = false;
5974
5975 value_range_t vr2 = get_vr_for_comparison (i2);
5976
5977 t = compare_ranges (comp, &vr1, &vr2, &sop);
5978 if (t)
5979 {
5980 /* If we get different answers from different members
5981 of the equivalence set this check must be in a dead
5982 code region. Folding it to a trap representation
5983 would be correct here. For now just return don't-know. */
5984 if (retval != NULL
5985 && t != retval)
5986 {
5987 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5988 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5989 return NULL_TREE;
5990 }
5991 retval = t;
5992
5993 if (!sop)
5994 used_strict_overflow = 0;
5995 else if (used_strict_overflow < 0)
5996 used_strict_overflow = 1;
5997 }
5998 }
5999
6000 if (retval)
6001 {
6002 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6003 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6004 if (used_strict_overflow > 0)
6005 *strict_overflow_p = true;
6006 return retval;
6007 }
6008 }
6009
6010 /* None of the equivalent ranges are useful in computing this
6011 comparison. */
6012 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6013 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6014 return NULL_TREE;
6015 }
6016
6017 /* Helper function for vrp_evaluate_conditional_warnv. */
6018
6019 static tree
6020 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6021 tree op0, tree op1,
6022 bool * strict_overflow_p)
6023 {
6024 value_range_t *vr0, *vr1;
6025
6026 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6027 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6028
6029 if (vr0 && vr1)
6030 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6031 else if (vr0 && vr1 == NULL)
6032 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6033 else if (vr0 == NULL && vr1)
6034 return (compare_range_with_value
6035 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6036 return NULL;
6037 }
6038
6039 /* Helper function for vrp_evaluate_conditional_warnv. */
6040
6041 static tree
6042 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6043 tree op1, bool use_equiv_p,
6044 bool *strict_overflow_p, bool *only_ranges)
6045 {
6046 tree ret;
6047 if (only_ranges)
6048 *only_ranges = true;
6049
6050 /* We only deal with integral and pointer types. */
6051 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6052 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6053 return NULL_TREE;
6054
6055 if (use_equiv_p)
6056 {
6057 if (only_ranges
6058 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6059 (code, op0, op1, strict_overflow_p)))
6060 return ret;
6061 *only_ranges = false;
6062 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6063 return compare_names (code, op0, op1, strict_overflow_p);
6064 else if (TREE_CODE (op0) == SSA_NAME)
6065 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6066 else if (TREE_CODE (op1) == SSA_NAME)
6067 return (compare_name_with_value
6068 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6069 }
6070 else
6071 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6072 strict_overflow_p);
6073 return NULL_TREE;
6074 }
6075
6076 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6077 information. Return NULL if the conditional cannot be evaluated.
6078 The ranges of all the names equivalent with the operands in COND
6079 will be used when trying to compute the value. If the result is
6080 based on undefined signed overflow, issue a warning if
6081 appropriate. */
6082
6083 static tree
6084 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6085 {
6086 bool sop;
6087 tree ret;
6088 bool only_ranges;
6089
6090 /* Some passes and foldings leak constants with overflow flag set
6091 into the IL. Avoid doing wrong things with these and bail out. */
6092 if ((TREE_CODE (op0) == INTEGER_CST
6093 && TREE_OVERFLOW (op0))
6094 || (TREE_CODE (op1) == INTEGER_CST
6095 && TREE_OVERFLOW (op1)))
6096 return NULL_TREE;
6097
6098 sop = false;
6099 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6100 &only_ranges);
6101
6102 if (ret && sop)
6103 {
6104 enum warn_strict_overflow_code wc;
6105 const char* warnmsg;
6106
6107 if (is_gimple_min_invariant (ret))
6108 {
6109 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6110 warnmsg = G_("assuming signed overflow does not occur when "
6111 "simplifying conditional to constant");
6112 }
6113 else
6114 {
6115 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6116 warnmsg = G_("assuming signed overflow does not occur when "
6117 "simplifying conditional");
6118 }
6119
6120 if (issue_strict_overflow_warning (wc))
6121 {
6122 location_t location;
6123
6124 if (!gimple_has_location (stmt))
6125 location = input_location;
6126 else
6127 location = gimple_location (stmt);
6128 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6129 }
6130 }
6131
6132 if (warn_type_limits
6133 && ret && only_ranges
6134 && TREE_CODE_CLASS (code) == tcc_comparison
6135 && TREE_CODE (op0) == SSA_NAME)
6136 {
6137 /* If the comparison is being folded and the operand on the LHS
6138 is being compared against a constant value that is outside of
6139 the natural range of OP0's type, then the predicate will
6140 always fold regardless of the value of OP0. If -Wtype-limits
6141 was specified, emit a warning. */
6142 tree type = TREE_TYPE (op0);
6143 value_range_t *vr0 = get_value_range (op0);
6144
6145 if (vr0->type != VR_VARYING
6146 && INTEGRAL_TYPE_P (type)
6147 && vrp_val_is_min (vr0->min)
6148 && vrp_val_is_max (vr0->max)
6149 && is_gimple_min_invariant (op1))
6150 {
6151 location_t location;
6152
6153 if (!gimple_has_location (stmt))
6154 location = input_location;
6155 else
6156 location = gimple_location (stmt);
6157
6158 warning_at (location, OPT_Wtype_limits,
6159 integer_zerop (ret)
6160 ? G_("comparison always false "
6161 "due to limited range of data type")
6162 : G_("comparison always true "
6163 "due to limited range of data type"));
6164 }
6165 }
6166
6167 return ret;
6168 }
6169
6170
6171 /* Visit conditional statement STMT. If we can determine which edge
6172 will be taken out of STMT's basic block, record it in
6173 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6174 SSA_PROP_VARYING. */
6175
6176 static enum ssa_prop_result
6177 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6178 {
6179 tree val;
6180 bool sop;
6181
6182 *taken_edge_p = NULL;
6183
6184 if (dump_file && (dump_flags & TDF_DETAILS))
6185 {
6186 tree use;
6187 ssa_op_iter i;
6188
6189 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6190 print_gimple_stmt (dump_file, stmt, 0, 0);
6191 fprintf (dump_file, "\nWith known ranges\n");
6192
6193 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6194 {
6195 fprintf (dump_file, "\t");
6196 print_generic_expr (dump_file, use, 0);
6197 fprintf (dump_file, ": ");
6198 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6199 }
6200
6201 fprintf (dump_file, "\n");
6202 }
6203
6204 /* Compute the value of the predicate COND by checking the known
6205 ranges of each of its operands.
6206
6207 Note that we cannot evaluate all the equivalent ranges here
6208 because those ranges may not yet be final and with the current
6209 propagation strategy, we cannot determine when the value ranges
6210 of the names in the equivalence set have changed.
6211
6212 For instance, given the following code fragment
6213
6214 i_5 = PHI <8, i_13>
6215 ...
6216 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6217 if (i_14 == 1)
6218 ...
6219
6220 Assume that on the first visit to i_14, i_5 has the temporary
6221 range [8, 8] because the second argument to the PHI function is
6222 not yet executable. We derive the range ~[0, 0] for i_14 and the
6223 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6224 the first time, since i_14 is equivalent to the range [8, 8], we
6225 determine that the predicate is always false.
6226
6227 On the next round of propagation, i_13 is determined to be
6228 VARYING, which causes i_5 to drop down to VARYING. So, another
6229 visit to i_14 is scheduled. In this second visit, we compute the
6230 exact same range and equivalence set for i_14, namely ~[0, 0] and
6231 { i_5 }. But we did not have the previous range for i_5
6232 registered, so vrp_visit_assignment thinks that the range for
6233 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6234 is not visited again, which stops propagation from visiting
6235 statements in the THEN clause of that if().
6236
6237 To properly fix this we would need to keep the previous range
6238 value for the names in the equivalence set. This way we would've
6239 discovered that from one visit to the other i_5 changed from
6240 range [8, 8] to VR_VARYING.
6241
6242 However, fixing this apparent limitation may not be worth the
6243 additional checking. Testing on several code bases (GCC, DLV,
6244 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6245 4 more predicates folded in SPEC. */
6246 sop = false;
6247
6248 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6249 gimple_cond_lhs (stmt),
6250 gimple_cond_rhs (stmt),
6251 false, &sop, NULL);
6252 if (val)
6253 {
6254 if (!sop)
6255 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6256 else
6257 {
6258 if (dump_file && (dump_flags & TDF_DETAILS))
6259 fprintf (dump_file,
6260 "\nIgnoring predicate evaluation because "
6261 "it assumes that signed overflow is undefined");
6262 val = NULL_TREE;
6263 }
6264 }
6265
6266 if (dump_file && (dump_flags & TDF_DETAILS))
6267 {
6268 fprintf (dump_file, "\nPredicate evaluates to: ");
6269 if (val == NULL_TREE)
6270 fprintf (dump_file, "DON'T KNOW\n");
6271 else
6272 print_generic_stmt (dump_file, val, 0);
6273 }
6274
6275 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6276 }
6277
6278 /* Searches the case label vector of switch statement STMT for the index
6279 *IDX of the CASE_LABEL that includes the value VAL. The search is
6280 restricted to the range [START_IDX, n - 1] where n is the number of labels.
6281
6282 If there is a CASE_LABEL for VAL, its index is placed in *IDX and true is
6283 returned.
6284
6285 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6286 its index is placed in *IDX and false is returned.
6287
6288 If VAL is larger than any CASE_LABEL, n is placed in *IDX and false is
6289 returned. */
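/* Illustrative example: for a switch with the labels

     default: case 1: case 5: case 7 ... 9:

   looking up VAL == 8 returns true with *IDX set to the index of the
   "case 7 ... 9" label, while looking up VAL == 6 returns false with
   *IDX set to that same index, it being the first label larger than
   VAL.  */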
6290
6291 static bool
6292 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6293 {
6294 size_t n = gimple_switch_num_labels (stmt);
6295 size_t low, high;
6296
6297 /* Find case label for minimum of the value range or the next one.
6298 At each iteration we are searching in [low, high - 1]. */
6299
6300 for (low = start_idx, high = n; high != low; )
6301 {
6302 tree t;
6303 int cmp;
6304 /* Note that i != high, so we never ask for n. */
6305 size_t i = (high + low) / 2;
6306 t = gimple_switch_label (stmt, i);
6307
6308 /* Cache the result of comparing CASE_LOW and val. */
6309 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6310
6311 if (cmp == 0)
6312 {
6313 /* Ranges cannot be empty. */
6314 *idx = i;
6315 return true;
6316 }
6317 else if (cmp > 0)
6318 high = i;
6319 else
6320 {
6321 low = i + 1;
6322 if (CASE_HIGH (t) != NULL
6323 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6324 {
6325 *idx = i;
6326 return true;
6327 }
6328 }
6329 }
6330
6331 *idx = high;
6332 return false;
6333 }
6334
6335 /* Searches the case label vector of switch statement STMT for the range of CASE_LABELs that is used
6336 for values between MIN and MAX. The first index is placed in MIN_IDX. The
6337 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6338 then MAX_IDX < MIN_IDX.
6339 Returns true if the default label is not needed. */
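/* Continuing the illustrative example above, for MIN == 5 and MAX == 8
   the result is *MIN_IDX pointing at "case 5" and *MAX_IDX at
   "case 7 ... 9"; false is returned because the value 6 is not covered
   by any case label, so the default label is still needed.  */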
6340
6341 static bool
6342 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6343 size_t *max_idx)
6344 {
6345 size_t i, j;
6346 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6347 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6348
6349 if (i == j
6350 && min_take_default
6351 && max_take_default)
6352 {
6353 /* Only the default case label reached.
6354 Return an empty range. */
6355 *min_idx = 1;
6356 *max_idx = 0;
6357 return false;
6358 }
6359 else
6360 {
6361 bool take_default = min_take_default || max_take_default;
6362 tree low, high;
6363 size_t k;
6364
6365 if (max_take_default)
6366 j--;
6367
6368 /* If the case label range is contiguous, we do not need
6369 the default case label. Verify that. */
6370 high = CASE_LOW (gimple_switch_label (stmt, i));
6371 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6372 high = CASE_HIGH (gimple_switch_label (stmt, i));
6373 for (k = i + 1; k <= j; ++k)
6374 {
6375 low = CASE_LOW (gimple_switch_label (stmt, k));
6376 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6377 {
6378 take_default = true;
6379 break;
6380 }
6381 high = low;
6382 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6383 high = CASE_HIGH (gimple_switch_label (stmt, k));
6384 }
6385
6386 *min_idx = i;
6387 *max_idx = j;
6388 return !take_default;
6389 }
6390 }
6391
6392 /* Visit switch statement STMT. If we can determine which edge
6393 will be taken out of STMT's basic block, record it in
6394 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6395 SSA_PROP_VARYING. */
6396
6397 static enum ssa_prop_result
6398 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6399 {
6400 tree op, val;
6401 value_range_t *vr;
6402 size_t i = 0, j = 0;
6403 bool take_default;
6404
6405 *taken_edge_p = NULL;
6406 op = gimple_switch_index (stmt);
6407 if (TREE_CODE (op) != SSA_NAME)
6408 return SSA_PROP_VARYING;
6409
6410 vr = get_value_range (op);
6411 if (dump_file && (dump_flags & TDF_DETAILS))
6412 {
6413 fprintf (dump_file, "\nVisiting switch expression with operand ");
6414 print_generic_expr (dump_file, op, 0);
6415 fprintf (dump_file, " with known range ");
6416 dump_value_range (dump_file, vr);
6417 fprintf (dump_file, "\n");
6418 }
6419
6420 if (vr->type != VR_RANGE
6421 || symbolic_range_p (vr))
6422 return SSA_PROP_VARYING;
6423
6424 /* Find the single edge that is taken from the switch expression. */
6425 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6426
6427 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6428 label. */
6429 if (j < i)
6430 {
6431 gcc_assert (take_default);
6432 val = gimple_switch_default_label (stmt);
6433 }
6434 else
6435 {
6436 /* Check if labels with index i to j and maybe the default label
6437 all reach the same destination label. */
6438
6439 val = gimple_switch_label (stmt, i);
6440 if (take_default
6441 && CASE_LABEL (gimple_switch_default_label (stmt))
6442 != CASE_LABEL (val))
6443 {
6444 if (dump_file && (dump_flags & TDF_DETAILS))
6445 fprintf (dump_file, " not a single destination for this "
6446 "range\n");
6447 return SSA_PROP_VARYING;
6448 }
6449 for (++i; i <= j; ++i)
6450 {
6451 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6452 {
6453 if (dump_file && (dump_flags & TDF_DETAILS))
6454 fprintf (dump_file, " not a single destination for this "
6455 "range\n");
6456 return SSA_PROP_VARYING;
6457 }
6458 }
6459 }
6460
6461 *taken_edge_p = find_edge (gimple_bb (stmt),
6462 label_to_block (CASE_LABEL (val)));
6463
6464 if (dump_file && (dump_flags & TDF_DETAILS))
6465 {
6466 fprintf (dump_file, " will take edge to ");
6467 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6468 }
6469
6470 return SSA_PROP_INTERESTING;
6471 }
6472
6473
6474 /* Evaluate statement STMT. If the statement produces a useful range,
6475 return SSA_PROP_INTERESTING and record the SSA name with the
6476 interesting range into *OUTPUT_P.
6477
6478 If STMT is a conditional branch and we can determine its truth
6479 value, the taken edge is recorded in *TAKEN_EDGE_P.
6480
6481 If STMT produces a varying value, return SSA_PROP_VARYING. */
6482
6483 static enum ssa_prop_result
6484 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6485 {
6486 tree def;
6487 ssa_op_iter iter;
6488
6489 if (dump_file && (dump_flags & TDF_DETAILS))
6490 {
6491 fprintf (dump_file, "\nVisiting statement:\n");
6492 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6493 fprintf (dump_file, "\n");
6494 }
6495
6496 if (!stmt_interesting_for_vrp (stmt))
6497 gcc_assert (stmt_ends_bb_p (stmt));
6498 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6499 {
6500 /* In general, assignments with virtual operands are not useful
6501 for deriving ranges, with the obvious exception of calls to
6502 builtin functions. */
6503 if ((is_gimple_call (stmt)
6504 && gimple_call_fndecl (stmt) != NULL_TREE
6505 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6506 || !gimple_vuse (stmt))
6507 return vrp_visit_assignment_or_call (stmt, output_p);
6508 }
6509 else if (gimple_code (stmt) == GIMPLE_COND)
6510 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6511 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6512 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6513
6514 /* All other statements produce nothing of interest for VRP, so mark
6515 their outputs varying and prevent further simulation. */
6516 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6517 set_value_range_to_varying (get_value_range (def));
6518
6519 return SSA_PROP_VARYING;
6520 }
6521
6522
6523 /* Meet operation for value ranges. Given two value ranges VR0 and
6524 VR1, store in VR0 a range that contains both VR0 and VR1. This
6525 may not be the smallest possible such range. */
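/* For instance (illustrative): meeting [1, 5] with [3, 10] yields the
   convex hull [1, 10]; meeting [10, 20] with ~[0, 0] yields ~[0, 0]
   because the range does not overlap the excluded interval [0, 0];
   and meeting [0, 5] with ~[0, 0] gives up and the result becomes
   VARYING.  */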
6526
6527 static void
6528 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6529 {
6530 if (vr0->type == VR_UNDEFINED)
6531 {
6532 copy_value_range (vr0, vr1);
6533 return;
6534 }
6535
6536 if (vr1->type == VR_UNDEFINED)
6537 {
6538 /* Nothing to do. VR0 already has the resulting range. */
6539 return;
6540 }
6541
6542 if (vr0->type == VR_VARYING)
6543 {
6544 /* Nothing to do. VR0 already has the resulting range. */
6545 return;
6546 }
6547
6548 if (vr1->type == VR_VARYING)
6549 {
6550 set_value_range_to_varying (vr0);
6551 return;
6552 }
6553
6554 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6555 {
6556 int cmp;
6557 tree min, max;
6558
6559 /* Compute the convex hull of the ranges. The lower limit of
6560 the new range is the minimum of the two ranges. If they
6561 cannot be compared, then give up. */
6562 cmp = compare_values (vr0->min, vr1->min);
6563 if (cmp == 0 || cmp == 1)
6564 min = vr1->min;
6565 else if (cmp == -1)
6566 min = vr0->min;
6567 else
6568 goto give_up;
6569
6570 /* Similarly, the upper limit of the new range is the maximum
6571 of the two ranges. If they cannot be compared, then
6572 give up. */
6573 cmp = compare_values (vr0->max, vr1->max);
6574 if (cmp == 0 || cmp == -1)
6575 max = vr1->max;
6576 else if (cmp == 1)
6577 max = vr0->max;
6578 else
6579 goto give_up;
6580
6581 /* Check for useless ranges. */
6582 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6583 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6584 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6585 goto give_up;
6586
6587 /* The resulting set of equivalences is the intersection of
6588 the two sets. */
6589 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6590 bitmap_and_into (vr0->equiv, vr1->equiv);
6591 else if (vr0->equiv && !vr1->equiv)
6592 bitmap_clear (vr0->equiv);
6593
6594 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6595 }
6596 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6597 {
6598 /* Two anti-ranges meet only if their complements intersect.
6599 Only handle the case of identical singleton anti-ranges. */
6600 if (compare_values (vr0->min, vr1->min) == 0
6601 && compare_values (vr0->max, vr1->max) == 0
6602 && compare_values (vr0->min, vr0->max) == 0)
6603 {
6604 /* The resulting set of equivalences is the intersection of
6605 the two sets. */
6606 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6607 bitmap_and_into (vr0->equiv, vr1->equiv);
6608 else if (vr0->equiv && !vr1->equiv)
6609 bitmap_clear (vr0->equiv);
6610 }
6611 else
6612 goto give_up;
6613 }
6614 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6615 {
6616 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6617 only handle the case where the ranges have an empty intersection.
6618 The result of the meet operation is the anti-range. */
6619 if (!symbolic_range_p (vr0)
6620 && !symbolic_range_p (vr1)
6621 && !value_ranges_intersect_p (vr0, vr1))
6622 {
6623 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6624 set. We need to compute the intersection of the two
6625 equivalence sets. */
6626 if (vr1->type == VR_ANTI_RANGE)
6627 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6628
6629 /* The resulting set of equivalences is the intersection of
6630 the two sets. */
6631 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6632 bitmap_and_into (vr0->equiv, vr1->equiv);
6633 else if (vr0->equiv && !vr1->equiv)
6634 bitmap_clear (vr0->equiv);
6635 }
6636 else
6637 goto give_up;
6638 }
6639 else
6640 gcc_unreachable ();
6641
6642 return;
6643
6644 give_up:
6645 /* Failed to find an efficient meet. Before giving up and setting
6646 the result to VARYING, see if we can at least derive a useful
6647 anti-range. FIXME, all this nonsense about distinguishing
6648 anti-ranges from ranges is necessary because of the odd
6649 semantics of range_includes_zero_p and friends. */
6650 if (!symbolic_range_p (vr0)
6651 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6652 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6653 && !symbolic_range_p (vr1)
6654 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6655 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6656 {
6657 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6658
6659 /* Since this meet operation did not result from the meeting of
6660 two equivalent names, VR0 cannot have any equivalences. */
6661 if (vr0->equiv)
6662 bitmap_clear (vr0->equiv);
6663 }
6664 else
6665 set_value_range_to_varying (vr0);
6666 }
6667
6668
6669 /* Visit all arguments for PHI node PHI that flow through executable
6670 edges. If a valid value range can be derived from all the incoming
6671 value ranges, set a new range for the LHS of PHI. */
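/* Illustrative example: for

     i_5 = PHI <0(2), i_7(3)>

   with both incoming edges executable and i_7 having the range
   [1, 10], the meet of [0, 0] and [1, 10] gives i_5 the range
   [0, 10].  */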
6672
6673 static enum ssa_prop_result
6674 vrp_visit_phi_node (gimple phi)
6675 {
6676 size_t i;
6677 tree lhs = PHI_RESULT (phi);
6678 value_range_t *lhs_vr = get_value_range (lhs);
6679 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6680 int edges, old_edges;
6681 struct loop *l;
6682
6683 if (dump_file && (dump_flags & TDF_DETAILS))
6684 {
6685 fprintf (dump_file, "\nVisiting PHI node: ");
6686 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6687 }
6688
6689 edges = 0;
6690 for (i = 0; i < gimple_phi_num_args (phi); i++)
6691 {
6692 edge e = gimple_phi_arg_edge (phi, i);
6693
6694 if (dump_file && (dump_flags & TDF_DETAILS))
6695 {
6696 fprintf (dump_file,
6697 "\n Argument #%d (%d -> %d %sexecutable)\n",
6698 (int) i, e->src->index, e->dest->index,
6699 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6700 }
6701
6702 if (e->flags & EDGE_EXECUTABLE)
6703 {
6704 tree arg = PHI_ARG_DEF (phi, i);
6705 value_range_t vr_arg;
6706
6707 ++edges;
6708
6709 if (TREE_CODE (arg) == SSA_NAME)
6710 {
6711 vr_arg = *(get_value_range (arg));
6712 }
6713 else
6714 {
6715 if (is_overflow_infinity (arg))
6716 {
6717 arg = copy_node (arg);
6718 TREE_OVERFLOW (arg) = 0;
6719 }
6720
6721 vr_arg.type = VR_RANGE;
6722 vr_arg.min = arg;
6723 vr_arg.max = arg;
6724 vr_arg.equiv = NULL;
6725 }
6726
6727 if (dump_file && (dump_flags & TDF_DETAILS))
6728 {
6729 fprintf (dump_file, "\t");
6730 print_generic_expr (dump_file, arg, dump_flags);
6731 fprintf (dump_file, "\n\tValue: ");
6732 dump_value_range (dump_file, &vr_arg);
6733 fprintf (dump_file, "\n");
6734 }
6735
6736 vrp_meet (&vr_result, &vr_arg);
6737
6738 if (vr_result.type == VR_VARYING)
6739 break;
6740 }
6741 }
6742
6743 if (vr_result.type == VR_VARYING)
6744 goto varying;
6745 else if (vr_result.type == VR_UNDEFINED)
6746 goto update_range;
6747
6748 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6749 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6750
6751 /* To prevent infinite iterations in the algorithm, widen the
6752 bounds when the new value is slightly bigger or smaller than the
6753 previous one. We don't do this if we have seen a new executable
6754 edge; this helps us avoid an overflow infinity for conditionals
6755 which are not in a loop. */
6756 if (edges > 0
6757 && gimple_phi_num_args (phi) > 1
6758 && edges == old_edges)
6759 {
6760 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6761 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6762
6763 /* For non VR_RANGE or for pointers fall back to varying if
6764 the range changed. */
6765 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6766 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6767 && (cmp_min != 0 || cmp_max != 0))
6768 goto varying;
6769
6770 /* If the new minimum is smaller or larger than the previous
6771 one, go all the way to -INF. In the first case, to avoid
6772 iterating millions of times to reach -INF, and in the
6773 other case to avoid infinite bouncing between different
6774 minimums. */
6775 if (cmp_min > 0 || cmp_min < 0)
6776 {
6777 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6778 || !vrp_var_may_overflow (lhs, phi))
6779 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6780 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6781 vr_result.min =
6782 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6783 }
6784
6785 /* Similarly, if the new maximum is smaller or larger than
6786 the previous one, go all the way to +INF. */
6787 if (cmp_max < 0 || cmp_max > 0)
6788 {
6789 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6790 || !vrp_var_may_overflow (lhs, phi))
6791 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6792 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6793 vr_result.max =
6794 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6795 }
6796
6797 /* If we dropped either bound to +-INF and this is a loop
6798 PHI node, SCEV may know more about its value-range. */
6799 if ((cmp_min > 0 || cmp_min < 0
6800 || cmp_max < 0 || cmp_max > 0)
6801 && current_loops
6802 && (l = loop_containing_stmt (phi))
6803 && l->header == gimple_bb (phi))
6804 adjust_range_with_scev (&vr_result, l, phi, lhs);
6805
6806 /* If we will end up with a (-INF, +INF) range, set it to
6807 VARYING. Same if the previous max value was invalid for
6808 the type and we end up with vr_result.min > vr_result.max. */
6809 if ((vrp_val_is_max (vr_result.max)
6810 && vrp_val_is_min (vr_result.min))
6811 || compare_values (vr_result.min,
6812 vr_result.max) > 0)
6813 goto varying;
6814 }
6815
6816 /* If the new range is different from the previous one, keep
6817 iterating. */
6818 update_range:
6819 if (update_value_range (lhs, &vr_result))
6820 {
6821 if (dump_file && (dump_flags & TDF_DETAILS))
6822 {
6823 fprintf (dump_file, "Found new range for ");
6824 print_generic_expr (dump_file, lhs, 0);
6825 fprintf (dump_file, ": ");
6826 dump_value_range (dump_file, &vr_result);
6827 fprintf (dump_file, "\n\n");
6828 }
6829
6830 return SSA_PROP_INTERESTING;
6831 }
6832
6833 /* Nothing changed, don't add outgoing edges. */
6834 return SSA_PROP_NOT_INTERESTING;
6835
6836 /* No match found. Set the LHS to VARYING. */
6837 varying:
6838 set_value_range_to_varying (lhs_vr);
6839 return SSA_PROP_VARYING;
6840 }
6841
6842 /* Simplify EQ_EXPR/NE_EXPR comparisons if the operands are known
6843 to already have boolean (zero/one) values. */
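/* Illustrative examples, assuming b_1 and c_2 are known to have
   boolean (zero/one) value ranges:

     x_3 = b_1 != 0;    becomes   x_3 = b_1;
     x_4 = b_1 != c_2;  becomes   x_4 = b_1 ^ c_2;
     x_5 = b_1 == 0;    becomes   x_5 = b_1 ^ 1;  */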
6844 static bool
6845 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6846 {
6847 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6848 tree lhs, op0, op1;
6849 bool need_conversion;
6850
6851 /* We handle only the !=/== cases here. */
6852 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
6853
6854 op0 = gimple_assign_rhs1 (stmt);
6855 if (!op_with_boolean_value_range_p (op0))
6856 return false;
6857
6858 op1 = gimple_assign_rhs2 (stmt);
6859 if (!op_with_boolean_value_range_p (op1))
6860 return false;
6861
6862 /* Reduce the number of cases to handle by canonicalizing to NE_EXPR.
6863 As there is no BIT_XNOR_EXPR, we cannot replace A == B with a single statement. */
6864 if (rhs_code == EQ_EXPR)
6865 {
6866 if (TREE_CODE (op1) == INTEGER_CST)
6867 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
6868 else
6869 return false;
6870 }
6871
6872 lhs = gimple_assign_lhs (stmt);
6873 need_conversion
6874 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
6875
6876 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
6877 if (need_conversion
6878 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6879 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
6880 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
6881 return false;
6882
6883 /* For A != 0 we can substitute A itself. */
6884 if (integer_zerop (op1))
6885 gimple_assign_set_rhs_with_ops (gsi,
6886 need_conversion
6887 ? NOP_EXPR : TREE_CODE (op0),
6888 op0, NULL_TREE);
6889 /* For A != B we substitute A ^ B, either with a conversion ... */
6890 else if (need_conversion)
6891 {
6892 gimple newop;
6893 tree tem = create_tmp_reg (TREE_TYPE (op0), NULL);
6894 newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
6895 tem = make_ssa_name (tem, newop);
6896 gimple_assign_set_lhs (newop, tem);
6897 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
6898 update_stmt (newop);
6899 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
6900 }
6901 /* ... or without one. */
6902 else
6903 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
6904 update_stmt (gsi_stmt (*gsi));
6905
6906 return true;
6907 }
6908
6909 /* Simplify a division or modulo operator to a right shift or
6910 bitwise AND if the first operand is unsigned or is known to be
6911 non-negative and the second operand is an exact power of two. */
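/* For example, with x_1 unsigned or known to be non-negative:

     x_1 / 8   becomes   x_1 >> 3
     x_1 % 8   becomes   x_1 & 7   */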
6912
6913 static bool
6914 simplify_div_or_mod_using_ranges (gimple stmt)
6915 {
6916 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6917 tree val = NULL;
6918 tree op0 = gimple_assign_rhs1 (stmt);
6919 tree op1 = gimple_assign_rhs2 (stmt);
6920 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6921
6922 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6923 {
6924 val = integer_one_node;
6925 }
6926 else
6927 {
6928 bool sop = false;
6929
6930 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6931
6932 if (val
6933 && sop
6934 && integer_onep (val)
6935 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6936 {
6937 location_t location;
6938
6939 if (!gimple_has_location (stmt))
6940 location = input_location;
6941 else
6942 location = gimple_location (stmt);
6943 warning_at (location, OPT_Wstrict_overflow,
6944 "assuming signed overflow does not occur when "
6945 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6946 }
6947 }
6948
6949 if (val && integer_onep (val))
6950 {
6951 tree t;
6952
6953 if (rhs_code == TRUNC_DIV_EXPR)
6954 {
6955 t = build_int_cst (integer_type_node, tree_log2 (op1));
6956 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6957 gimple_assign_set_rhs1 (stmt, op0);
6958 gimple_assign_set_rhs2 (stmt, t);
6959 }
6960 else
6961 {
6962 t = build_int_cst (TREE_TYPE (op1), 1);
6963 t = int_const_binop (MINUS_EXPR, op1, t);
6964 t = fold_convert (TREE_TYPE (op0), t);
6965
6966 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6967 gimple_assign_set_rhs1 (stmt, op0);
6968 gimple_assign_set_rhs2 (stmt, t);
6969 }
6970
6971 update_stmt (stmt);
6972 return true;
6973 }
6974
6975 return false;
6976 }
6977
6978 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6979 ABS_EXPR. If the operand is <= 0, then simplify the
6980 ABS_EXPR into a NEGATE_EXPR. */
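/* For example, with x_1 known to be in the range [-20, 0],

     y_2 = ABS_EXPR <x_1>;   becomes   y_2 = -x_1;

   and with x_1 known to be in [0, 20] the ABS_EXPR is replaced by
   x_1 itself.  */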
6981
6982 static bool
6983 simplify_abs_using_ranges (gimple stmt)
6984 {
6985 tree val = NULL;
6986 tree op = gimple_assign_rhs1 (stmt);
6987 tree type = TREE_TYPE (op);
6988 value_range_t *vr = get_value_range (op);
6989
6990 if (TYPE_UNSIGNED (type))
6991 {
6992 val = integer_zero_node;
6993 }
6994 else if (vr)
6995 {
6996 bool sop = false;
6997
6998 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6999 if (!val)
7000 {
7001 sop = false;
7002 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
7003 &sop);
7004
7005 if (val)
7006 {
7007 if (integer_zerop (val))
7008 val = integer_one_node;
7009 else if (integer_onep (val))
7010 val = integer_zero_node;
7011 }
7012 }
7013
7014 if (val
7015 && (integer_onep (val) || integer_zerop (val)))
7016 {
7017 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
7018 {
7019 location_t location;
7020
7021 if (!gimple_has_location (stmt))
7022 location = input_location;
7023 else
7024 location = gimple_location (stmt);
7025 warning_at (location, OPT_Wstrict_overflow,
7026 "assuming signed overflow does not occur when "
7027 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
7028 }
7029
7030 gimple_assign_set_rhs1 (stmt, op);
7031 if (integer_onep (val))
7032 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
7033 else
7034 gimple_assign_set_rhs_code (stmt, SSA_NAME);
7035 update_stmt (stmt);
7036 return true;
7037 }
7038 }
7039
7040 return false;
7041 }
7042
7043 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
7044 If all the bits that are being cleared by & are already
7045 known to be zero from VR, or all the bits that are being
7046 set by | are already known to be one from VR, the bit
7047 operation is redundant. */
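/* For instance, if x_1 is known to be in the range [0, 15], so that
   only its low four bits may be nonzero, then

     y_2 = x_1 & 0xff;   becomes   y_2 = x_1;

   and, similarly, an IOR whose second operand can only set bits that
   are already known to be set in the first operand is replaced by the
   first operand.  */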
7048
7049 static bool
7050 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7051 {
7052 tree op0 = gimple_assign_rhs1 (stmt);
7053 tree op1 = gimple_assign_rhs2 (stmt);
7054 tree op = NULL_TREE;
7055 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7056 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7057 double_int may_be_nonzero0, may_be_nonzero1;
7058 double_int must_be_nonzero0, must_be_nonzero1;
7059 double_int mask;
7060
7061 if (TREE_CODE (op0) == SSA_NAME)
7062 vr0 = *(get_value_range (op0));
7063 else if (is_gimple_min_invariant (op0))
7064 set_value_range_to_value (&vr0, op0, NULL);
7065 else
7066 return false;
7067
7068 if (TREE_CODE (op1) == SSA_NAME)
7069 vr1 = *(get_value_range (op1));
7070 else if (is_gimple_min_invariant (op1))
7071 set_value_range_to_value (&vr1, op1, NULL);
7072 else
7073 return false;
7074
7075 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7076 return false;
7077 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7078 return false;
7079
7080 switch (gimple_assign_rhs_code (stmt))
7081 {
7082 case BIT_AND_EXPR:
7083 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7084 if (double_int_zero_p (mask))
7085 {
7086 op = op0;
7087 break;
7088 }
7089 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7090 if (double_int_zero_p (mask))
7091 {
7092 op = op1;
7093 break;
7094 }
7095 break;
7096 case BIT_IOR_EXPR:
7097 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7098 if (double_int_zero_p (mask))
7099 {
7100 op = op1;
7101 break;
7102 }
7103 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7104 if (double_int_zero_p (mask))
7105 {
7106 op = op0;
7107 break;
7108 }
7109 break;
7110 default:
7111 gcc_unreachable ();
7112 }
7113
7114 if (op == NULL_TREE)
7115 return false;
7116
7117 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7118 update_stmt (gsi_stmt (*gsi));
7119 return true;
7120 }
7121
7122 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7123 a known value range VR.
7124
7125 If there is one and only one value which will satisfy the
7126 conditional, then return that value. Else return NULL. */
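/* For example, for the test x_1 <= 4 where x_1 is known to have the
   range [4, 10], the only value that satisfies the condition is 4, so
   4 is returned.  */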
7127
7128 static tree
7129 test_for_singularity (enum tree_code cond_code, tree op0,
7130 tree op1, value_range_t *vr)
7131 {
7132 tree min = NULL;
7133 tree max = NULL;
7134
7135 /* Extract minimum/maximum values which satisfy the
7136 conditional as it was written. */
7137 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7138 {
7139 /* This should not be negative infinity; there is no overflow
7140 here. */
7141 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7142
7143 max = op1;
7144 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7145 {
7146 tree one = build_int_cst (TREE_TYPE (op0), 1);
7147 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7148 if (EXPR_P (max))
7149 TREE_NO_WARNING (max) = 1;
7150 }
7151 }
7152 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7153 {
7154 /* This should not be positive infinity; there is no overflow
7155 here. */
7156 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7157
7158 min = op1;
7159 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7160 {
7161 tree one = build_int_cst (TREE_TYPE (op0), 1);
7162 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7163 if (EXPR_P (min))
7164 TREE_NO_WARNING (min) = 1;
7165 }
7166 }
7167
7168 /* Now refine the minimum and maximum values using any
7169 value range information we have for op0. */
7170 if (min && max)
7171 {
7172 if (compare_values (vr->min, min) == 1)
7173 min = vr->min;
7174 if (compare_values (vr->max, max) == -1)
7175 max = vr->max;
7176
7177 /* If the new min/max values have converged to a single value,
7178 then there is only one value which can satisfy the condition,
7179 return that value. */
7180 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7181 return min;
7182 }
7183 return NULL;
7184 }
7185
7186 /* Simplify a conditional using a relational operator to an equality
7187 test if the range information indicates only one value can satisfy
7188 the original conditional. */
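/* Continuing the example above, with x_1 in the range [4, 10],

     if (x_1 <= 4)   is simplified to   if (x_1 == 4)

   and, via the inverted test, if (x_1 > 4) becomes if (x_1 != 4).  */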
7189
7190 static bool
7191 simplify_cond_using_ranges (gimple stmt)
7192 {
7193 tree op0 = gimple_cond_lhs (stmt);
7194 tree op1 = gimple_cond_rhs (stmt);
7195 enum tree_code cond_code = gimple_cond_code (stmt);
7196
7197 if (cond_code != NE_EXPR
7198 && cond_code != EQ_EXPR
7199 && TREE_CODE (op0) == SSA_NAME
7200 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7201 && is_gimple_min_invariant (op1))
7202 {
7203 value_range_t *vr = get_value_range (op0);
7204
7205 /* If we have range information for OP0, then we might be
7206 able to simplify this conditional. */
7207 if (vr->type == VR_RANGE)
7208 {
7209 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7210
7211 if (new_tree)
7212 {
7213 if (dump_file)
7214 {
7215 fprintf (dump_file, "Simplified relational ");
7216 print_gimple_stmt (dump_file, stmt, 0, 0);
7217 fprintf (dump_file, " into ");
7218 }
7219
7220 gimple_cond_set_code (stmt, EQ_EXPR);
7221 gimple_cond_set_lhs (stmt, op0);
7222 gimple_cond_set_rhs (stmt, new_tree);
7223
7224 update_stmt (stmt);
7225
7226 if (dump_file)
7227 {
7228 print_gimple_stmt (dump_file, stmt, 0, 0);
7229 fprintf (dump_file, "\n");
7230 }
7231
7232 return true;
7233 }
7234
7235 /* Try again after inverting the condition. We only deal
7236 with integral types here, so no need to worry about
7237 issues with inverting FP comparisons. */
7238 cond_code = invert_tree_comparison (cond_code, false);
7239 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7240
7241 if (new_tree)
7242 {
7243 if (dump_file)
7244 {
7245 fprintf (dump_file, "Simplified relational ");
7246 print_gimple_stmt (dump_file, stmt, 0, 0);
7247 fprintf (dump_file, " into ");
7248 }
7249
7250 gimple_cond_set_code (stmt, NE_EXPR);
7251 gimple_cond_set_lhs (stmt, op0);
7252 gimple_cond_set_rhs (stmt, new_tree);
7253
7254 update_stmt (stmt);
7255
7256 if (dump_file)
7257 {
7258 print_gimple_stmt (dump_file, stmt, 0, 0);
7259 fprintf (dump_file, "\n");
7260 }
7261
7262 return true;
7263 }
7264 }
7265 }
7266
7267 return false;
7268 }
7269
7270 /* Simplify a switch statement using the value range of the switch
7271 argument. */
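/* A sketch of the effect, using hypothetical ranges: if i_2 is known
   to be in [3, 4], then in

     switch (i_2) { case 1: ... case 3: ... case 4: ... default: ... }

   the label for 1 can never be reached, so its edge is queued for
   removal and a new label vector containing only the reachable cases
   (plus the default label, when the remaining range is not fully
   covered) is queued to replace the old one.  */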
7272
7273 static bool
7274 simplify_switch_using_ranges (gimple stmt)
7275 {
7276 tree op = gimple_switch_index (stmt);
7277 value_range_t *vr;
7278 bool take_default;
7279 edge e;
7280 edge_iterator ei;
7281 size_t i = 0, j = 0, n, n2;
7282 tree vec2;
7283 switch_update su;
7284
7285 if (TREE_CODE (op) == SSA_NAME)
7286 {
7287 vr = get_value_range (op);
7288
7289 /* We can only handle integer ranges. */
7290 if (vr->type != VR_RANGE
7291 || symbolic_range_p (vr))
7292 return false;
7293
7294 /* Find case label for min/max of the value range. */
7295 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7296 }
7297 else if (TREE_CODE (op) == INTEGER_CST)
7298 {
7299 take_default = !find_case_label_index (stmt, 1, op, &i);
7300 if (take_default)
7301 {
7302 i = 1;
7303 j = 0;
7304 }
7305 else
7306 {
7307 j = i;
7308 }
7309 }
7310 else
7311 return false;
7312
7313 n = gimple_switch_num_labels (stmt);
7314
7315 /* Bail out if every case label and the default are still taken;
there is nothing to simplify then. */
7316 if (i == 1
7317 && j == n - 1
7318 && take_default)
7319 return false;
7320
7321 /* Build a new vector of taken case labels. */
7322 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7323 n2 = 0;
7324
7325 /* Add the default edge, if necessary. */
7326 if (take_default)
7327 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7328
7329 for (; i <= j; ++i, ++n2)
7330 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7331
7332 /* Mark needed edges. */
7333 for (i = 0; i < n2; ++i)
7334 {
7335 e = find_edge (gimple_bb (stmt),
7336 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7337 e->aux = (void *)-1;
7338 }
7339
7340 /* Queue the edges that are no longer needed for later removal. */
7341 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7342 {
7343 if (e->aux == (void *)-1)
7344 {
7345 e->aux = NULL;
7346 continue;
7347 }
7348
7349 if (dump_file && (dump_flags & TDF_DETAILS))
7350 {
7351 fprintf (dump_file, "removing unreachable case label\n");
7352 }
7353 VEC_safe_push (edge, heap, to_remove_edges, e);
7354 e->flags &= ~EDGE_EXECUTABLE;
7355 }
7356
7357 /* And queue an update for the stmt. */
7358 su.stmt = stmt;
7359 su.vec = vec2;
7360 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7361 return false;
7362 }
7363
7364 /* Simplify an integral conversion from an SSA name in STMT. */
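/* As an illustrative sketch (hypothetical types and ranges): given

     short _2 = (short) c_3;   where c_3 is a char with range [0, 100]
     int _4 = (int) _2;

   truncating to short and then extending to int yields the same values
   as converting c_3 directly, so the second statement can be rewritten
   as _4 = (int) c_3, making the intermediate conversion dead.  */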
7365
7366 static bool
7367 simplify_conversion_using_ranges (gimple stmt)
7368 {
7369 tree innerop, middleop, finaltype;
7370 gimple def_stmt;
7371 value_range_t *innervr;
7372 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
7373 unsigned inner_prec, middle_prec, final_prec;
7374 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
7375
7376 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7377 if (!INTEGRAL_TYPE_P (finaltype))
7378 return false;
7379 middleop = gimple_assign_rhs1 (stmt);
7380 def_stmt = SSA_NAME_DEF_STMT (middleop);
7381 if (!is_gimple_assign (def_stmt)
7382 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7383 return false;
7384 innerop = gimple_assign_rhs1 (def_stmt);
7385 if (TREE_CODE (innerop) != SSA_NAME)
7386 return false;
7387
7388 /* Get the value-range of the inner operand. */
7389 innervr = get_value_range (innerop);
7390 if (innervr->type != VR_RANGE
7391 || TREE_CODE (innervr->min) != INTEGER_CST
7392 || TREE_CODE (innervr->max) != INTEGER_CST)
7393 return false;
7394
7395 /* Simulate the conversion chain to check whether the result stays
7396 the same if the middle conversion is removed. */
7397 innermin = tree_to_double_int (innervr->min);
7398 innermax = tree_to_double_int (innervr->max);
7399
7400 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
7401 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
7402 final_prec = TYPE_PRECISION (finaltype);
7403
7404 /* If the first conversion is not injective, the second must not
7405 be widening. */
7406 if (double_int_cmp (double_int_sub (innermax, innermin),
7407 double_int_mask (middle_prec), true) > 0
7408 && middle_prec < final_prec)
7409 return false;
7410 /* We also want a value in the middle of the range so that we can
7411 track the effect that narrowing conversions with sign change have. */
7412 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
7413 if (inner_unsigned_p)
7414 innermed = double_int_rshift (double_int_mask (inner_prec),
7415 1, inner_prec, false);
7416 else
7417 innermed = double_int_zero;
7418 if (double_int_cmp (innermin, innermed, inner_unsigned_p) >= 0
7419 || double_int_cmp (innermed, innermax, inner_unsigned_p) >= 0)
7420 innermed = innermin;
7421
7422 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
7423 middlemin = double_int_ext (innermin, middle_prec, middle_unsigned_p);
7424 middlemed = double_int_ext (innermed, middle_prec, middle_unsigned_p);
7425 middlemax = double_int_ext (innermax, middle_prec, middle_unsigned_p);
7426
7427 /* Require that the final conversion applied to both the original
7428 and the intermediate range produces the same result. */
7429 final_unsigned_p = TYPE_UNSIGNED (finaltype);
7430 if (!double_int_equal_p (double_int_ext (middlemin,
7431 final_prec, final_unsigned_p),
7432 double_int_ext (innermin,
7433 final_prec, final_unsigned_p))
7434 || !double_int_equal_p (double_int_ext (middlemed,
7435 final_prec, final_unsigned_p),
7436 double_int_ext (innermed,
7437 final_prec, final_unsigned_p))
7438 || !double_int_equal_p (double_int_ext (middlemax,
7439 final_prec, final_unsigned_p),
7440 double_int_ext (innermax,
7441 final_prec, final_unsigned_p)))
7442 return false;
7443
7444 gimple_assign_set_rhs1 (stmt, innerop);
7445 update_stmt (stmt);
7446 return true;
7447 }
7448
7449 /* Return whether the value range *VR fits in an integer type specified
7450 by PRECISION and UNSIGNED_P. */
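/* For instance (hypothetical ranges): [0, 200] fits an 8-bit unsigned
   type but not an 8-bit signed one, while [-5, 100] fits only the
   signed type.  */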
7451
7452 static bool
7453 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
7454 {
7455 tree src_type;
7456 unsigned src_precision;
7457 double_int tem;
7458
7459 /* We can only handle integral and pointer types. */
7460 src_type = TREE_TYPE (vr->min);
7461 if (!INTEGRAL_TYPE_P (src_type)
7462 && !POINTER_TYPE_P (src_type))
7463 return false;
7464
7465 /* An extension is always fine, and so is an identity transform. */
7466 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
7467 if (src_precision < precision
7468 || (src_precision == precision
7469 && TYPE_UNSIGNED (src_type) == unsigned_p))
7470 return true;
7471
7472 /* Now we can only handle ranges with constant bounds. */
7473 if (vr->type != VR_RANGE
7474 || TREE_CODE (vr->min) != INTEGER_CST
7475 || TREE_CODE (vr->max) != INTEGER_CST)
7476 return false;
7477
7478 /* For precision-preserving sign-changes the MSB of the double-int
7479 has to be clear. */
7480 if (src_precision == precision
7481 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
7482 return false;
7483
7484 /* Then we can perform the conversion on both ends and compare
7485 the result for equality. */
7486 tem = double_int_ext (tree_to_double_int (vr->min), precision, unsigned_p);
7487 if (!double_int_equal_p (tree_to_double_int (vr->min), tem))
7488 return false;
7489 tem = double_int_ext (tree_to_double_int (vr->max), precision, unsigned_p);
7490 if (!double_int_equal_p (tree_to_double_int (vr->max), tem))
7491 return false;
7492
7493 return true;
7494 }
7495
7496 /* Simplify a conversion from an integral SSA name to float in STMT. */
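/* A sketch of the intent (hypothetical ranges): for

     double _2 = (double) u_3;   where u_3 is unsigned int in [0, 1000]

   the range also fits the corresponding signed type, so on targets that
   only provide a signed integer-to-float instruction the statement can
   be made cheaper by first copying u_3 into a signed temporary of the
   same width and converting that instead.  */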
7497
7498 static bool
7499 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7500 {
7501 tree rhs1 = gimple_assign_rhs1 (stmt);
7502 value_range_t *vr = get_value_range (rhs1);
7503 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
7504 enum machine_mode mode;
7505 tree tem;
7506 gimple conv;
7507
7508 /* We can only handle constant ranges. */
7509 if (vr->type != VR_RANGE
7510 || TREE_CODE (vr->min) != INTEGER_CST
7511 || TREE_CODE (vr->max) != INTEGER_CST)
7512 return false;
7513
7514 /* First check if we can use a signed type in place of an unsigned. */
7515 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
7516 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
7517 != CODE_FOR_nothing)
7518 && range_fits_type_p (vr, GET_MODE_PRECISION
7519 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
7520 mode = TYPE_MODE (TREE_TYPE (rhs1));
7521 /* If we can do the conversion in the current input mode, do nothing. */
7522 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
7523 TYPE_UNSIGNED (TREE_TYPE (rhs1))))
7524 return false;
7525 /* Otherwise search for a mode we can use, starting from the narrowest
7526 integer mode available. */
7527 else
7528 {
7529 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
7530 do
7531 {
7532 /* If we cannot do a signed conversion to float from mode
7533 or if the value-range does not fit in the signed type
7534 try with a wider mode. */
7535 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
7536 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
7537 break;
7538
7539 mode = GET_MODE_WIDER_MODE (mode);
7540 /* But do not widen the input. Instead leave that to the
7541 optabs expansion code. */
7542 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
7543 return false;
7544 }
7545 while (mode != VOIDmode);
7546 if (mode == VOIDmode)
7547 return false;
7548 }
7549
7550 /* It works; insert a truncation or sign-change before the
7551 float conversion. */
7552 tem = create_tmp_var (build_nonstandard_integer_type
7553 (GET_MODE_PRECISION (mode), 0), NULL);
7554 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
7555 tem = make_ssa_name (tem, conv);
7556 gimple_assign_set_lhs (conv, tem);
7557 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
7558 gimple_assign_set_rhs1 (stmt, tem);
7559 update_stmt (stmt);
7560
7561 return true;
7562 }
7563
7564 /* Simplify STMT using ranges if possible. */
7565
7566 static bool
7567 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7568 {
7569 gimple stmt = gsi_stmt (*gsi);
7570 if (is_gimple_assign (stmt))
7571 {
7572 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7573 tree rhs1 = gimple_assign_rhs1 (stmt);
7574
7575 switch (rhs_code)
7576 {
7577 case EQ_EXPR:
7578 case NE_EXPR:
7579 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
7580 if the RHS is zero or one and the LHS is known to be a
7581 boolean value. */
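/* E.g., when b_4 is known to be 0 or 1, b_4 != 0 simplifies to b_4
   and b_4 == 0 simplifies to b_4 ^ 1 (a hypothetical example).  */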
7582 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7583 return simplify_truth_ops_using_ranges (gsi, stmt);
7584 break;
7585
7586 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7587 and BIT_AND_EXPR respectively if the first operand is greater
7588 than zero and the second operand is an exact power of two. */
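/* E.g., x_1 / 8 becomes x_1 >> 3 and x_1 % 8 becomes x_1 & 7 once the
   range information shows x_1 cannot be negative (hypothetical
   example).  */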
7589 case TRUNC_DIV_EXPR:
7590 case TRUNC_MOD_EXPR:
7591 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7592 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7593 return simplify_div_or_mod_using_ranges (stmt);
7594 break;
7595
7596 /* Transform ABS (X) into X or -X as appropriate. */
7597 case ABS_EXPR:
7598 if (TREE_CODE (rhs1) == SSA_NAME
7599 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7600 return simplify_abs_using_ranges (stmt);
7601 break;
7602
7603 case BIT_AND_EXPR:
7604 case BIT_IOR_EXPR:
7605 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7606 if all the bits being cleared are already cleared or
7607 all the bits being set are already set. */
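/* E.g., x_1 & 0xff simplifies to x_1 when x_1 is already known to be
   in [0, 255], and x_1 | y_2 simplifies to x_1 when every bit that may
   be set in y_2 is already known to be set in x_1 (hypothetical
   examples).  */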
7608 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7609 return simplify_bit_ops_using_ranges (gsi, stmt);
7610 break;
7611
7612 CASE_CONVERT:
7613 if (TREE_CODE (rhs1) == SSA_NAME
7614 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7615 return simplify_conversion_using_ranges (stmt);
7616 break;
7617
7618 case FLOAT_EXPR:
7619 if (TREE_CODE (rhs1) == SSA_NAME
7620 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7621 return simplify_float_conversion_using_ranges (gsi, stmt);
7622 break;
7623
7624 default:
7625 break;
7626 }
7627 }
7628 else if (gimple_code (stmt) == GIMPLE_COND)
7629 return simplify_cond_using_ranges (stmt);
7630 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7631 return simplify_switch_using_ranges (stmt);
7632
7633 return false;
7634 }
7635
7636 /* If the statement pointed to by SI has a predicate whose value can be
7637 computed using the value range information computed by VRP, compute
7638 its value and return true. Otherwise, return false. */
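/* For example (hypothetical range): if i_2 is known to be in [1, 10],
   the predicate in

     if (i_2 > 0)

   is true for every value in the range, so the GIMPLE_COND is rewritten
   as an unconditionally true condition and the dead edge can later be
   cleaned up.  */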
7639
7640 static bool
7641 fold_predicate_in (gimple_stmt_iterator *si)
7642 {
7643 bool assignment_p = false;
7644 tree val;
7645 gimple stmt = gsi_stmt (*si);
7646
7647 if (is_gimple_assign (stmt)
7648 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7649 {
7650 assignment_p = true;
7651 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7652 gimple_assign_rhs1 (stmt),
7653 gimple_assign_rhs2 (stmt),
7654 stmt);
7655 }
7656 else if (gimple_code (stmt) == GIMPLE_COND)
7657 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7658 gimple_cond_lhs (stmt),
7659 gimple_cond_rhs (stmt),
7660 stmt);
7661 else
7662 return false;
7663
7664 if (val)
7665 {
7666 if (assignment_p)
7667 val = fold_convert (gimple_expr_type (stmt), val);
7668
7669 if (dump_file)
7670 {
7671 fprintf (dump_file, "Folding predicate ");
7672 print_gimple_expr (dump_file, stmt, 0, 0);
7673 fprintf (dump_file, " to ");
7674 print_generic_expr (dump_file, val, 0);
7675 fprintf (dump_file, "\n");
7676 }
7677
7678 if (is_gimple_assign (stmt))
7679 gimple_assign_set_rhs_from_tree (si, val);
7680 else
7681 {
7682 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7683 if (integer_zerop (val))
7684 gimple_cond_make_false (stmt);
7685 else if (integer_onep (val))
7686 gimple_cond_make_true (stmt);
7687 else
7688 gcc_unreachable ();
7689 }
7690
7691 return true;
7692 }
7693
7694 return false;
7695 }
7696
7697 /* Callback for substitute_and_fold folding the stmt at *SI. */
7698
7699 static bool
7700 vrp_fold_stmt (gimple_stmt_iterator *si)
7701 {
7702 if (fold_predicate_in (si))
7703 return true;
7704
7705 return simplify_stmt_using_ranges (si);
7706 }
7707
7708 /* Stack of dest,src equivalency pairs that need to be restored after
7709 each attempt to thread a block's incoming edge to an outgoing edge.
7710
7711 A NULL entry is used to mark the end of pairs which need to be
7712 restored. */
7713 static VEC(tree,heap) *stack;
7714
7715 /* A trivial wrapper so that we can present the generic jump threading
7716 code with a simple API for simplifying statements. STMT is the
7717 statement we want to simplify, WITHIN_STMT provides the location
7718 for any overflow warnings. */
7719
7720 static tree
7721 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7722 {
7723 /* We only use VRP information to simplify conditionals. This is
7724 overly conservative, but it's unclear if doing more would be
7725 worth the compile time cost. */
7726 if (gimple_code (stmt) != GIMPLE_COND)
7727 return NULL;
7728
7729 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7730 gimple_cond_lhs (stmt),
7731 gimple_cond_rhs (stmt), within_stmt);
7732 }
7733
7734 /* Blocks which have more than one predecessor and more than
7735 one successor present jump threading opportunities; that is,
7736 when the block is reached from a specific predecessor, we
7737 may be able to determine which of the outgoing edges will
7738 be traversed. When this optimization applies, we are able
7739 to avoid conditionals at runtime and we may expose secondary
7740 optimization opportunities.
7741
7742 This routine is effectively a driver for the generic jump
7743 threading code. It basically just presents the generic code
7744 with edges that may be suitable for jump threading.
7745
7746 Unlike DOM, we do not iterate VRP if jump threading was successful.
7747 While iterating may expose new opportunities for VRP, it is expected
7748 those opportunities would be very limited and the compile time cost
7749 to expose those opportunities would be significant.
7750
7751 As jump threading opportunities are discovered, they are registered
7752 for later realization. */
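/* A sketch of the kind of opportunity this exposes (hypothetical CFG):

     bb_1: if (x_1 > 10) goto bb_3; else goto bb_2;
     ...
     bb_3: if (x_1 > 0) goto bb_4; else goto bb_5;

   When bb_3 is entered from bb_1's true edge, x_1 is known to be
   greater than 10, so the second test must be true and the edge can be
   threaded directly to bb_4.  */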
7753
7754 static void
7755 identify_jump_threads (void)
7756 {
7757 basic_block bb;
7758 gimple dummy;
7759 int i;
7760 edge e;
7761
7762 /* Ugh. When substituting values earlier in this pass we can
7763 wipe the dominance information. So rebuild the dominator
7764 information as we need it within the jump threading code. */
7765 calculate_dominance_info (CDI_DOMINATORS);
7766
7767 /* We do not allow VRP information to be used for jump threading
7768 across a back edge in the CFG. Otherwise it becomes too
7769 difficult to avoid eliminating loop exit tests. Of course
7770 EDGE_DFS_BACK is not accurate at this time so we have to
7771 recompute it. */
7772 mark_dfs_back_edges ();
7773
7774 /* Do not thread across edges we are about to remove. Just marking
7775 them as EDGE_DFS_BACK will do. */
7776 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7777 e->flags |= EDGE_DFS_BACK;
7778
7779 /* Allocate our unwinder stack to unwind any temporary equivalences
7780 that might be recorded. */
7781 stack = VEC_alloc (tree, heap, 20);
7782
7783 /* To avoid lots of silly node creation, we create a single
7784 conditional and just modify it in-place when attempting to
7785 thread jumps. */
7786 dummy = gimple_build_cond (EQ_EXPR,
7787 integer_zero_node, integer_zero_node,
7788 NULL, NULL);
7789
7790 /* Walk through all the blocks finding those which present a
7791 potential jump threading opportunity. We could set this up
7792 as a dominator walker and record data during the walk, but
7793 I doubt it's worth the effort for the classes of jump
7794 threading opportunities we are trying to identify at this
7795 point in compilation. */
7796 FOR_EACH_BB (bb)
7797 {
7798 gimple last;
7799
7800 /* If the generic jump threading code does not find this block
7801 interesting, then there is nothing to do. */
7802 if (! potentially_threadable_block (bb))
7803 continue;
7804
7805 /* We only care about blocks ending in a COND_EXPR or a
7806 SWITCH_EXPR; of the two, the conditional case is by far the
7807 more important one here. */
7808 last = gsi_stmt (gsi_last_bb (bb));
7809
7810 /* We're basically looking for a switch or any kind of conditional with
7811 integral or pointer type arguments. Note the type of the second
7812 argument will be the same as the first argument, so no need to
7813 check it explicitly. */
7814 if (gimple_code (last) == GIMPLE_SWITCH
7815 || (gimple_code (last) == GIMPLE_COND
7816 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7817 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7818 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
7819 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7820 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
7821 {
7822 edge_iterator ei;
7823
7824 /* We've got a block with multiple predecessors and multiple
7825 successors which also ends in a suitable conditional or
7826 switch statement. For each predecessor, see if we can thread
7827 it to a specific successor. */
7828 FOR_EACH_EDGE (e, ei, bb->preds)
7829 {
7830 /* Do not thread across back edges or abnormal edges
7831 in the CFG. */
7832 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7833 continue;
7834
7835 thread_across_edge (dummy, e, true, &stack,
7836 simplify_stmt_for_jump_threading);
7837 }
7838 }
7839 }
7840
7841 /* We do not actually update the CFG or SSA graphs at this point as
7842 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7843 handle ASSERT_EXPRs gracefully. */
7844 }
7845
7846 /* We identified all the jump threading opportunities earlier, but could
7847 not transform the CFG at that time. This routine transforms the
7848 CFG and arranges for the dominator tree to be rebuilt if necessary.
7849
7850 Note the SSA graph update will occur during the normal TODO
7851 processing by the pass manager. */
7852 static void
7853 finalize_jump_threads (void)
7854 {
7855 thread_through_all_blocks (false);
7856 VEC_free (tree, heap, stack);
7857 }
7858
7859
7860 /* Traverse all the blocks folding conditionals with known ranges. */
7861
7862 static void
7863 vrp_finalize (void)
7864 {
7865 size_t i;
7866
7867 values_propagated = true;
7868
7869 if (dump_file)
7870 {
7871 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7872 dump_all_value_ranges (dump_file);
7873 fprintf (dump_file, "\n");
7874 }
7875
7876 substitute_and_fold (op_with_constant_singleton_value_range,
7877 vrp_fold_stmt, false);
7878
7879 if (warn_array_bounds)
7880 check_all_array_refs ();
7881
7882 /* We must identify jump threading opportunities before we release
7883 the data structures built by VRP. */
7884 identify_jump_threads ();
7885
7886 /* Free allocated memory. */
7887 for (i = 0; i < num_vr_values; i++)
7888 if (vr_value[i])
7889 {
7890 BITMAP_FREE (vr_value[i]->equiv);
7891 free (vr_value[i]);
7892 }
7893
7894 free (vr_value);
7895 free (vr_phi_edge_counts);
7896
7897 /* So that we can distinguish between VRP data being available
7898 and not available. */
7899 vr_value = NULL;
7900 vr_phi_edge_counts = NULL;
7901 }
7902
7903
7904 /* Main entry point to VRP (Value Range Propagation). This pass is
7905 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7906 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7907 Programming Language Design and Implementation, pp. 67-78, 1995.
7908 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7909
7910 This is essentially an SSA-CCP pass modified to deal with ranges
7911 instead of constants.
7912
7913 While propagating ranges, we may find that two or more SSA names
7914 have equivalent, though distinct ranges. For instance,
7915
7916 1 x_9 = p_3->a;
7917 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7918 3 if (p_4 == q_2)
7919 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7920 5 endif
7921 6 if (q_2)
7922
7923 In the code above, pointer p_5 has range [q_2, q_2], but from the
7924 code we can also determine that p_5 cannot be NULL and, if q_2 had
7925 a non-varying range, p_5's range should also be compatible with it.
7926
7927 These equivalences are created by two expressions: ASSERT_EXPR and
7928 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7929 result of another assertion, then we can use the fact that p_5 and
7930 p_4 are equivalent when evaluating p_5's range.
7931
7932 Together with value ranges, we also propagate these equivalences
7933 between names so that we can take advantage of information from
7934 multiple ranges when doing final replacement. Note that this
7935 equivalency relation is transitive but not symmetric.
7936
7937 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7938 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7939 in contexts where that assertion does not hold (e.g., in line 6).
7940
7941 TODO: the main difference between this pass and Patterson's is that
7942 we do not propagate edge probabilities. We only compute whether
7943 edges can be taken or not. That is, instead of having a spectrum
7944 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7945 DON'T KNOW. In the future, it may be worthwhile to propagate
7946 probabilities to aid branch prediction. */
7947
7948 static unsigned int
7949 execute_vrp (void)
7950 {
7951 int i;
7952 edge e;
7953 switch_update *su;
7954
7955 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7956 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7957 scev_initialize ();
7958
7959 insert_range_assertions ();
7960
7961 /* Estimate the number of iterations - but do not use undefined behavior
7962 for this. We can't do this lazily as other functions may compute
7963 this using undefined behavior. */
7964 free_numbers_of_iterations_estimates ();
7965 estimate_numbers_of_iterations (false);
7966
7967 to_remove_edges = VEC_alloc (edge, heap, 10);
7968 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7969 threadedge_initialize_values ();
7970
7971 vrp_initialize ();
7972 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7973 vrp_finalize ();
7974
7975 free_numbers_of_iterations_estimates ();
7976
7977 /* ASSERT_EXPRs must be removed before finalizing jump threads
7978 as finalizing jump threads calls the CFG cleanup code which
7979 does not properly handle ASSERT_EXPRs. */
7980 remove_range_assertions ();
7981
7982 /* If we exposed any new variables, go ahead and put them into
7983 SSA form now, before we handle jump threading. This simplifies
7984 interactions between rewriting of _DECL nodes into SSA form
7985 and rewriting SSA_NAME nodes into SSA form after block
7986 duplication and CFG manipulation. */
7987 update_ssa (TODO_update_ssa);
7988
7989 finalize_jump_threads ();
7990
7991 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7992 CFG in a broken state and requires a cfg_cleanup run. */
7993 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7994 remove_edge (e);
7995 /* Update SWITCH_EXPR case label vector. */
7996 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
7997 {
7998 size_t j;
7999 size_t n = TREE_VEC_LENGTH (su->vec);
8000 tree label;
8001 gimple_switch_set_num_labels (su->stmt, n);
8002 for (j = 0; j < n; j++)
8003 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
8004 /* As we may have replaced the default label with a regular one,
8005 make sure to make it a real default label again. This ensures
8006 optimal expansion. */
8007 label = gimple_switch_default_label (su->stmt);
8008 CASE_LOW (label) = NULL_TREE;
8009 CASE_HIGH (label) = NULL_TREE;
8010 }
8011
8012 if (VEC_length (edge, to_remove_edges) > 0)
8013 free_dominance_info (CDI_DOMINATORS);
8014
8015 VEC_free (edge, heap, to_remove_edges);
8016 VEC_free (switch_update, heap, to_update_switch_stmts);
8017 threadedge_finalize_values ();
8018
8019 scev_finalize ();
8020 loop_optimizer_finalize ();
8021 return 0;
8022 }
8023
8024 static bool
8025 gate_vrp (void)
8026 {
8027 return flag_tree_vrp != 0;
8028 }
8029
8030 struct gimple_opt_pass pass_vrp =
8031 {
8032 {
8033 GIMPLE_PASS,
8034 "vrp", /* name */
8035 gate_vrp, /* gate */
8036 execute_vrp, /* execute */
8037 NULL, /* sub */
8038 NULL, /* next */
8039 0, /* static_pass_number */
8040 TV_TREE_VRP, /* tv_id */
8041 PROP_ssa, /* properties_required */
8042 0, /* properties_provided */
8043 0, /* properties_destroyed */
8044 0, /* todo_flags_start */
8045 TODO_cleanup_cfg
8046 | TODO_update_ssa
8047 | TODO_verify_ssa
8048 | TODO_verify_flow
8049 | TODO_ggc_collect /* todo_flags_finish */
8050 }
8051 };