gcc/tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "diagnostic-core.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42 #include "gimple-fold.h"
43
44
45 /* Type of value ranges. See value_range_d for a description of these
46 types. */
47 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
48
49 /* Range of values that can be associated with an SSA_NAME after VRP
50 has executed. */
51 struct value_range_d
52 {
53 /* Lattice value represented by this range. */
54 enum value_range_type type;
55
56 /* Minimum and maximum values represented by this range. These
57 values should be interpreted as follows:
58
59 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
60 be NULL.
61
62 - If TYPE == VR_RANGE then MIN holds the minimum value and
63 MAX holds the maximum value of the range [MIN, MAX].
64
65 - If TYPE == VR_ANTI_RANGE the variable is known to NOT
66 take any values in the range [MIN, MAX]. */
67 tree min;
68 tree max;
69
70 /* Set of SSA names whose value ranges are equivalent to this one.
71 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
72 bitmap equiv;
73 };
74
75 typedef struct value_range_d value_range_t;
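/* As an illustration of the encoding above: for a signed int SSA name
   known to be nonzero the lattice entry is { VR_ANTI_RANGE, 0, 0, equiv },
   a name known to lie between 1 and 10 is { VR_RANGE, 1, 10, equiv },
   and VR_UNDEFINED or VR_VARYING entries carry no MIN/MAX at all.  */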
76
77 /* Set of SSA names found live during the RPO traversal of the function
78 for still active basic-blocks. */
79 static sbitmap *live;
80
81 /* Return true if the SSA name NAME is live on the edge E. */
82
83 static bool
84 live_on_edge (edge e, tree name)
85 {
86 return (live[e->dest->index]
87 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
88 }
89
90 /* Local functions. */
91 static int compare_values (tree val1, tree val2);
92 static int compare_values_warnv (tree val1, tree val2, bool *);
93 static void vrp_meet (value_range_t *, value_range_t *);
94 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
95 tree, tree, bool, bool *,
96 bool *);
97
98 /* Location information for ASSERT_EXPRs. Each instance of this
99 structure describes an ASSERT_EXPR for an SSA name. Since a single
100 SSA name may have more than one assertion associated with it, these
101 locations are kept in a linked list attached to the corresponding
102 SSA name. */
103 struct assert_locus_d
104 {
105 /* Basic block where the assertion would be inserted. */
106 basic_block bb;
107
108 /* Some assertions need to be inserted on an edge (e.g., assertions
109 generated by COND_EXPRs). In those cases, BB will be NULL. */
110 edge e;
111
112 /* Pointer to the statement that generated this assertion. */
113 gimple_stmt_iterator si;
114
115 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
116 enum tree_code comp_code;
117
118 /* Value being compared against. */
119 tree val;
120
121 /* Expression to compare. */
122 tree expr;
123
124 /* Next node in the linked list. */
125 struct assert_locus_d *next;
126 };
127
128 typedef struct assert_locus_d *assert_locus_t;
129
130 /* If bit I is present, it means that SSA name N_i has a list of
131 assertions that should be inserted in the IL. */
132 static bitmap need_assert_for;
133
134 /* Array of location lists indicating where to insert assertions. ASSERTS_FOR[I]
135 holds a list of ASSERT_LOCUS_T nodes that describe where
136 ASSERT_EXPRs for SSA name N_I should be inserted. */
137 static assert_locus_t *asserts_for;
138
139 /* Value range array. After propagation, VR_VALUE[I] holds the range
140 of values that SSA name N_I may take. */
141 static value_range_t **vr_value;
142
143 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
144 number of executable edges we saw the last time we visited the
145 node. */
146 static int *vr_phi_edge_counts;
147
148 typedef struct {
149 gimple stmt;
150 tree vec;
151 } switch_update;
152
153 static VEC (edge, heap) *to_remove_edges;
154 DEF_VEC_O(switch_update);
155 DEF_VEC_ALLOC_O(switch_update, heap);
156 static VEC (switch_update, heap) *to_update_switch_stmts;
157
158
159 /* Return the maximum value for TYPE. */
160
161 static inline tree
162 vrp_val_max (const_tree type)
163 {
164 if (!INTEGRAL_TYPE_P (type))
165 return NULL_TREE;
166
167 return TYPE_MAX_VALUE (type);
168 }
169
170 /* Return the minimum value for TYPE. */
171
172 static inline tree
173 vrp_val_min (const_tree type)
174 {
175 if (!INTEGRAL_TYPE_P (type))
176 return NULL_TREE;
177
178 return TYPE_MIN_VALUE (type);
179 }
180
181 /* Return whether VAL is equal to the maximum value of its type. This
182 will be true for a positive overflow infinity. We can't do a
183 simple equality comparison with TYPE_MAX_VALUE because C typedefs
184 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
185 to the integer constant with the same value in the type. */
186
187 static inline bool
188 vrp_val_is_max (const_tree val)
189 {
190 tree type_max = vrp_val_max (TREE_TYPE (val));
191 return (val == type_max
192 || (type_max != NULL_TREE
193 && operand_equal_p (val, type_max, 0)));
194 }
195
196 /* Return whether VAL is equal to the minimum value of its type. This
197 will be true for a negative overflow infinity. */
198
199 static inline bool
200 vrp_val_is_min (const_tree val)
201 {
202 tree type_min = vrp_val_min (TREE_TYPE (val));
203 return (val == type_min
204 || (type_min != NULL_TREE
205 && operand_equal_p (val, type_min, 0)));
206 }
207
208
209 /* Return whether TYPE should use an overflow infinity distinct from
210 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
211 represent a signed overflow during VRP computations. An infinity
212 is distinct from a half-range, which will go from some number to
213 TYPE_{MIN,MAX}_VALUE. */
214
215 static inline bool
216 needs_overflow_infinity (const_tree type)
217 {
218 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
219 }
220
221 /* Return whether TYPE can support our overflow infinity
222 representation: we use the TREE_OVERFLOW flag, which only exists
223 for constants. If TYPE doesn't support this, we don't optimize
224 cases which would require signed overflow--we drop them to
225 VARYING. */
226
227 static inline bool
228 supports_overflow_infinity (const_tree type)
229 {
230 tree min = vrp_val_min (type), max = vrp_val_max (type);
231 #ifdef ENABLE_CHECKING
232 gcc_assert (needs_overflow_infinity (type));
233 #endif
234 return (min != NULL_TREE
235 && CONSTANT_CLASS_P (min)
236 && max != NULL_TREE
237 && CONSTANT_CLASS_P (max));
238 }
239
240 /* VAL is the maximum or minimum value of a type. Return a
241 corresponding overflow infinity. */
242
243 static inline tree
244 make_overflow_infinity (tree val)
245 {
246 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
247 val = copy_node (val);
248 TREE_OVERFLOW (val) = 1;
249 return val;
250 }
251
252 /* Return a negative overflow infinity for TYPE. */
253
254 static inline tree
255 negative_overflow_infinity (tree type)
256 {
257 gcc_checking_assert (supports_overflow_infinity (type));
258 return make_overflow_infinity (vrp_val_min (type));
259 }
260
261 /* Return a positive overflow infinity for TYPE. */
262
263 static inline tree
264 positive_overflow_infinity (tree type)
265 {
266 gcc_checking_assert (supports_overflow_infinity (type));
267 return make_overflow_infinity (vrp_val_max (type));
268 }
269
270 /* Return whether VAL is a negative overflow infinity. */
271
272 static inline bool
273 is_negative_overflow_infinity (const_tree val)
274 {
275 return (needs_overflow_infinity (TREE_TYPE (val))
276 && CONSTANT_CLASS_P (val)
277 && TREE_OVERFLOW (val)
278 && vrp_val_is_min (val));
279 }
280
281 /* Return whether VAL is a positive overflow infinity. */
282
283 static inline bool
284 is_positive_overflow_infinity (const_tree val)
285 {
286 return (needs_overflow_infinity (TREE_TYPE (val))
287 && CONSTANT_CLASS_P (val)
288 && TREE_OVERFLOW (val)
289 && vrp_val_is_max (val));
290 }
291
292 /* Return whether VAL is a positive or negative overflow infinity. */
293
294 static inline bool
295 is_overflow_infinity (const_tree val)
296 {
297 return (needs_overflow_infinity (TREE_TYPE (val))
298 && CONSTANT_CLASS_P (val)
299 && TREE_OVERFLOW (val)
300 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
301 }
302
303 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
304
305 static inline bool
306 stmt_overflow_infinity (gimple stmt)
307 {
308 if (is_gimple_assign (stmt)
309 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
310 GIMPLE_SINGLE_RHS)
311 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
312 return false;
313 }
314
315 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
316 the same value with TREE_OVERFLOW clear. This can be used to avoid
317 confusing a regular value with an overflow value. */
318
319 static inline tree
320 avoid_overflow_infinity (tree val)
321 {
322 if (!is_overflow_infinity (val))
323 return val;
324
325 if (vrp_val_is_max (val))
326 return vrp_val_max (TREE_TYPE (val));
327 else
328 {
329 gcc_checking_assert (vrp_val_is_min (val));
330 return vrp_val_min (TREE_TYPE (val));
331 }
332 }
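/* For example, the positive overflow infinity of a signed int is a copy
   of TYPE_MAX_VALUE (INT_MAX) with TREE_OVERFLOW set; passing that node
   to avoid_overflow_infinity above yields the plain INT_MAX constant
   again, so a regular value is not confused with an overflow infinity.  */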
333
334
335 /* Return true if ARG is marked with the nonnull attribute in the
336 current function signature. */
337
338 static bool
339 nonnull_arg_p (const_tree arg)
340 {
341 tree t, attrs, fntype;
342 unsigned HOST_WIDE_INT arg_num;
343
344 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
345
346 /* The static chain decl is always non-null. */
347 if (arg == cfun->static_chain_decl)
348 return true;
349
350 fntype = TREE_TYPE (current_function_decl);
351 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
352
353 /* If "nonnull" wasn't specified, we know nothing about the argument. */
354 if (attrs == NULL_TREE)
355 return false;
356
357 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
358 if (TREE_VALUE (attrs) == NULL_TREE)
359 return true;
360
361 /* Get the position number for ARG in the function signature. */
362 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
363 t;
364 t = DECL_CHAIN (t), arg_num++)
365 {
366 if (t == arg)
367 break;
368 }
369
370 gcc_assert (t == arg);
371
372 /* Now see if ARG_NUM is mentioned in the nonnull list. */
373 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
374 {
375 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
376 return true;
377 }
378
379 return false;
380 }
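/* For instance, with a hypothetical declaration along the lines of
   void f (char *p, int n, char *q) __attribute__ ((nonnull (1, 3)));
   the loops above find argument position 1 for P and 3 for Q in the
   attribute list and return true for both, while a bare "nonnull"
   (no argument list) marks every pointer parameter as non-null.  */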
381
382
383 /* Set value range VR to VR_VARYING. */
384
385 static inline void
386 set_value_range_to_varying (value_range_t *vr)
387 {
388 vr->type = VR_VARYING;
389 vr->min = vr->max = NULL_TREE;
390 if (vr->equiv)
391 bitmap_clear (vr->equiv);
392 }
393
394
395 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
396
397 static void
398 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
399 tree max, bitmap equiv)
400 {
401 #if defined ENABLE_CHECKING
402 /* Check the validity of the range. */
403 if (t == VR_RANGE || t == VR_ANTI_RANGE)
404 {
405 int cmp;
406
407 gcc_assert (min && max);
408
409 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
410 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
411
412 cmp = compare_values (min, max);
413 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
414
415 if (needs_overflow_infinity (TREE_TYPE (min)))
416 gcc_assert (!is_overflow_infinity (min)
417 || !is_overflow_infinity (max));
418 }
419
420 if (t == VR_UNDEFINED || t == VR_VARYING)
421 gcc_assert (min == NULL_TREE && max == NULL_TREE);
422
423 if (t == VR_UNDEFINED || t == VR_VARYING)
424 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
425 #endif
426
427 vr->type = t;
428 vr->min = min;
429 vr->max = max;
430
431 /* Since updating the equivalence set involves deep copying the
432 bitmaps, only do it if absolutely necessary. */
433 if (vr->equiv == NULL
434 && equiv != NULL)
435 vr->equiv = BITMAP_ALLOC (NULL);
436
437 if (equiv != vr->equiv)
438 {
439 if (equiv && !bitmap_empty_p (equiv))
440 bitmap_copy (vr->equiv, equiv);
441 else
442 bitmap_clear (vr->equiv);
443 }
444 }
445
446
447 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
448 This means adjusting T, MIN and MAX representing the case of a
449 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
450 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
451 In corner cases where MAX+1 or MIN-1 wraps this will fall back
452 to varying.
453 This routine exists to ease canonicalization in the case where we
454 extract ranges from var + CST op limit. */
455
456 static void
457 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
458 tree min, tree max, bitmap equiv)
459 {
460 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
461 if ((t != VR_RANGE
462 && t != VR_ANTI_RANGE)
463 || TREE_CODE (min) != INTEGER_CST
464 || TREE_CODE (max) != INTEGER_CST)
465 {
466 set_value_range (vr, t, min, max, equiv);
467 return;
468 }
469
470 /* If MIN and MAX are in the wrong order, the range wraps around;
471 swap the (adjusted) bounds and flip the range type to compensate. */
472 if (tree_int_cst_lt (max, min))
473 {
474 tree one = build_int_cst (TREE_TYPE (min), 1);
475 tree tmp = int_const_binop (PLUS_EXPR, max, one);
476 max = int_const_binop (MINUS_EXPR, min, one);
477 min = tmp;
478
479 /* There's one corner case, if we had [C+1, C] before we now have
480 that again. But this represents an empty value range, so drop
481 to varying in this case. */
482 if (tree_int_cst_lt (max, min))
483 {
484 set_value_range_to_varying (vr);
485 return;
486 }
487
488 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
489 }
490
491 /* Anti-ranges that can be represented as ranges should be so. */
492 if (t == VR_ANTI_RANGE)
493 {
494 bool is_min = vrp_val_is_min (min);
495 bool is_max = vrp_val_is_max (max);
496
497 if (is_min && is_max)
498 {
499 /* We cannot deal with empty ranges, drop to varying. */
500 set_value_range_to_varying (vr);
501 return;
502 }
503 else if (is_min
504 /* As a special exception preserve non-null ranges. */
505 && !(TYPE_UNSIGNED (TREE_TYPE (min))
506 && integer_zerop (max)))
507 {
508 tree one = build_int_cst (TREE_TYPE (max), 1);
509 min = int_const_binop (PLUS_EXPR, max, one);
510 max = vrp_val_max (TREE_TYPE (max));
511 t = VR_RANGE;
512 }
513 else if (is_max)
514 {
515 tree one = build_int_cst (TREE_TYPE (min), 1);
516 max = int_const_binop (MINUS_EXPR, min, one);
517 min = vrp_val_min (TREE_TYPE (min));
518 t = VR_RANGE;
519 }
520 }
521
522 set_value_range (vr, t, min, max, equiv);
523 }
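/* A few examples of the canonicalization above, assuming an unsigned
   char operand: the wrapping range [250, 5] becomes the anti-range
   ~[6, 249]; the anti-range ~[0, 10] becomes the range [11, 255];
   ~[0, 0] is kept as-is so that non-null information survives; and an
   anti-range covering the whole type drops to VR_VARYING.  */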
524
525 /* Copy value range FROM into value range TO. */
526
527 static inline void
528 copy_value_range (value_range_t *to, value_range_t *from)
529 {
530 set_value_range (to, from->type, from->min, from->max, from->equiv);
531 }
532
533 /* Set value range VR to a single value. This function is only called
534 with values we get from statements, and exists to clear the
535 TREE_OVERFLOW flag so that we don't think we have an overflow
536 infinity when we shouldn't. */
537
538 static inline void
539 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
540 {
541 gcc_assert (is_gimple_min_invariant (val));
542 val = avoid_overflow_infinity (val);
543 set_value_range (vr, VR_RANGE, val, val, equiv);
544 }
545
546 /* Set value range VR to a non-negative range of type TYPE.
547 OVERFLOW_INFINITY indicates whether to use an overflow infinity
548 rather than TYPE_MAX_VALUE; this should be true if we determine
549 that the range is nonnegative based on the assumption that signed
550 overflow does not occur. */
551
552 static inline void
553 set_value_range_to_nonnegative (value_range_t *vr, tree type,
554 bool overflow_infinity)
555 {
556 tree zero;
557
558 if (overflow_infinity && !supports_overflow_infinity (type))
559 {
560 set_value_range_to_varying (vr);
561 return;
562 }
563
564 zero = build_int_cst (type, 0);
565 set_value_range (vr, VR_RANGE, zero,
566 (overflow_infinity
567 ? positive_overflow_infinity (type)
568 : TYPE_MAX_VALUE (type)),
569 vr->equiv);
570 }
571
572 /* Set value range VR to a non-NULL range of type TYPE. */
573
574 static inline void
575 set_value_range_to_nonnull (value_range_t *vr, tree type)
576 {
577 tree zero = build_int_cst (type, 0);
578 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
579 }
580
581
582 /* Set value range VR to a NULL range of type TYPE. */
583
584 static inline void
585 set_value_range_to_null (value_range_t *vr, tree type)
586 {
587 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
588 }
589
590
591 /* Set value range VR to a range of a truthvalue of type TYPE. */
592
593 static inline void
594 set_value_range_to_truthvalue (value_range_t *vr, tree type)
595 {
596 if (TYPE_PRECISION (type) == 1)
597 set_value_range_to_varying (vr);
598 else
599 set_value_range (vr, VR_RANGE,
600 build_int_cst (type, 0), build_int_cst (type, 1),
601 vr->equiv);
602 }
603
604
605 /* Set value range VR to VR_UNDEFINED. */
606
607 static inline void
608 set_value_range_to_undefined (value_range_t *vr)
609 {
610 vr->type = VR_UNDEFINED;
611 vr->min = vr->max = NULL_TREE;
612 if (vr->equiv)
613 bitmap_clear (vr->equiv);
614 }
615
616
617 /* If abs (min) < abs (max), set VR to [-max, max]; if
618 abs (min) >= abs (max), set VR to [-min, min]. */
619
620 static void
621 abs_extent_range (value_range_t *vr, tree min, tree max)
622 {
623 int cmp;
624
625 gcc_assert (TREE_CODE (min) == INTEGER_CST);
626 gcc_assert (TREE_CODE (max) == INTEGER_CST);
627 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
628 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
629 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
630 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
631 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
632 {
633 set_value_range_to_varying (vr);
634 return;
635 }
636 cmp = compare_values (min, max);
637 if (cmp == -1)
638 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
639 else if (cmp == 0 || cmp == 1)
640 {
641 max = min;
642 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
643 }
644 else
645 {
646 set_value_range_to_varying (vr);
647 return;
648 }
649 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
650 }
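/* For example, abs_extent_range on [-3, 5] produces [-5, 5] since
   |min| < |max|, while [-7, 2] produces [-7, 7] since |min| >= |max|;
   if taking an absolute value overflows, as for [INT_MIN, 0], the
   range drops to VARYING.  */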
651
652
653 /* Return value range information for VAR.
654
655 If we have no value ranges recorded (i.e., VRP is not running), then
656 return NULL. Otherwise create an empty range if none existed for VAR. */
657
658 static value_range_t *
659 get_value_range (const_tree var)
660 {
661 value_range_t *vr;
662 tree sym;
663 unsigned ver = SSA_NAME_VERSION (var);
664
665 /* If we have no recorded ranges, then return NULL. */
666 if (! vr_value)
667 return NULL;
668
669 vr = vr_value[ver];
670 if (vr)
671 return vr;
672
673 /* Create a default value range. */
674 vr_value[ver] = vr = XCNEW (value_range_t);
675
676 /* Defer allocating the equivalence set. */
677 vr->equiv = NULL;
678
679 /* If VAR is a default definition, the variable can take any value
680 in VAR's type. */
681 sym = SSA_NAME_VAR (var);
682 if (SSA_NAME_IS_DEFAULT_DEF (var))
683 {
684 /* Try to use the "nonnull" attribute to create ~[0, 0]
685 anti-ranges for pointers. Note that this is only valid with
686 default definitions of PARM_DECLs. */
687 if (TREE_CODE (sym) == PARM_DECL
688 && POINTER_TYPE_P (TREE_TYPE (sym))
689 && nonnull_arg_p (sym))
690 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
691 else
692 set_value_range_to_varying (vr);
693 }
694
695 return vr;
696 }
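/* Note that the table entry is created lazily: because XCNEW zeroes the
   structure, a non-default definition starts out as VR_UNDEFINED, while
   the default definition of a PARM_DECL pointer marked nonnull starts
   out as the anti-range ~[0, 0] and every other default definition
   starts out as VR_VARYING.  */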
697
698 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
699
700 static inline bool
701 vrp_operand_equal_p (const_tree val1, const_tree val2)
702 {
703 if (val1 == val2)
704 return true;
705 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
706 return false;
707 if (is_overflow_infinity (val1))
708 return is_overflow_infinity (val2);
709 return true;
710 }
711
712 /* Return true if the bitmaps B1 and B2 are equal. */
713
714 static inline bool
715 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
716 {
717 return (b1 == b2
718 || ((!b1 || bitmap_empty_p (b1))
719 && (!b2 || bitmap_empty_p (b2)))
720 || (b1 && b2
721 && bitmap_equal_p (b1, b2)));
722 }
723
724 /* Update the value range and equivalence set for variable VAR to
725 NEW_VR. Return true if NEW_VR is different from VAR's previous
726 value.
727
728 NOTE: This function assumes that NEW_VR is a temporary value range
729 object created for the sole purpose of updating VAR's range. The
730 storage used by the equivalence set from NEW_VR will be freed by
731 this function. Do not call update_value_range when NEW_VR
732 is the range object associated with another SSA name. */
733
734 static inline bool
735 update_value_range (const_tree var, value_range_t *new_vr)
736 {
737 value_range_t *old_vr;
738 bool is_new;
739
740 /* Update the value range, if necessary. */
741 old_vr = get_value_range (var);
742 is_new = old_vr->type != new_vr->type
743 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
744 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
745 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
746
747 if (is_new)
748 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
749 new_vr->equiv);
750
751 BITMAP_FREE (new_vr->equiv);
752
753 return is_new;
754 }
755
756
757 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
758 point where equivalence processing can be turned on/off. */
759
760 static void
761 add_equivalence (bitmap *equiv, const_tree var)
762 {
763 unsigned ver = SSA_NAME_VERSION (var);
764 value_range_t *vr = vr_value[ver];
765
766 if (*equiv == NULL)
767 *equiv = BITMAP_ALLOC (NULL);
768 bitmap_set_bit (*equiv, ver);
769 if (vr && vr->equiv)
770 bitmap_ior_into (*equiv, vr->equiv);
771 }
772
773
774 /* Return true if VR is ~[0, 0]. */
775
776 static inline bool
777 range_is_nonnull (value_range_t *vr)
778 {
779 return vr->type == VR_ANTI_RANGE
780 && integer_zerop (vr->min)
781 && integer_zerop (vr->max);
782 }
783
784
785 /* Return true if VR is [0, 0]. */
786
787 static inline bool
788 range_is_null (value_range_t *vr)
789 {
790 return vr->type == VR_RANGE
791 && integer_zerop (vr->min)
792 && integer_zerop (vr->max);
793 }
794
795 /* Return true if the max and min of VR are INTEGER_CST. It is not
796 necessarily a singleton. */
797
798 static inline bool
799 range_int_cst_p (value_range_t *vr)
800 {
801 return (vr->type == VR_RANGE
802 && TREE_CODE (vr->max) == INTEGER_CST
803 && TREE_CODE (vr->min) == INTEGER_CST
804 && !TREE_OVERFLOW (vr->max)
805 && !TREE_OVERFLOW (vr->min));
806 }
807
808 /* Return true if VR is an INTEGER_CST singleton. */
809
810 static inline bool
811 range_int_cst_singleton_p (value_range_t *vr)
812 {
813 return (range_int_cst_p (vr)
814 && tree_int_cst_equal (vr->min, vr->max));
815 }
816
817 /* Return true if value range VR involves at least one symbol. */
818
819 static inline bool
820 symbolic_range_p (value_range_t *vr)
821 {
822 return (!is_gimple_min_invariant (vr->min)
823 || !is_gimple_min_invariant (vr->max));
824 }
825
826 /* Return true if value range VR uses an overflow infinity. */
827
828 static inline bool
829 overflow_infinity_range_p (value_range_t *vr)
830 {
831 return (vr->type == VR_RANGE
832 && (is_overflow_infinity (vr->min)
833 || is_overflow_infinity (vr->max)));
834 }
835
836 /* Return false if we cannot make a valid comparison based on VR;
837 this will be the case if it uses an overflow infinity and overflow
838 is not undefined (i.e., -fno-strict-overflow is in effect).
839 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
840 uses an overflow infinity. */
841
842 static bool
843 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
844 {
845 gcc_assert (vr->type == VR_RANGE);
846 if (is_overflow_infinity (vr->min))
847 {
848 *strict_overflow_p = true;
849 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
850 return false;
851 }
852 if (is_overflow_infinity (vr->max))
853 {
854 *strict_overflow_p = true;
855 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
856 return false;
857 }
858 return true;
859 }
860
861
862 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
863 ranges obtained so far. */
864
865 static bool
866 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
867 {
868 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
869 || (TREE_CODE (expr) == SSA_NAME
870 && ssa_name_nonnegative_p (expr)));
871 }
872
873 /* Return true if the result of assignment STMT is known to be non-negative.
874 If the return value is based on the assumption that signed overflow is
875 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
876 *STRICT_OVERFLOW_P. */
877
878 static bool
879 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
880 {
881 enum tree_code code = gimple_assign_rhs_code (stmt);
882 switch (get_gimple_rhs_class (code))
883 {
884 case GIMPLE_UNARY_RHS:
885 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
886 gimple_expr_type (stmt),
887 gimple_assign_rhs1 (stmt),
888 strict_overflow_p);
889 case GIMPLE_BINARY_RHS:
890 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
891 gimple_expr_type (stmt),
892 gimple_assign_rhs1 (stmt),
893 gimple_assign_rhs2 (stmt),
894 strict_overflow_p);
895 case GIMPLE_TERNARY_RHS:
896 return false;
897 case GIMPLE_SINGLE_RHS:
898 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
899 strict_overflow_p);
900 case GIMPLE_INVALID_RHS:
901 gcc_unreachable ();
902 default:
903 gcc_unreachable ();
904 }
905 }
906
907 /* Return true if the return value of call STMT is known to be non-negative.
908 If the return value is based on the assumption that signed overflow is
909 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
910 *STRICT_OVERFLOW_P. */
911
912 static bool
913 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
914 {
915 tree arg0 = gimple_call_num_args (stmt) > 0 ?
916 gimple_call_arg (stmt, 0) : NULL_TREE;
917 tree arg1 = gimple_call_num_args (stmt) > 1 ?
918 gimple_call_arg (stmt, 1) : NULL_TREE;
919
920 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
921 gimple_call_fndecl (stmt),
922 arg0,
923 arg1,
924 strict_overflow_p);
925 }
926
927 /* Return true if STMT is known to compute a non-negative value.
928 If the return value is based on the assumption that signed overflow is
929 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
930 *STRICT_OVERFLOW_P. */
931
932 static bool
933 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
934 {
935 switch (gimple_code (stmt))
936 {
937 case GIMPLE_ASSIGN:
938 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
939 case GIMPLE_CALL:
940 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
941 default:
942 gcc_unreachable ();
943 }
944 }
945
946 /* Return true if the result of assignment STMT is known to be non-zero.
947 If the return value is based on the assumption that signed overflow is
948 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
949 *STRICT_OVERFLOW_P. */
950
951 static bool
952 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
953 {
954 enum tree_code code = gimple_assign_rhs_code (stmt);
955 switch (get_gimple_rhs_class (code))
956 {
957 case GIMPLE_UNARY_RHS:
958 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
959 gimple_expr_type (stmt),
960 gimple_assign_rhs1 (stmt),
961 strict_overflow_p);
962 case GIMPLE_BINARY_RHS:
963 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
964 gimple_expr_type (stmt),
965 gimple_assign_rhs1 (stmt),
966 gimple_assign_rhs2 (stmt),
967 strict_overflow_p);
968 case GIMPLE_TERNARY_RHS:
969 return false;
970 case GIMPLE_SINGLE_RHS:
971 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
972 strict_overflow_p);
973 case GIMPLE_INVALID_RHS:
974 gcc_unreachable ();
975 default:
976 gcc_unreachable ();
977 }
978 }
979
980 /* Return true if STMT is known to compute a non-zero value.
981 If the return value is based on the assumption that signed overflow is
982 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
983 *STRICT_OVERFLOW_P. */
984
985 static bool
986 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
987 {
988 switch (gimple_code (stmt))
989 {
990 case GIMPLE_ASSIGN:
991 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
992 case GIMPLE_CALL:
993 return gimple_alloca_call_p (stmt);
994 default:
995 gcc_unreachable ();
996 }
997 }
998
999 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1000 obtained so far. */
1001
1002 static bool
1003 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1004 {
1005 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1006 return true;
1007
1008 /* If we have an expression of the form &X->a, then the expression
1009 is nonnull if X is nonnull. */
1010 if (is_gimple_assign (stmt)
1011 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1012 {
1013 tree expr = gimple_assign_rhs1 (stmt);
1014 tree base = get_base_address (TREE_OPERAND (expr, 0));
1015
1016 if (base != NULL_TREE
1017 && TREE_CODE (base) == MEM_REF
1018 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1019 {
1020 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1021 if (range_is_nonnull (vr))
1022 return true;
1023 }
1024 }
1025
1026 return false;
1027 }
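/* For example, for a statement of the form p_2 = &q_1->fld the address
   is known to be nonzero as soon as q_1's recorded range is ~[0, 0],
   which is exactly the MEM_REF case handled above.  */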
1028
1029 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1030 a gimple invariant, or SSA_NAME +- CST. */
1031
1032 static bool
1033 valid_value_p (tree expr)
1034 {
1035 if (TREE_CODE (expr) == SSA_NAME)
1036 return true;
1037
1038 if (TREE_CODE (expr) == PLUS_EXPR
1039 || TREE_CODE (expr) == MINUS_EXPR)
1040 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1041 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1042
1043 return is_gimple_min_invariant (expr);
1044 }
1045
1046 /* Return
1047 1 if VAL < VAL2
1048 0 if !(VAL < VAL2)
1049 -2 if those are incomparable. */
1050 static inline int
1051 operand_less_p (tree val, tree val2)
1052 {
1053 /* LT is folded faster than GE and others. Inline the common case. */
1054 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1055 {
1056 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1057 return INT_CST_LT_UNSIGNED (val, val2);
1058 else
1059 {
1060 if (INT_CST_LT (val, val2))
1061 return 1;
1062 }
1063 }
1064 else
1065 {
1066 tree tcmp;
1067
1068 fold_defer_overflow_warnings ();
1069
1070 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1071
1072 fold_undefer_and_ignore_overflow_warnings ();
1073
1074 if (!tcmp
1075 || TREE_CODE (tcmp) != INTEGER_CST)
1076 return -2;
1077
1078 if (!integer_zerop (tcmp))
1079 return 1;
1080 }
1081
1082 /* val >= val2, not considering overflow infinity. */
1083 if (is_negative_overflow_infinity (val))
1084 return is_negative_overflow_infinity (val2) ? 0 : 1;
1085 else if (is_positive_overflow_infinity (val2))
1086 return is_positive_overflow_infinity (val) ? 0 : 1;
1087
1088 return 0;
1089 }
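/* For example, operand_less_p (5, 7) returns 1 and operand_less_p (7, 5)
   returns 0, while comparing values that fold cannot decide at compile
   time (say two unrelated SSA names) returns -2.  */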
1090
1091 /* Compare two values VAL1 and VAL2. Return
1092
1093 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1094 -1 if VAL1 < VAL2,
1095 0 if VAL1 == VAL2,
1096 +1 if VAL1 > VAL2, and
1097 +2 if VAL1 != VAL2
1098
1099 This is similar to tree_int_cst_compare but supports pointer values
1100 and values that cannot be compared at compile time.
1101
1102 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1103 true if the return value is only valid if we assume that signed
1104 overflow is undefined. */
1105
1106 static int
1107 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1108 {
1109 if (val1 == val2)
1110 return 0;
1111
1112 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1113 both integers. */
1114 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1115 == POINTER_TYPE_P (TREE_TYPE (val2)));
1116 /* Convert the two values into the same type. This is needed because
1117 sizetype causes sign extension even for unsigned types. */
1118 val2 = fold_convert (TREE_TYPE (val1), val2);
1119 STRIP_USELESS_TYPE_CONVERSION (val2);
1120
1121 if ((TREE_CODE (val1) == SSA_NAME
1122 || TREE_CODE (val1) == PLUS_EXPR
1123 || TREE_CODE (val1) == MINUS_EXPR)
1124 && (TREE_CODE (val2) == SSA_NAME
1125 || TREE_CODE (val2) == PLUS_EXPR
1126 || TREE_CODE (val2) == MINUS_EXPR))
1127 {
1128 tree n1, c1, n2, c2;
1129 enum tree_code code1, code2;
1130
1131 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1132 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1133 same name, return -2. */
1134 if (TREE_CODE (val1) == SSA_NAME)
1135 {
1136 code1 = SSA_NAME;
1137 n1 = val1;
1138 c1 = NULL_TREE;
1139 }
1140 else
1141 {
1142 code1 = TREE_CODE (val1);
1143 n1 = TREE_OPERAND (val1, 0);
1144 c1 = TREE_OPERAND (val1, 1);
1145 if (tree_int_cst_sgn (c1) == -1)
1146 {
1147 if (is_negative_overflow_infinity (c1))
1148 return -2;
1149 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1150 if (!c1)
1151 return -2;
1152 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1153 }
1154 }
1155
1156 if (TREE_CODE (val2) == SSA_NAME)
1157 {
1158 code2 = SSA_NAME;
1159 n2 = val2;
1160 c2 = NULL_TREE;
1161 }
1162 else
1163 {
1164 code2 = TREE_CODE (val2);
1165 n2 = TREE_OPERAND (val2, 0);
1166 c2 = TREE_OPERAND (val2, 1);
1167 if (tree_int_cst_sgn (c2) == -1)
1168 {
1169 if (is_negative_overflow_infinity (c2))
1170 return -2;
1171 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1172 if (!c2)
1173 return -2;
1174 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1175 }
1176 }
1177
1178 /* Both values must use the same name. */
1179 if (n1 != n2)
1180 return -2;
1181
1182 if (code1 == SSA_NAME
1183 && code2 == SSA_NAME)
1184 /* NAME == NAME */
1185 return 0;
1186
1187 /* If overflow is defined we cannot simplify more. */
1188 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1189 return -2;
1190
1191 if (strict_overflow_p != NULL
1192 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1193 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1194 *strict_overflow_p = true;
1195
1196 if (code1 == SSA_NAME)
1197 {
1198 if (code2 == PLUS_EXPR)
1199 /* NAME < NAME + CST */
1200 return -1;
1201 else if (code2 == MINUS_EXPR)
1202 /* NAME > NAME - CST */
1203 return 1;
1204 }
1205 else if (code1 == PLUS_EXPR)
1206 {
1207 if (code2 == SSA_NAME)
1208 /* NAME + CST > NAME */
1209 return 1;
1210 else if (code2 == PLUS_EXPR)
1211 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1212 return compare_values_warnv (c1, c2, strict_overflow_p);
1213 else if (code2 == MINUS_EXPR)
1214 /* NAME + CST1 > NAME - CST2 */
1215 return 1;
1216 }
1217 else if (code1 == MINUS_EXPR)
1218 {
1219 if (code2 == SSA_NAME)
1220 /* NAME - CST < NAME */
1221 return -1;
1222 else if (code2 == PLUS_EXPR)
1223 /* NAME - CST1 < NAME + CST2 */
1224 return -1;
1225 else if (code2 == MINUS_EXPR)
1226 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1227 C1 and C2 are swapped in the call to compare_values. */
1228 return compare_values_warnv (c2, c1, strict_overflow_p);
1229 }
1230
1231 gcc_unreachable ();
1232 }
1233
1234 /* We cannot compare non-constants. */
1235 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1236 return -2;
1237
1238 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1239 {
1240 /* We cannot compare overflowed values, except for overflow
1241 infinities. */
1242 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1243 {
1244 if (strict_overflow_p != NULL)
1245 *strict_overflow_p = true;
1246 if (is_negative_overflow_infinity (val1))
1247 return is_negative_overflow_infinity (val2) ? 0 : -1;
1248 else if (is_negative_overflow_infinity (val2))
1249 return 1;
1250 else if (is_positive_overflow_infinity (val1))
1251 return is_positive_overflow_infinity (val2) ? 0 : 1;
1252 else if (is_positive_overflow_infinity (val2))
1253 return -1;
1254 return -2;
1255 }
1256
1257 return tree_int_cst_compare (val1, val2);
1258 }
1259 else
1260 {
1261 tree t;
1262
1263 /* First see if VAL1 and VAL2 are not the same. */
1264 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1265 return 0;
1266
1267 /* If VAL1 is a lower address than VAL2, return -1. */
1268 if (operand_less_p (val1, val2) == 1)
1269 return -1;
1270
1271 /* If VAL1 is a higher address than VAL2, return +1. */
1272 if (operand_less_p (val2, val1) == 1)
1273 return 1;
1274
1275 /* If VAL1 is different than VAL2, return +2.
1276 For integer constants we either have already returned -1 or 1
1277 or they are equivalent. We still might succeed in proving
1278 something about non-trivial operands. */
1279 if (TREE_CODE (val1) != INTEGER_CST
1280 || TREE_CODE (val2) != INTEGER_CST)
1281 {
1282 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1283 if (t && integer_onep (t))
1284 return 2;
1285 }
1286
1287 return -2;
1288 }
1289 }
1290
1291 /* Compare values like compare_values_warnv, but treat comparisons of
1292 nonconstants which rely on undefined overflow as incomparable. */
1293
1294 static int
1295 compare_values (tree val1, tree val2)
1296 {
1297 bool sop;
1298 int ret;
1299
1300 sop = false;
1301 ret = compare_values_warnv (val1, val2, &sop);
1302 if (sop
1303 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1304 ret = -2;
1305 return ret;
1306 }
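/* For example, given symbolic values i_1 - 2 and i_1 + 3 in a type whose
   signed overflow is undefined, compare_values_warnv returns -1 and sets
   *STRICT_OVERFLOW_P, while compare_values above turns that into -2
   because the answer relies on undefined overflow and the operands are
   not constants; two equal constants simply compare as 0.  */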
1307
1308
1309 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1310 0 if VAL is not inside VR,
1311 -2 if we cannot tell either way.
1312
1313 FIXME, the current semantics of this function are a bit quirky
1314 when taken in the context of VRP. In here we do not care
1315 about VR's type. If VR is the anti-range ~[3, 5] the call
1316 value_inside_range (4, VR) will return 1.
1317
1318 This is counter-intuitive in a strict sense, but the callers
1319 currently expect this. They are calling the function
1320 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1321 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1322 themselves.
1323
1324 This also applies to value_ranges_intersect_p and
1325 range_includes_zero_p. The semantics of VR_RANGE and
1326 VR_ANTI_RANGE should be encoded here, but that also means
1327 adapting the users of these functions to the new semantics.
1328
1329 Benchmark compile/20001226-1.c compilation time after changing this
1330 function. */
1331
1332 static inline int
1333 value_inside_range (tree val, value_range_t * vr)
1334 {
1335 int cmp1, cmp2;
1336
1337 cmp1 = operand_less_p (val, vr->min);
1338 if (cmp1 == -2)
1339 return -2;
1340 if (cmp1 == 1)
1341 return 0;
1342
1343 cmp2 = operand_less_p (vr->max, val);
1344 if (cmp2 == -2)
1345 return -2;
1346
1347 return !cmp2;
1348 }
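/* As the FIXME above notes, only the bounds are consulted here:
   value_inside_range (4, vr) returns 1 for both the range [3, 5] and
   the anti-range ~[3, 5], and it is up to the caller to apply the
   VR_RANGE/VR_ANTI_RANGE interpretation.  */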
1349
1350
1351 /* Return true if value ranges VR0 and VR1 have a non-empty
1352 intersection.
1353
1354 Benchmark compile/20001226-1.c compilation time after changing this
1355 function.
1356 */
1357
1358 static inline bool
1359 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1360 {
1361 /* The value ranges do not intersect if the maximum of the first range is
1362 less than the minimum of the second range or vice versa.
1363 When those relations are unknown, we can't do any better. */
1364 if (operand_less_p (vr0->max, vr1->min) != 0)
1365 return false;
1366 if (operand_less_p (vr1->max, vr0->min) != 0)
1367 return false;
1368 return true;
1369 }
1370
1371
1372 /* Return true if VR includes the value zero, false otherwise. FIXME,
1373 currently this will return false for an anti-range like ~[-4, 3].
1374 This will be wrong when the semantics of value_inside_range are
1375 modified (currently the users of this function expect these
1376 semantics). */
1377
1378 static inline bool
1379 range_includes_zero_p (value_range_t *vr)
1380 {
1381 tree zero;
1382
1383 gcc_assert (vr->type != VR_UNDEFINED
1384 && vr->type != VR_VARYING
1385 && !symbolic_range_p (vr));
1386
1387 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1388 return (value_inside_range (zero, vr) == 1);
1389 }
1390
1391 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1392 false otherwise or if no value range information is available. */
1393
1394 bool
1395 ssa_name_nonnegative_p (const_tree t)
1396 {
1397 value_range_t *vr = get_value_range (t);
1398
1399 if (INTEGRAL_TYPE_P (t)
1400 && TYPE_UNSIGNED (t))
1401 return true;
1402
1403 if (!vr)
1404 return false;
1405
1406 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1407 which would return a useful value should be encoded as a VR_RANGE. */
1408 if (vr->type == VR_RANGE)
1409 {
1410 int result = compare_values (vr->min, integer_zero_node);
1411
1412 return (result == 0 || result == 1);
1413 }
1414 return false;
1415 }
1416
1417 /* If OP has a value range with a single constant value return that,
1418 otherwise return NULL_TREE. This returns OP itself if OP is a
1419 constant. */
1420
1421 static tree
1422 op_with_constant_singleton_value_range (tree op)
1423 {
1424 value_range_t *vr;
1425
1426 if (is_gimple_min_invariant (op))
1427 return op;
1428
1429 if (TREE_CODE (op) != SSA_NAME)
1430 return NULL_TREE;
1431
1432 vr = get_value_range (op);
1433 if (vr->type == VR_RANGE
1434 && operand_equal_p (vr->min, vr->max, 0)
1435 && is_gimple_min_invariant (vr->min))
1436 return vr->min;
1437
1438 return NULL_TREE;
1439 }
1440
1441
1442 /* Extract value range information from an ASSERT_EXPR EXPR and store
1443 it in *VR_P. */
1444
1445 static void
1446 extract_range_from_assert (value_range_t *vr_p, tree expr)
1447 {
1448 tree var, cond, limit, min, max, type;
1449 value_range_t *var_vr, *limit_vr;
1450 enum tree_code cond_code;
1451
1452 var = ASSERT_EXPR_VAR (expr);
1453 cond = ASSERT_EXPR_COND (expr);
1454
1455 gcc_assert (COMPARISON_CLASS_P (cond));
1456
1457 /* Find VAR in the ASSERT_EXPR conditional. */
1458 if (var == TREE_OPERAND (cond, 0)
1459 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1460 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1461 {
1462 /* If the predicate is of the form VAR COMP LIMIT, then we just
1463 take LIMIT from the RHS and use the same comparison code. */
1464 cond_code = TREE_CODE (cond);
1465 limit = TREE_OPERAND (cond, 1);
1466 cond = TREE_OPERAND (cond, 0);
1467 }
1468 else
1469 {
1470 /* If the predicate is of the form LIMIT COMP VAR, then we need
1471 to flip around the comparison code to create the proper range
1472 for VAR. */
1473 cond_code = swap_tree_comparison (TREE_CODE (cond));
1474 limit = TREE_OPERAND (cond, 0);
1475 cond = TREE_OPERAND (cond, 1);
1476 }
1477
1478 limit = avoid_overflow_infinity (limit);
1479
1480 type = TREE_TYPE (limit);
1481 gcc_assert (limit != var);
1482
1483 /* For pointer arithmetic, we only keep track of pointer equality
1484 and inequality. */
1485 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1486 {
1487 set_value_range_to_varying (vr_p);
1488 return;
1489 }
1490
1491 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1492 try to use LIMIT's range to avoid creating symbolic ranges
1493 unnecessarily. */
1494 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1495
1496 /* LIMIT's range is only interesting if it has any useful information. */
1497 if (limit_vr
1498 && (limit_vr->type == VR_UNDEFINED
1499 || limit_vr->type == VR_VARYING
1500 || symbolic_range_p (limit_vr)))
1501 limit_vr = NULL;
1502
1503 /* Initially, the new range has the same set of equivalences of
1504 VAR's range. This will be revised before returning the final
1505 value. Since assertions may be chained via mutually exclusive
1506 predicates, we will need to trim the set of equivalences before
1507 we are done. */
1508 gcc_assert (vr_p->equiv == NULL);
1509 add_equivalence (&vr_p->equiv, var);
1510
1511 /* Extract a new range based on the asserted comparison for VAR and
1512 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1513 will only use it for equality comparisons (EQ_EXPR). For any
1514 other kind of assertion, we cannot derive a range from LIMIT's
1515 anti-range that can be used to describe the new range. For
1516 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1517 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1518 no single range for x_2 that could describe LE_EXPR, so we might
1519 as well build the range [b_4, +INF] for it.
1520 One special case we handle is extracting a range from a
1521 range test encoded as (unsigned)var + CST <= limit. */
1522 if (TREE_CODE (cond) == NOP_EXPR
1523 || TREE_CODE (cond) == PLUS_EXPR)
1524 {
1525 if (TREE_CODE (cond) == PLUS_EXPR)
1526 {
1527 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1528 TREE_OPERAND (cond, 1));
1529 max = int_const_binop (PLUS_EXPR, limit, min);
1530 cond = TREE_OPERAND (cond, 0);
1531 }
1532 else
1533 {
1534 min = build_int_cst (TREE_TYPE (var), 0);
1535 max = limit;
1536 }
1537
1538 /* Make sure to not set TREE_OVERFLOW on the final type
1539 conversion. We are willingly interpreting large positive
1540 unsigned values as negative signed values here. */
1541 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1542 0, false);
1543 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1544 0, false);
1545
1546 /* We can transform a max, min range to an anti-range or
1547 vice-versa. Use set_and_canonicalize_value_range which does
1548 this for us. */
1549 if (cond_code == LE_EXPR)
1550 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1551 min, max, vr_p->equiv);
1552 else if (cond_code == GT_EXPR)
1553 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1554 min, max, vr_p->equiv);
1555 else
1556 gcc_unreachable ();
1557 }
1558 else if (cond_code == EQ_EXPR)
1559 {
1560 enum value_range_type range_type;
1561
1562 if (limit_vr)
1563 {
1564 range_type = limit_vr->type;
1565 min = limit_vr->min;
1566 max = limit_vr->max;
1567 }
1568 else
1569 {
1570 range_type = VR_RANGE;
1571 min = limit;
1572 max = limit;
1573 }
1574
1575 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1576
1577 /* When asserting the equality VAR == LIMIT and LIMIT is another
1578 SSA name, the new range will also inherit the equivalence set
1579 from LIMIT. */
1580 if (TREE_CODE (limit) == SSA_NAME)
1581 add_equivalence (&vr_p->equiv, limit);
1582 }
1583 else if (cond_code == NE_EXPR)
1584 {
1585 /* As described above, when LIMIT's range is an anti-range and
1586 this assertion is an inequality (NE_EXPR), then we cannot
1587 derive anything from the anti-range. For instance, if
1588 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1589 not imply that VAR's range is [0, 0]. So, in the case of
1590 anti-ranges, we just assert the inequality using LIMIT and
1591 not its anti-range.
1592
1593 If LIMIT_VR is a range, we can only use it to build a new
1594 anti-range if LIMIT_VR is a single-valued range. For
1595 instance, if LIMIT_VR is [0, 1], the predicate
1596 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1597 Rather, it means that for value 0 VAR should be ~[0, 0]
1598 and for value 1, VAR should be ~[1, 1]. We cannot
1599 represent these ranges.
1600
1601 The only situation in which we can build a valid
1602 anti-range is when LIMIT_VR is a single-valued range
1603 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1604 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1605 if (limit_vr
1606 && limit_vr->type == VR_RANGE
1607 && compare_values (limit_vr->min, limit_vr->max) == 0)
1608 {
1609 min = limit_vr->min;
1610 max = limit_vr->max;
1611 }
1612 else
1613 {
1614 /* In any other case, we cannot use LIMIT's range to build a
1615 valid anti-range. */
1616 min = max = limit;
1617 }
1618
1619 /* If MIN and MAX cover the whole range for their type, then
1620 just use the original LIMIT. */
1621 if (INTEGRAL_TYPE_P (type)
1622 && vrp_val_is_min (min)
1623 && vrp_val_is_max (max))
1624 min = max = limit;
1625
1626 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1627 }
1628 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1629 {
1630 min = TYPE_MIN_VALUE (type);
1631
1632 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1633 max = limit;
1634 else
1635 {
1636 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1637 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1638 LT_EXPR. */
1639 max = limit_vr->max;
1640 }
1641
1642 /* If the maximum value forces us to be out of bounds, simply punt.
1643 It would be pointless to try and do anything more since this
1644 all should be optimized away above us. */
1645 if ((cond_code == LT_EXPR
1646 && compare_values (max, min) == 0)
1647 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1648 set_value_range_to_varying (vr_p);
1649 else
1650 {
1651 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1652 if (cond_code == LT_EXPR)
1653 {
1654 tree one = build_int_cst (type, 1);
1655 max = fold_build2 (MINUS_EXPR, type, max, one);
1656 if (EXPR_P (max))
1657 TREE_NO_WARNING (max) = 1;
1658 }
1659
1660 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1661 }
1662 }
1663 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1664 {
1665 max = TYPE_MAX_VALUE (type);
1666
1667 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1668 min = limit;
1669 else
1670 {
1671 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1672 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1673 GT_EXPR. */
1674 min = limit_vr->min;
1675 }
1676
1677 /* If the minimum value forces us to be out of bounds, simply punt.
1678 It would be pointless to try and do anything more since this
1679 all should be optimized away above us. */
1680 if ((cond_code == GT_EXPR
1681 && compare_values (min, max) == 0)
1682 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1683 set_value_range_to_varying (vr_p);
1684 else
1685 {
1686 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1687 if (cond_code == GT_EXPR)
1688 {
1689 tree one = build_int_cst (type, 1);
1690 min = fold_build2 (PLUS_EXPR, type, min, one);
1691 if (EXPR_P (min))
1692 TREE_NO_WARNING (min) = 1;
1693 }
1694
1695 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1696 }
1697 }
1698 else
1699 gcc_unreachable ();
1700
1701 /* If VAR already had a known range, it may happen that the new
1702 range we have computed and VAR's range are not compatible. For
1703 instance,
1704
1705 if (p_5 == NULL)
1706 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1707 x_7 = p_6->fld;
1708 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1709
1710 While the above comes from a faulty program, it will cause an ICE
1711 later because p_8 and p_6 will have incompatible ranges and at
1712 the same time will be considered equivalent. A similar situation
1713 would arise from
1714
1715 if (i_5 > 10)
1716 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1717 if (i_5 < 5)
1718 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1719
1720 Again i_6 and i_7 will have incompatible ranges. It would be
1721 pointless to try and do anything with i_7's range because
1722 anything dominated by 'if (i_5 < 5)' will be optimized away.
1723 Note, due to the way in which simulation proceeds, the statement
1724 i_7 = ASSERT_EXPR <...> would never be visited because the
1725 conditional 'if (i_5 < 5)' always evaluates to false. However,
1726 this extra check does not hurt and may protect against future
1727 changes to VRP that may get into a situation similar to the
1728 NULL pointer dereference example.
1729
1730 Note that these compatibility tests are only needed when dealing
1731 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1732 are both anti-ranges, they will always be compatible, because two
1733 anti-ranges will always have a non-empty intersection. */
1734
1735 var_vr = get_value_range (var);
1736
1737 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1738 ranges or anti-ranges. */
1739 if (vr_p->type == VR_VARYING
1740 || vr_p->type == VR_UNDEFINED
1741 || var_vr->type == VR_VARYING
1742 || var_vr->type == VR_UNDEFINED
1743 || symbolic_range_p (vr_p)
1744 || symbolic_range_p (var_vr))
1745 return;
1746
1747 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1748 {
1749 /* If the two ranges have a non-empty intersection, we can
1750 refine the resulting range. Since the assert expression
1751 creates an equivalency and at the same time it asserts a
1752 predicate, we can take the intersection of the two ranges to
1753 get better precision. */
1754 if (value_ranges_intersect_p (var_vr, vr_p))
1755 {
1756 /* Use the larger of the two minimums. */
1757 if (compare_values (vr_p->min, var_vr->min) == -1)
1758 min = var_vr->min;
1759 else
1760 min = vr_p->min;
1761
1762 /* Use the smaller of the two maximums. */
1763 if (compare_values (vr_p->max, var_vr->max) == 1)
1764 max = var_vr->max;
1765 else
1766 max = vr_p->max;
1767
1768 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1769 }
1770 else
1771 {
1772 /* The two ranges do not intersect, set the new range to
1773 VARYING, because we will not be able to do anything
1774 meaningful with it. */
1775 set_value_range_to_varying (vr_p);
1776 }
1777 }
1778 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1779 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1780 {
1781 /* A range and an anti-range will cancel each other only if
1782 their ends are the same. For instance, in the example above,
1783 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1784 so VR_P should be set to VR_VARYING. */
1785 if (compare_values (var_vr->min, vr_p->min) == 0
1786 && compare_values (var_vr->max, vr_p->max) == 0)
1787 set_value_range_to_varying (vr_p);
1788 else
1789 {
1790 tree min, max, anti_min, anti_max, real_min, real_max;
1791 int cmp;
1792
1793 /* We want to compute the logical AND of the two ranges;
1794 there are three cases to consider.
1795
1796
1797 1. The VR_ANTI_RANGE range is completely within the
1798 VR_RANGE and the endpoints of the ranges are
1799 different. In that case the resulting range
1800 should be whichever range is more precise.
1801 Typically that will be the VR_RANGE.
1802
1803 2. The VR_ANTI_RANGE is completely disjoint from
1804 the VR_RANGE. In this case the resulting range
1805 should be the VR_RANGE.
1806
1807 3. There is some overlap between the VR_ANTI_RANGE
1808 and the VR_RANGE.
1809
1810 3a. If the high limit of the VR_ANTI_RANGE resides
1811 within the VR_RANGE, then the result is a new
1812 VR_RANGE starting at the high limit of the
1813 VR_ANTI_RANGE + 1 and extending to the
1814 high limit of the original VR_RANGE.
1815
1816 3b. If the low limit of the VR_ANTI_RANGE resides
1817 within the VR_RANGE, then the result is a new
1818 VR_RANGE starting at the low limit of the original
1819 VR_RANGE and extending to the low limit of the
1820 VR_ANTI_RANGE - 1. */
1821 if (vr_p->type == VR_ANTI_RANGE)
1822 {
1823 anti_min = vr_p->min;
1824 anti_max = vr_p->max;
1825 real_min = var_vr->min;
1826 real_max = var_vr->max;
1827 }
1828 else
1829 {
1830 anti_min = var_vr->min;
1831 anti_max = var_vr->max;
1832 real_min = vr_p->min;
1833 real_max = vr_p->max;
1834 }
1835
1836
1837 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1838 not including any endpoints. */
1839 if (compare_values (anti_max, real_max) == -1
1840 && compare_values (anti_min, real_min) == 1)
1841 {
1842 /* If the range is covering the whole valid range of
1843 the type keep the anti-range. */
1844 if (!vrp_val_is_min (real_min)
1845 || !vrp_val_is_max (real_max))
1846 set_value_range (vr_p, VR_RANGE, real_min,
1847 real_max, vr_p->equiv);
1848 }
1849 /* Case 2, VR_ANTI_RANGE completely disjoint from
1850 VR_RANGE. */
1851 else if (compare_values (anti_min, real_max) == 1
1852 || compare_values (anti_max, real_min) == -1)
1853 {
1854 set_value_range (vr_p, VR_RANGE, real_min,
1855 real_max, vr_p->equiv);
1856 }
1857 /* Case 3a, the anti-range extends into the low
1858 part of the real range, thus creating a new
1859 low bound for the real range. */
1860 else if (((cmp = compare_values (anti_max, real_min)) == 1
1861 || cmp == 0)
1862 && compare_values (anti_max, real_max) == -1)
1863 {
1864 gcc_assert (!is_positive_overflow_infinity (anti_max));
1865 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1866 && vrp_val_is_max (anti_max))
1867 {
1868 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1869 {
1870 set_value_range_to_varying (vr_p);
1871 return;
1872 }
1873 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1874 }
1875 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1876 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1877 anti_max,
1878 build_int_cst (TREE_TYPE (var_vr->min), 1));
1879 else
1880 min = fold_build_pointer_plus_hwi (anti_max, 1);
1881 max = real_max;
1882 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1883 }
1884 /* Case 3b, the anti-range extends into the high
1885 part of the real range, thus creating a new
1886 high bound for the real range. */
1887 else if (compare_values (anti_min, real_min) == 1
1888 && ((cmp = compare_values (anti_min, real_max)) == -1
1889 || cmp == 0))
1890 {
1891 gcc_assert (!is_negative_overflow_infinity (anti_min));
1892 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1893 && vrp_val_is_min (anti_min))
1894 {
1895 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1896 {
1897 set_value_range_to_varying (vr_p);
1898 return;
1899 }
1900 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1901 }
1902 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1903 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1904 anti_min,
1905 build_int_cst (TREE_TYPE (var_vr->min), 1));
1906 else
1907 max = fold_build_pointer_plus_hwi (anti_min, -1);
1908 min = real_min;
1909 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1910 }
1911 }
1912 }
1913 }
1914
1915
1916 /* Extract range information from SSA name VAR and store it in VR. If
1917 VAR has an interesting range, use it. Otherwise, create the
1918 range [VAR, VAR] and return it. This is useful in situations where
1919 we may have conditionals testing values of VARYING names. For
1920 instance,
1921
1922 x_3 = y_5;
1923 if (x_3 > y_5)
1924 ...
1925
1926 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1927 always false. */
1928
1929 static void
1930 extract_range_from_ssa_name (value_range_t *vr, tree var)
1931 {
1932 value_range_t *var_vr = get_value_range (var);
1933
1934 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1935 copy_value_range (vr, var_vr);
1936 else
1937 set_value_range (vr, VR_RANGE, var, var, NULL);
1938
1939 add_equivalence (&vr->equiv, var);
1940 }
1941
1942
1943 /* Wrapper around int_const_binop. If the operation overflows and we
1944 are not using wrapping arithmetic, then adjust the result to be
1945 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1946 NULL_TREE if we need to use an overflow infinity representation but
1947 the type does not support it. */
1948
1949 static tree
1950 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1951 {
1952 tree res;
1953
1954 res = int_const_binop (code, val1, val2);
1955
1956 /* If we are using unsigned arithmetic, operate symbolically
1957 on -INF and +INF as int_const_binop only handles signed overflow. */
1958 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1959 {
1960 int checkz = compare_values (res, val1);
1961 bool overflow = false;
1962
1963 /* Ensure that res = val1 [+*] val2 >= val1
1964 or that res = val1 - val2 <= val1. */
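/* For instance, with 8-bit unsigned values (used here only for
illustration), 200 + 100 wraps to 44, and 44 < 200 flags the
addition as overflowed; likewise 5 - 9 wraps to 252, and
252 > 5 flags the subtraction. */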
1965 if ((code == PLUS_EXPR
1966 && !(checkz == 1 || checkz == 0))
1967 || (code == MINUS_EXPR
1968 && !(checkz == 0 || checkz == -1)))
1969 {
1970 overflow = true;
1971 }
1972 /* Checking for multiplication overflow is done by dividing the
1973 output of the multiplication by the first input of the
1974 multiplication. If the result of that division operation is
1975 not equal to the second input of the multiplication, then the
1976 multiplication overflowed. */
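/* Continuing the 8-bit illustration, 20 * 13 wraps to 4, and
4 / 20 == 0 != 13, so the multiplication is flagged as
overflowed. */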
1977 else if (code == MULT_EXPR && !integer_zerop (val1))
1978 {
1979 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1980 res,
1981 val1);
1982 int check = compare_values (tmp, val2);
1983
1984 if (check != 0)
1985 overflow = true;
1986 }
1987
1988 if (overflow)
1989 {
1990 res = copy_node (res);
1991 TREE_OVERFLOW (res) = 1;
1992 }
1993
1994 }
1995 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1996 /* If the signed operation wraps then int_const_binop has done
1997 everything we want. */
1998 ;
1999 else if ((TREE_OVERFLOW (res)
2000 && !TREE_OVERFLOW (val1)
2001 && !TREE_OVERFLOW (val2))
2002 || is_overflow_infinity (val1)
2003 || is_overflow_infinity (val2))
2004 {
2005 /* If the operation overflowed but neither VAL1 nor VAL2 are
2006 overflown, return -INF or +INF depending on the operation
2007 and the combination of signs of the operands. */
2008 int sgn1 = tree_int_cst_sgn (val1);
2009 int sgn2 = tree_int_cst_sgn (val2);
2010
2011 if (needs_overflow_infinity (TREE_TYPE (res))
2012 && !supports_overflow_infinity (TREE_TYPE (res)))
2013 return NULL_TREE;
2014
2015 /* We have to punt on adding infinities of different signs,
2016 since we can't tell what the sign of the result should be.
2017 Likewise for subtracting infinities of the same sign. */
2018 if (((code == PLUS_EXPR && sgn1 != sgn2)
2019 || (code == MINUS_EXPR && sgn1 == sgn2))
2020 && is_overflow_infinity (val1)
2021 && is_overflow_infinity (val2))
2022 return NULL_TREE;
2023
2024 /* Don't try to handle division or shifting of infinities. */
2025 if ((code == TRUNC_DIV_EXPR
2026 || code == FLOOR_DIV_EXPR
2027 || code == CEIL_DIV_EXPR
2028 || code == EXACT_DIV_EXPR
2029 || code == ROUND_DIV_EXPR
2030 || code == RSHIFT_EXPR)
2031 && (is_overflow_infinity (val1)
2032 || is_overflow_infinity (val2)))
2033 return NULL_TREE;
2034
2035 /* Notice that we only need to handle the restricted set of
2036 operations handled by extract_range_from_binary_expr.
2037 Among them, only multiplication, addition and subtraction
2038 can yield overflow without overflown operands because we
2039 are working with integral types only... except in the
2040 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2041 for division too. */
2042
2043 /* For multiplication, the sign of the overflow is given
2044 by the comparison of the signs of the operands. */
2045 if ((code == MULT_EXPR && sgn1 == sgn2)
2046 /* For addition, the operands must be of the same sign
2047 to yield an overflow. Its sign is therefore that
2048 of one of the operands, for example the first. For
2049 infinite operands X + -INF is negative, not positive. */
2050 || (code == PLUS_EXPR
2051 && (sgn1 >= 0
2052 ? !is_negative_overflow_infinity (val2)
2053 : is_positive_overflow_infinity (val2)))
2054 /* For subtraction, non-infinite operands must be of
2055 different signs to yield an overflow. Its sign is
2056 therefore that of the first operand or the opposite of
2057 that of the second operand. A first operand of 0 counts
2058 as positive here, for the corner case 0 - (-INF), which
2059 overflows, but must yield +INF. For infinite operands 0
2060 - INF is negative, not positive. */
2061 || (code == MINUS_EXPR
2062 && (sgn1 >= 0
2063 ? !is_positive_overflow_infinity (val2)
2064 : is_negative_overflow_infinity (val2)))
2065 /* We only get in here with positive shift count, so the
2066 overflow direction is the same as the sign of val1.
2067 Actually rshift does not overflow at all, but we only
2068 handle the case of shifting overflowed -INF and +INF. */
2069 || (code == RSHIFT_EXPR
2070 && sgn1 >= 0)
2071 /* For division, the only case is -INF / -1 = +INF. */
2072 || code == TRUNC_DIV_EXPR
2073 || code == FLOOR_DIV_EXPR
2074 || code == CEIL_DIV_EXPR
2075 || code == EXACT_DIV_EXPR
2076 || code == ROUND_DIV_EXPR)
2077 return (needs_overflow_infinity (TREE_TYPE (res))
2078 ? positive_overflow_infinity (TREE_TYPE (res))
2079 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2080 else
2081 return (needs_overflow_infinity (TREE_TYPE (res))
2082 ? negative_overflow_infinity (TREE_TYPE (res))
2083 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2084 }
2085
2086 return res;
2087 }
2088
2089
2090 /* For range VR compute two double_int bitmasks. In the *MAY_BE_NONZERO
2091 bitmask, if some bit is unset, then that bit is 0 for all numbers in
2092 the range; otherwise it might be 0 or 1. In the *MUST_BE_NONZERO
2093 bitmask, if some bit is set, then that bit is 1 for all numbers in
2094 the range; otherwise it might be 0 or 1. */
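/* For instance, for the range [4, 7] (binary 100 to 111) every
value has bit 2 set and may or may not have bits 0 and 1 set,
so *MUST_BE_NONZERO is 0x4 and *MAY_BE_NONZERO is 0x7. */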
2095
2096 static bool
2097 zero_nonzero_bits_from_vr (value_range_t *vr, double_int *may_be_nonzero,
2098 double_int *must_be_nonzero)
2099 {
2100 if (range_int_cst_p (vr))
2101 {
2102 if (range_int_cst_singleton_p (vr))
2103 {
2104 *may_be_nonzero = tree_to_double_int (vr->min);
2105 *must_be_nonzero = *may_be_nonzero;
2106 return true;
2107 }
2108 if (tree_int_cst_sgn (vr->min) >= 0)
2109 {
2110 double_int dmin = tree_to_double_int (vr->min);
2111 double_int dmax = tree_to_double_int (vr->max);
2112 double_int xor_mask = double_int_xor (dmin, dmax);
2113 *may_be_nonzero = double_int_ior (dmin, dmax);
2114 *must_be_nonzero = double_int_and (dmin, dmax);
2115 if (xor_mask.high != 0)
2116 {
2117 unsigned HOST_WIDE_INT mask
2118 = ((unsigned HOST_WIDE_INT) 1
2119 << floor_log2 (xor_mask.high)) - 1;
2120 may_be_nonzero->low = ALL_ONES;
2121 may_be_nonzero->high |= mask;
2122 must_be_nonzero->low = 0;
2123 must_be_nonzero->high &= ~mask;
2124 }
2125 else if (xor_mask.low != 0)
2126 {
2127 unsigned HOST_WIDE_INT mask
2128 = ((unsigned HOST_WIDE_INT) 1
2129 << floor_log2 (xor_mask.low)) - 1;
2130 may_be_nonzero->low |= mask;
2131 must_be_nonzero->low &= ~mask;
2132 }
2133 return true;
2134 }
2135 }
2136 may_be_nonzero->low = ALL_ONES;
2137 may_be_nonzero->high = ALL_ONES;
2138 must_be_nonzero->low = 0;
2139 must_be_nonzero->high = 0;
2140 return false;
2141 }
2142
2143
2144 /* Extract range information from a binary expression EXPR based on
2145 the ranges of each of its operands and the expression code. */
2146
2147 static void
2148 extract_range_from_binary_expr (value_range_t *vr,
2149 enum tree_code code,
2150 tree expr_type, tree op0, tree op1)
2151 {
2152 enum value_range_type type;
2153 tree min, max;
2154 int cmp;
2155 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2156 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2157
2158 /* Not all binary expressions can be applied to ranges in a
2159 meaningful way. Handle only arithmetic operations. */
2160 if (code != PLUS_EXPR
2161 && code != MINUS_EXPR
2162 && code != POINTER_PLUS_EXPR
2163 && code != MULT_EXPR
2164 && code != TRUNC_DIV_EXPR
2165 && code != FLOOR_DIV_EXPR
2166 && code != CEIL_DIV_EXPR
2167 && code != EXACT_DIV_EXPR
2168 && code != ROUND_DIV_EXPR
2169 && code != TRUNC_MOD_EXPR
2170 && code != RSHIFT_EXPR
2171 && code != MIN_EXPR
2172 && code != MAX_EXPR
2173 && code != BIT_AND_EXPR
2174 && code != BIT_IOR_EXPR
2175 && code != TRUTH_AND_EXPR
2176 && code != TRUTH_OR_EXPR)
2177 {
2178 /* We can still do constant propagation here. */
2179 tree const_op0 = op_with_constant_singleton_value_range (op0);
2180 tree const_op1 = op_with_constant_singleton_value_range (op1);
2181 if (const_op0 || const_op1)
2182 {
2183 tree tem = fold_binary (code, expr_type,
2184 const_op0 ? const_op0 : op0,
2185 const_op1 ? const_op1 : op1);
2186 if (tem
2187 && is_gimple_min_invariant (tem)
2188 && !is_overflow_infinity (tem))
2189 {
2190 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2191 return;
2192 }
2193 }
2194 set_value_range_to_varying (vr);
2195 return;
2196 }
2197
2198 /* Get value ranges for each operand. For constant operands, create
2199 a new value range with the operand to simplify processing. */
2200 if (TREE_CODE (op0) == SSA_NAME)
2201 vr0 = *(get_value_range (op0));
2202 else if (is_gimple_min_invariant (op0))
2203 set_value_range_to_value (&vr0, op0, NULL);
2204 else
2205 set_value_range_to_varying (&vr0);
2206
2207 if (TREE_CODE (op1) == SSA_NAME)
2208 vr1 = *(get_value_range (op1));
2209 else if (is_gimple_min_invariant (op1))
2210 set_value_range_to_value (&vr1, op1, NULL);
2211 else
2212 set_value_range_to_varying (&vr1);
2213
2214 /* If either range is UNDEFINED, so is the result. */
2215 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2216 {
2217 set_value_range_to_undefined (vr);
2218 return;
2219 }
2220
2221 /* The type of the resulting value range defaults to VR0.TYPE. */
2222 type = vr0.type;
2223
2224 /* Refuse to operate on VARYING ranges, ranges of different kinds
2225 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2226 because we may be able to derive a useful range even if one of
2227 the operands is VR_VARYING or symbolic range. Similarly for
2228 divisions. TODO, we may be able to derive anti-ranges in
2229 some cases. */
2230 if (code != BIT_AND_EXPR
2231 && code != TRUTH_AND_EXPR
2232 && code != TRUTH_OR_EXPR
2233 && code != TRUNC_DIV_EXPR
2234 && code != FLOOR_DIV_EXPR
2235 && code != CEIL_DIV_EXPR
2236 && code != EXACT_DIV_EXPR
2237 && code != ROUND_DIV_EXPR
2238 && code != TRUNC_MOD_EXPR
2239 && (vr0.type == VR_VARYING
2240 || vr1.type == VR_VARYING
2241 || vr0.type != vr1.type
2242 || symbolic_range_p (&vr0)
2243 || symbolic_range_p (&vr1)))
2244 {
2245 set_value_range_to_varying (vr);
2246 return;
2247 }
2248
2249 /* Now evaluate the expression to determine the new range. */
2250 if (POINTER_TYPE_P (expr_type)
2251 || POINTER_TYPE_P (TREE_TYPE (op0))
2252 || POINTER_TYPE_P (TREE_TYPE (op1)))
2253 {
2254 if (code == MIN_EXPR || code == MAX_EXPR)
2255 {
2256 /* For MIN/MAX expressions with pointers, we only care about
2257 nullness: if both are non-null, then the result is nonnull.
2258 If both are null, then the result is null. Otherwise they
2259 are varying. */
2260 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2261 set_value_range_to_nonnull (vr, expr_type);
2262 else if (range_is_null (&vr0) && range_is_null (&vr1))
2263 set_value_range_to_null (vr, expr_type);
2264 else
2265 set_value_range_to_varying (vr);
2266
2267 return;
2268 }
2269 if (code == POINTER_PLUS_EXPR)
2270 {
2271 /* For pointer types, we are really only interested in asserting
2272 whether the expression evaluates to non-NULL. */
2273 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2274 set_value_range_to_nonnull (vr, expr_type);
2275 else if (range_is_null (&vr0) && range_is_null (&vr1))
2276 set_value_range_to_null (vr, expr_type);
2277 else
2278 set_value_range_to_varying (vr);
2279 }
2280 else if (code == BIT_AND_EXPR)
2281 {
2282 /* For pointer types, we are really only interested in asserting
2283 whether the expression evaluates to non-NULL. */
2284 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2285 set_value_range_to_nonnull (vr, expr_type);
2286 else if (range_is_null (&vr0) || range_is_null (&vr1))
2287 set_value_range_to_null (vr, expr_type);
2288 else
2289 set_value_range_to_varying (vr);
2290 }
2291 else
2292 gcc_unreachable ();
2293
2294 return;
2295 }
2296
2297 /* For integer ranges, apply the operation to each end of the
2298 range and see what we end up with. */
2299 if (code == TRUTH_AND_EXPR
2300 || code == TRUTH_OR_EXPR)
2301 {
2302 /* If one of the operands is zero, we know that the whole
2303 expression evaluates to zero. */
2304 if (code == TRUTH_AND_EXPR
2305 && ((vr0.type == VR_RANGE
2306 && integer_zerop (vr0.min)
2307 && integer_zerop (vr0.max))
2308 || (vr1.type == VR_RANGE
2309 && integer_zerop (vr1.min)
2310 && integer_zerop (vr1.max))))
2311 {
2312 type = VR_RANGE;
2313 min = max = build_int_cst (expr_type, 0);
2314 }
2315 /* If one of the operands is one, we know that the whole
2316 expression evaluates to one. */
2317 else if (code == TRUTH_OR_EXPR
2318 && ((vr0.type == VR_RANGE
2319 && integer_onep (vr0.min)
2320 && integer_onep (vr0.max))
2321 || (vr1.type == VR_RANGE
2322 && integer_onep (vr1.min)
2323 && integer_onep (vr1.max))))
2324 {
2325 type = VR_RANGE;
2326 min = max = build_int_cst (expr_type, 1);
2327 }
2328 else if (vr0.type != VR_VARYING
2329 && vr1.type != VR_VARYING
2330 && vr0.type == vr1.type
2331 && !symbolic_range_p (&vr0)
2332 && !overflow_infinity_range_p (&vr0)
2333 && !symbolic_range_p (&vr1)
2334 && !overflow_infinity_range_p (&vr1))
2335 {
2336 /* Boolean expressions cannot be folded with int_const_binop. */
2337 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2338 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2339 }
2340 else
2341 {
2342 /* The result of a TRUTH_*_EXPR is always true or false. */
2343 set_value_range_to_truthvalue (vr, expr_type);
2344 return;
2345 }
2346 }
2347 else if (code == PLUS_EXPR
2348 || code == MIN_EXPR
2349 || code == MAX_EXPR)
2350 {
2351 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2352 VR_VARYING. It would take more effort to compute a precise
2353 range for such a case. For example, if we have op0 == 1 and
2354 op1 == -1 with their ranges both being ~[0,0], we would have
2355 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2356 Note that we are guaranteed to have vr0.type == vr1.type at
2357 this point. */
2358 if (vr0.type == VR_ANTI_RANGE)
2359 {
2360 if (code == PLUS_EXPR)
2361 {
2362 set_value_range_to_varying (vr);
2363 return;
2364 }
2365 /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
2366 the resulting VR_ANTI_RANGE is the same - intersection
2367 of the two ranges. */
2368 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2369 max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
2370 }
2371 else
2372 {
2373 /* For operations that make the resulting range directly
2374 proportional to the original ranges, apply the operation to
2375 the same end of each range. */
2376 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2377 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2378 }
2379
2380 /* If both additions overflowed, the range kind is still correct.
2381 This happens regularly with subtracting something in unsigned
2382 arithmetic.
2383 ??? See PR30318 for all the cases we do not handle. */
2384 if (code == PLUS_EXPR
2385 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2386 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2387 {
2388 min = build_int_cst_wide (TREE_TYPE (min),
2389 TREE_INT_CST_LOW (min),
2390 TREE_INT_CST_HIGH (min));
2391 max = build_int_cst_wide (TREE_TYPE (max),
2392 TREE_INT_CST_LOW (max),
2393 TREE_INT_CST_HIGH (max));
2394 }
2395 }
2396 else if (code == MULT_EXPR
2397 || code == TRUNC_DIV_EXPR
2398 || code == FLOOR_DIV_EXPR
2399 || code == CEIL_DIV_EXPR
2400 || code == EXACT_DIV_EXPR
2401 || code == ROUND_DIV_EXPR
2402 || code == RSHIFT_EXPR)
2403 {
2404 tree val[4];
2405 size_t i;
2406 bool sop;
2407
2408 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2409 drop to VR_VARYING. It would take more effort to compute a
2410 precise range for such a case. For example, if we have
2411 op0 == 65536 and op1 == 65536 with their ranges both being
2412 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2413 we cannot claim that the product is in ~[0,0]. Note that we
2414 are guaranteed to have vr0.type == vr1.type at this
2415 point. */
2416 if (code == MULT_EXPR
2417 && vr0.type == VR_ANTI_RANGE
2418 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2419 {
2420 set_value_range_to_varying (vr);
2421 return;
2422 }
2423
2424 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2425 then drop to VR_VARYING. Outside of this range we get undefined
2426 behavior from the shift operation. We cannot even trust
2427 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2428 shifts, and the operation at the tree level may be widened. */
2429 if (code == RSHIFT_EXPR)
2430 {
2431 if (vr1.type == VR_ANTI_RANGE
2432 || !vrp_expr_computes_nonnegative (op1, &sop)
2433 || (operand_less_p
2434 (build_int_cst (TREE_TYPE (vr1.max),
2435 TYPE_PRECISION (expr_type) - 1),
2436 vr1.max) != 0))
2437 {
2438 set_value_range_to_varying (vr);
2439 return;
2440 }
2441 }
2442
2443 else if ((code == TRUNC_DIV_EXPR
2444 || code == FLOOR_DIV_EXPR
2445 || code == CEIL_DIV_EXPR
2446 || code == EXACT_DIV_EXPR
2447 || code == ROUND_DIV_EXPR)
2448 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2449 {
2450 /* For division, if op1 has VR_RANGE but op0 does not, something
2451 can be deduced just from that range. Say [min, max] / [4, max]
2452 gives the range [min / 4, max / 4]. */
2453 if (vr1.type == VR_RANGE
2454 && !symbolic_range_p (&vr1)
2455 && !range_includes_zero_p (&vr1))
2456 {
2457 vr0.type = type = VR_RANGE;
2458 vr0.min = vrp_val_min (TREE_TYPE (op0));
2459 vr0.max = vrp_val_max (TREE_TYPE (op1));
2460 }
2461 else
2462 {
2463 set_value_range_to_varying (vr);
2464 return;
2465 }
2466 }
2467
2468 /* For divisions, if flag_non_call_exceptions is true, we must
2469 not eliminate a division by zero. */
2470 if ((code == TRUNC_DIV_EXPR
2471 || code == FLOOR_DIV_EXPR
2472 || code == CEIL_DIV_EXPR
2473 || code == EXACT_DIV_EXPR
2474 || code == ROUND_DIV_EXPR)
2475 && cfun->can_throw_non_call_exceptions
2476 && (vr1.type != VR_RANGE
2477 || symbolic_range_p (&vr1)
2478 || range_includes_zero_p (&vr1)))
2479 {
2480 set_value_range_to_varying (vr);
2481 return;
2482 }
2483
2484 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2485 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2486 include 0. */
2487 if ((code == TRUNC_DIV_EXPR
2488 || code == FLOOR_DIV_EXPR
2489 || code == CEIL_DIV_EXPR
2490 || code == EXACT_DIV_EXPR
2491 || code == ROUND_DIV_EXPR)
2492 && vr0.type == VR_RANGE
2493 && (vr1.type != VR_RANGE
2494 || symbolic_range_p (&vr1)
2495 || range_includes_zero_p (&vr1)))
2496 {
2497 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2498 int cmp;
2499
2500 sop = false;
2501 min = NULL_TREE;
2502 max = NULL_TREE;
2503 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2504 {
2505 /* For unsigned division or when divisor is known
2506 to be non-negative, the range has to cover
2507 all numbers from 0 to max for positive max
2508 and all numbers from min to 0 for negative min. */
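/* For instance, dividing [-10, 30] by a divisor known to be
non-negative (whose range may be unknown or include zero)
yields a result within [-10, 30], since the division only
moves values towards zero. */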
2509 cmp = compare_values (vr0.max, zero);
2510 if (cmp == -1)
2511 max = zero;
2512 else if (cmp == 0 || cmp == 1)
2513 max = vr0.max;
2514 else
2515 type = VR_VARYING;
2516 cmp = compare_values (vr0.min, zero);
2517 if (cmp == 1)
2518 min = zero;
2519 else if (cmp == 0 || cmp == -1)
2520 min = vr0.min;
2521 else
2522 type = VR_VARYING;
2523 }
2524 else
2525 {
2526 /* Otherwise the range is -max .. max or min .. -min
2527 depending on which bound is bigger in absolute value,
2528 as the division can change the sign. */
2529 abs_extent_range (vr, vr0.min, vr0.max);
2530 return;
2531 }
2532 if (type == VR_VARYING)
2533 {
2534 set_value_range_to_varying (vr);
2535 return;
2536 }
2537 }
2538
2539 /* Multiplications and divisions are a bit tricky to handle,
2540 depending on the mix of signs we have in the two ranges, we
2541 need to operate on different values to get the minimum and
2542 maximum values for the new range. One approach is to figure
2543 out all the variations of range combinations and do the
2544 operations.
2545
2546 However, this involves several calls to compare_values and it
2547 is pretty convoluted. It's simpler to do the 4 operations
2548 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2549 MAX1) and then figure the smallest and largest values to form
2550 the new range. */
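/* For instance, for [-2, 3] * [4, 5] the four products are
-8, -10, 12 and 15, so the resulting range is [-10, 15]. */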
2551 else
2552 {
2553 gcc_assert ((vr0.type == VR_RANGE
2554 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2555 && vr0.type == vr1.type);
2556
2557 /* Compute the 4 cross operations. */
2558 sop = false;
2559 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2560 if (val[0] == NULL_TREE)
2561 sop = true;
2562
2563 if (vr1.max == vr1.min)
2564 val[1] = NULL_TREE;
2565 else
2566 {
2567 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2568 if (val[1] == NULL_TREE)
2569 sop = true;
2570 }
2571
2572 if (vr0.max == vr0.min)
2573 val[2] = NULL_TREE;
2574 else
2575 {
2576 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2577 if (val[2] == NULL_TREE)
2578 sop = true;
2579 }
2580
2581 if (vr0.min == vr0.max || vr1.min == vr1.max)
2582 val[3] = NULL_TREE;
2583 else
2584 {
2585 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2586 if (val[3] == NULL_TREE)
2587 sop = true;
2588 }
2589
2590 if (sop)
2591 {
2592 set_value_range_to_varying (vr);
2593 return;
2594 }
2595
2596 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2597 of VAL[i]. */
2598 min = val[0];
2599 max = val[0];
2600 for (i = 1; i < 4; i++)
2601 {
2602 if (!is_gimple_min_invariant (min)
2603 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2604 || !is_gimple_min_invariant (max)
2605 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2606 break;
2607
2608 if (val[i])
2609 {
2610 if (!is_gimple_min_invariant (val[i])
2611 || (TREE_OVERFLOW (val[i])
2612 && !is_overflow_infinity (val[i])))
2613 {
2614 /* If we found an overflowed value, set MIN and MAX
2615 to it so that we set the resulting range to
2616 VARYING. */
2617 min = max = val[i];
2618 break;
2619 }
2620
2621 if (compare_values (val[i], min) == -1)
2622 min = val[i];
2623
2624 if (compare_values (val[i], max) == 1)
2625 max = val[i];
2626 }
2627 }
2628 }
2629 }
2630 else if (code == TRUNC_MOD_EXPR)
2631 {
2632 bool sop = false;
2633 if (vr1.type != VR_RANGE
2634 || symbolic_range_p (&vr1)
2635 || range_includes_zero_p (&vr1)
2636 || vrp_val_is_min (vr1.min))
2637 {
2638 set_value_range_to_varying (vr);
2639 return;
2640 }
2641 type = VR_RANGE;
2642 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
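/* For instance, for x % [3, 7] the bound is 7 - 1 = 6, giving
[0, 6] for a non-negative dividend and [-6, 6] otherwise. */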
2643 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
2644 if (tree_int_cst_lt (max, vr1.max))
2645 max = vr1.max;
2646 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2647 /* If the dividend is non-negative the modulus will be
2648 non-negative as well. */
2649 if (TYPE_UNSIGNED (TREE_TYPE (max))
2650 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
2651 min = build_int_cst (TREE_TYPE (max), 0);
2652 else
2653 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
2654 }
2655 else if (code == MINUS_EXPR)
2656 {
2657 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2658 VR_VARYING. It would take more effort to compute a precise
2659 range for such a case. For example, if we have op0 == 1 and
2660 op1 == 1 with their ranges both being ~[0,0], we would have
2661 op0 - op1 == 0, so we cannot claim that the difference is in
2662 ~[0,0]. Note that we are guaranteed to have
2663 vr0.type == vr1.type at this point. */
2664 if (vr0.type == VR_ANTI_RANGE)
2665 {
2666 set_value_range_to_varying (vr);
2667 return;
2668 }
2669
2670 /* For MINUS_EXPR, apply the operation to the opposite ends of
2671 each range. */
2672 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2673 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2674 }
2675 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR)
2676 {
2677 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2678 bool int_cst_range0, int_cst_range1;
2679 double_int may_be_nonzero0, may_be_nonzero1;
2680 double_int must_be_nonzero0, must_be_nonzero1;
2681
2682 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2683 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2684 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
2685 &must_be_nonzero0);
2686 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
2687 &must_be_nonzero1);
2688
2689 type = VR_RANGE;
2690 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2691 min = max = int_const_binop (code, vr0.max, vr1.max);
2692 else if (!int_cst_range0 && !int_cst_range1)
2693 {
2694 set_value_range_to_varying (vr);
2695 return;
2696 }
2697 else if (code == BIT_AND_EXPR)
2698 {
2699 min = double_int_to_tree (expr_type,
2700 double_int_and (must_be_nonzero0,
2701 must_be_nonzero1));
2702 max = double_int_to_tree (expr_type,
2703 double_int_and (may_be_nonzero0,
2704 may_be_nonzero1));
2705 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2706 min = NULL_TREE;
2707 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2708 max = NULL_TREE;
2709 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
2710 {
2711 if (min == NULL_TREE)
2712 min = build_int_cst (expr_type, 0);
2713 if (max == NULL_TREE || tree_int_cst_lt (vr0.max, max))
2714 max = vr0.max;
2715 }
2716 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
2717 {
2718 if (min == NULL_TREE)
2719 min = build_int_cst (expr_type, 0);
2720 if (max == NULL_TREE || tree_int_cst_lt (vr1.max, max))
2721 max = vr1.max;
2722 }
2723 }
2724 else if (!int_cst_range0
2725 || !int_cst_range1
2726 || tree_int_cst_sgn (vr0.min) < 0
2727 || tree_int_cst_sgn (vr1.min) < 0)
2728 {
2729 set_value_range_to_varying (vr);
2730 return;
2731 }
2732 else
2733 {
2734 min = double_int_to_tree (expr_type,
2735 double_int_ior (must_be_nonzero0,
2736 must_be_nonzero1));
2737 max = double_int_to_tree (expr_type,
2738 double_int_ior (may_be_nonzero0,
2739 may_be_nonzero1));
2740 if (TREE_OVERFLOW (min) || tree_int_cst_sgn (min) < 0)
2741 min = vr0.min;
2742 else
2743 min = vrp_int_const_binop (MAX_EXPR, min, vr0.min);
2744 if (TREE_OVERFLOW (max) || tree_int_cst_sgn (max) < 0)
2745 max = NULL_TREE;
2746 min = vrp_int_const_binop (MAX_EXPR, min, vr1.min);
2747 }
2748 }
2749 else
2750 gcc_unreachable ();
2751
2752 /* If either MIN or MAX overflowed, then set the resulting range to
2753 VARYING. But we do accept an overflow infinity
2754 representation. */
2755 if (min == NULL_TREE
2756 || !is_gimple_min_invariant (min)
2757 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2758 || max == NULL_TREE
2759 || !is_gimple_min_invariant (max)
2760 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2761 {
2762 set_value_range_to_varying (vr);
2763 return;
2764 }
2765
2766 /* We punt if:
2767 1) [-INF, +INF]
2768 2) [-INF, +-INF(OVF)]
2769 3) [+-INF(OVF), +INF]
2770 4) [+-INF(OVF), +-INF(OVF)]
2771 We learn nothing when we have INF and INF(OVF) on both sides.
2772 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2773 overflow. */
2774 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2775 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2776 {
2777 set_value_range_to_varying (vr);
2778 return;
2779 }
2780
2781 cmp = compare_values (min, max);
2782 if (cmp == -2 || cmp == 1)
2783 {
2784 /* If the new range has its limits swapped around (MIN > MAX),
2785 then the operation caused one of them to wrap around, mark
2786 the new range VARYING. */
2787 set_value_range_to_varying (vr);
2788 }
2789 else
2790 set_value_range (vr, type, min, max, NULL);
2791 }
2792
2793
2794 /* Extract range information from a unary expression EXPR based on
2795 the range of its operand and the expression code. */
2796
2797 static void
2798 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2799 tree type, tree op0)
2800 {
2801 tree min, max;
2802 int cmp;
2803 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2804
2805 /* Refuse to operate on certain unary expressions for which we
2806 cannot easily determine a resulting range. */
2807 if (code == FIX_TRUNC_EXPR
2808 || code == FLOAT_EXPR
2809 || code == BIT_NOT_EXPR
2810 || code == CONJ_EXPR)
2811 {
2812 /* We can still do constant propagation here. */
2813 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2814 {
2815 tree tem = fold_unary (code, type, op0);
2816 if (tem
2817 && is_gimple_min_invariant (tem)
2818 && !is_overflow_infinity (tem))
2819 {
2820 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2821 return;
2822 }
2823 }
2824 set_value_range_to_varying (vr);
2825 return;
2826 }
2827
2828 /* Get value ranges for the operand. For constant operands, create
2829 a new value range with the operand to simplify processing. */
2830 if (TREE_CODE (op0) == SSA_NAME)
2831 vr0 = *(get_value_range (op0));
2832 else if (is_gimple_min_invariant (op0))
2833 set_value_range_to_value (&vr0, op0, NULL);
2834 else
2835 set_value_range_to_varying (&vr0);
2836
2837 /* If VR0 is UNDEFINED, so is the result. */
2838 if (vr0.type == VR_UNDEFINED)
2839 {
2840 set_value_range_to_undefined (vr);
2841 return;
2842 }
2843
2844 /* Refuse to operate on symbolic ranges, or if the operand is
2845 neither a pointer nor an integral type. */
2846 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2847 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2848 || (vr0.type != VR_VARYING
2849 && symbolic_range_p (&vr0)))
2850 {
2851 set_value_range_to_varying (vr);
2852 return;
2853 }
2854
2855 /* If the expression involves pointers, we are only interested in
2856 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2857 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2858 {
2859 bool sop;
2860
2861 sop = false;
2862 if (range_is_nonnull (&vr0)
2863 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2864 && !sop))
2865 set_value_range_to_nonnull (vr, type);
2866 else if (range_is_null (&vr0))
2867 set_value_range_to_null (vr, type);
2868 else
2869 set_value_range_to_varying (vr);
2870
2871 return;
2872 }
2873
2874 /* Handle unary expressions on integer ranges. */
2875 if (CONVERT_EXPR_CODE_P (code)
2876 && INTEGRAL_TYPE_P (type)
2877 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2878 {
2879 tree inner_type = TREE_TYPE (op0);
2880 tree outer_type = type;
2881
2882 /* If VR0 is varying and we increase the type precision, assume
2883 a full range for the following transformation. */
2884 if (vr0.type == VR_VARYING
2885 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2886 {
2887 vr0.type = VR_RANGE;
2888 vr0.min = TYPE_MIN_VALUE (inner_type);
2889 vr0.max = TYPE_MAX_VALUE (inner_type);
2890 }
2891
2892 /* If VR0 is a constant range or anti-range and the conversion is
2893 not truncating we can convert the min and max values and
2894 canonicalize the resulting range. Otherwise we can do the
2895 conversion if the size of the range is less than what the
2896 precision of the target type can represent and the range is
2897 not an anti-range. */
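/* For instance (using 16-bit and 8-bit types purely for
illustration), widening the range [-3, 5] leaves it as
[-3, 5], while narrowing [300, 304] to an 8-bit unsigned
type is still allowed because the range spans only five
values, and it becomes [44, 48] after the values wrap. */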
2898 if ((vr0.type == VR_RANGE
2899 || vr0.type == VR_ANTI_RANGE)
2900 && TREE_CODE (vr0.min) == INTEGER_CST
2901 && TREE_CODE (vr0.max) == INTEGER_CST
2902 && (!is_overflow_infinity (vr0.min)
2903 || (vr0.type == VR_RANGE
2904 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2905 && needs_overflow_infinity (outer_type)
2906 && supports_overflow_infinity (outer_type)))
2907 && (!is_overflow_infinity (vr0.max)
2908 || (vr0.type == VR_RANGE
2909 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2910 && needs_overflow_infinity (outer_type)
2911 && supports_overflow_infinity (outer_type)))
2912 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2913 || (vr0.type == VR_RANGE
2914 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2915 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
2916 size_int (TYPE_PRECISION (outer_type)))))))
2917 {
2918 tree new_min, new_max;
2919 new_min = force_fit_type_double (outer_type,
2920 tree_to_double_int (vr0.min),
2921 0, false);
2922 new_max = force_fit_type_double (outer_type,
2923 tree_to_double_int (vr0.max),
2924 0, false);
2925 if (is_overflow_infinity (vr0.min))
2926 new_min = negative_overflow_infinity (outer_type);
2927 if (is_overflow_infinity (vr0.max))
2928 new_max = positive_overflow_infinity (outer_type);
2929 set_and_canonicalize_value_range (vr, vr0.type,
2930 new_min, new_max, NULL);
2931 return;
2932 }
2933
2934 set_value_range_to_varying (vr);
2935 return;
2936 }
2937
2938 /* Conversion of a VR_VARYING value to a wider type can result
2939 in a usable range. So wait until after we've handled conversions
2940 before dropping the result to VR_VARYING if we had a source
2941 operand that is VR_VARYING. */
2942 if (vr0.type == VR_VARYING)
2943 {
2944 set_value_range_to_varying (vr);
2945 return;
2946 }
2947
2948 /* Apply the operation to each end of the range and see what we end
2949 up with. */
2950 if (code == NEGATE_EXPR
2951 && !TYPE_UNSIGNED (type))
2952 {
2953 /* NEGATE_EXPR flips the range around. We need to treat
2954 TYPE_MIN_VALUE specially. */
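/* For instance, -[-3, 7] becomes [-7, 3], while for
-[TYPE_MIN_VALUE, 5] the new maximum overflows and becomes
+INF(OVF) when the type supports it. */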
2955 if (is_positive_overflow_infinity (vr0.max))
2956 min = negative_overflow_infinity (type);
2957 else if (is_negative_overflow_infinity (vr0.max))
2958 min = positive_overflow_infinity (type);
2959 else if (!vrp_val_is_min (vr0.max))
2960 min = fold_unary_to_constant (code, type, vr0.max);
2961 else if (needs_overflow_infinity (type))
2962 {
2963 if (supports_overflow_infinity (type)
2964 && !is_overflow_infinity (vr0.min)
2965 && !vrp_val_is_min (vr0.min))
2966 min = positive_overflow_infinity (type);
2967 else
2968 {
2969 set_value_range_to_varying (vr);
2970 return;
2971 }
2972 }
2973 else
2974 min = TYPE_MIN_VALUE (type);
2975
2976 if (is_positive_overflow_infinity (vr0.min))
2977 max = negative_overflow_infinity (type);
2978 else if (is_negative_overflow_infinity (vr0.min))
2979 max = positive_overflow_infinity (type);
2980 else if (!vrp_val_is_min (vr0.min))
2981 max = fold_unary_to_constant (code, type, vr0.min);
2982 else if (needs_overflow_infinity (type))
2983 {
2984 if (supports_overflow_infinity (type))
2985 max = positive_overflow_infinity (type);
2986 else
2987 {
2988 set_value_range_to_varying (vr);
2989 return;
2990 }
2991 }
2992 else
2993 max = TYPE_MIN_VALUE (type);
2994 }
2995 else if (code == NEGATE_EXPR
2996 && TYPE_UNSIGNED (type))
2997 {
2998 if (!range_includes_zero_p (&vr0))
2999 {
3000 max = fold_unary_to_constant (code, type, vr0.min);
3001 min = fold_unary_to_constant (code, type, vr0.max);
3002 }
3003 else
3004 {
3005 if (range_is_null (&vr0))
3006 set_value_range_to_null (vr, type);
3007 else
3008 set_value_range_to_varying (vr);
3009 return;
3010 }
3011 }
3012 else if (code == ABS_EXPR
3013 && !TYPE_UNSIGNED (type))
3014 {
3015 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3016 useful range. */
3017 if (!TYPE_OVERFLOW_UNDEFINED (type)
3018 && ((vr0.type == VR_RANGE
3019 && vrp_val_is_min (vr0.min))
3020 || (vr0.type == VR_ANTI_RANGE
3021 && !vrp_val_is_min (vr0.min)
3022 && !range_includes_zero_p (&vr0))))
3023 {
3024 set_value_range_to_varying (vr);
3025 return;
3026 }
3027
3028 /* ABS_EXPR may flip the range around, if the original range
3029 included negative values. */
3030 if (is_overflow_infinity (vr0.min))
3031 min = positive_overflow_infinity (type);
3032 else if (!vrp_val_is_min (vr0.min))
3033 min = fold_unary_to_constant (code, type, vr0.min);
3034 else if (!needs_overflow_infinity (type))
3035 min = TYPE_MAX_VALUE (type);
3036 else if (supports_overflow_infinity (type))
3037 min = positive_overflow_infinity (type);
3038 else
3039 {
3040 set_value_range_to_varying (vr);
3041 return;
3042 }
3043
3044 if (is_overflow_infinity (vr0.max))
3045 max = positive_overflow_infinity (type);
3046 else if (!vrp_val_is_min (vr0.max))
3047 max = fold_unary_to_constant (code, type, vr0.max);
3048 else if (!needs_overflow_infinity (type))
3049 max = TYPE_MAX_VALUE (type);
3050 else if (supports_overflow_infinity (type)
3051 /* We shouldn't generate [+INF, +INF] as set_value_range
3052 doesn't like this and ICEs. */
3053 && !is_positive_overflow_infinity (min))
3054 max = positive_overflow_infinity (type);
3055 else
3056 {
3057 set_value_range_to_varying (vr);
3058 return;
3059 }
3060
3061 cmp = compare_values (min, max);
3062
3063 /* If a VR_ANTI_RANGE contains zero, then we have
3064 ~[-INF, min(MIN, MAX)]. */
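/* For instance, the absolute value of ~[-2, 5] is ~[-INF, 2]:
any value outside [-2, 5] has an absolute value of at least
3. */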
3065 if (vr0.type == VR_ANTI_RANGE)
3066 {
3067 if (range_includes_zero_p (&vr0))
3068 {
3069 /* Take the lower of the two values. */
3070 if (cmp != 1)
3071 max = min;
3072
3073 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3074 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3075 flag_wrapv is set and the original anti-range doesn't include
3076 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3077 if (TYPE_OVERFLOW_WRAPS (type))
3078 {
3079 tree type_min_value = TYPE_MIN_VALUE (type);
3080
3081 min = (vr0.min != type_min_value
3082 ? int_const_binop (PLUS_EXPR, type_min_value,
3083 integer_one_node)
3084 : type_min_value);
3085 }
3086 else
3087 {
3088 if (overflow_infinity_range_p (&vr0))
3089 min = negative_overflow_infinity (type);
3090 else
3091 min = TYPE_MIN_VALUE (type);
3092 }
3093 }
3094 else
3095 {
3096 /* All else has failed, so create the range [0, INF], even for
3097 flag_wrapv since TYPE_MIN_VALUE is in the original
3098 anti-range. */
3099 vr0.type = VR_RANGE;
3100 min = build_int_cst (type, 0);
3101 if (needs_overflow_infinity (type))
3102 {
3103 if (supports_overflow_infinity (type))
3104 max = positive_overflow_infinity (type);
3105 else
3106 {
3107 set_value_range_to_varying (vr);
3108 return;
3109 }
3110 }
3111 else
3112 max = TYPE_MAX_VALUE (type);
3113 }
3114 }
3115
3116 /* If the range contains zero then we know that the minimum value in the
3117 range will be zero. */
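/* For instance, the absolute value of [-5, 3] is [0, 5]. */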
3118 else if (range_includes_zero_p (&vr0))
3119 {
3120 if (cmp == 1)
3121 max = min;
3122 min = build_int_cst (type, 0);
3123 }
3124 else
3125 {
3126 /* If the range was reversed, swap MIN and MAX. */
3127 if (cmp == 1)
3128 {
3129 tree t = min;
3130 min = max;
3131 max = t;
3132 }
3133 }
3134 }
3135 else
3136 {
3137 /* Otherwise, operate on each end of the range. */
3138 min = fold_unary_to_constant (code, type, vr0.min);
3139 max = fold_unary_to_constant (code, type, vr0.max);
3140
3141 if (needs_overflow_infinity (type))
3142 {
3143 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
3144
3145 /* If both sides have overflowed, we don't know
3146 anything. */
3147 if ((is_overflow_infinity (vr0.min)
3148 || TREE_OVERFLOW (min))
3149 && (is_overflow_infinity (vr0.max)
3150 || TREE_OVERFLOW (max)))
3151 {
3152 set_value_range_to_varying (vr);
3153 return;
3154 }
3155
3156 if (is_overflow_infinity (vr0.min))
3157 min = vr0.min;
3158 else if (TREE_OVERFLOW (min))
3159 {
3160 if (supports_overflow_infinity (type))
3161 min = (tree_int_cst_sgn (min) >= 0
3162 ? positive_overflow_infinity (TREE_TYPE (min))
3163 : negative_overflow_infinity (TREE_TYPE (min)));
3164 else
3165 {
3166 set_value_range_to_varying (vr);
3167 return;
3168 }
3169 }
3170
3171 if (is_overflow_infinity (vr0.max))
3172 max = vr0.max;
3173 else if (TREE_OVERFLOW (max))
3174 {
3175 if (supports_overflow_infinity (type))
3176 max = (tree_int_cst_sgn (max) >= 0
3177 ? positive_overflow_infinity (TREE_TYPE (max))
3178 : negative_overflow_infinity (TREE_TYPE (max)));
3179 else
3180 {
3181 set_value_range_to_varying (vr);
3182 return;
3183 }
3184 }
3185 }
3186 }
3187
3188 cmp = compare_values (min, max);
3189 if (cmp == -2 || cmp == 1)
3190 {
3191 /* If the new range has its limits swapped around (MIN > MAX),
3192 then the operation caused one of them to wrap around, mark
3193 the new range VARYING. */
3194 set_value_range_to_varying (vr);
3195 }
3196 else
3197 set_value_range (vr, vr0.type, min, max, NULL);
3198 }
3199
3200
3201 /* Extract range information from a conditional expression EXPR based on
3202 the ranges of each of its operands and the expression code. */
3203
3204 static void
3205 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3206 {
3207 tree op0, op1;
3208 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3209 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3210
3211 /* Get value ranges for each operand. For constant operands, create
3212 a new value range with the operand to simplify processing. */
3213 op0 = COND_EXPR_THEN (expr);
3214 if (TREE_CODE (op0) == SSA_NAME)
3215 vr0 = *(get_value_range (op0));
3216 else if (is_gimple_min_invariant (op0))
3217 set_value_range_to_value (&vr0, op0, NULL);
3218 else
3219 set_value_range_to_varying (&vr0);
3220
3221 op1 = COND_EXPR_ELSE (expr);
3222 if (TREE_CODE (op1) == SSA_NAME)
3223 vr1 = *(get_value_range (op1));
3224 else if (is_gimple_min_invariant (op1))
3225 set_value_range_to_value (&vr1, op1, NULL);
3226 else
3227 set_value_range_to_varying (&vr1);
3228
3229 /* The resulting value range is the union of the operand ranges. */
3230 vrp_meet (&vr0, &vr1);
3231 copy_value_range (vr, &vr0);
3232 }
3233
3234
3235 /* Extract range information from a comparison expression EXPR based
3236 on the range of its operand and the expression code. */
3237
3238 static void
3239 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3240 tree type, tree op0, tree op1)
3241 {
3242 bool sop = false;
3243 tree val;
3244
3245 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3246 NULL);
3247
3248 /* A disadvantage of using a special infinity as an overflow
3249 representation is that we lose the ability to record overflow
3250 when we don't have an infinity. So we have to ignore a result
3251 which relies on overflow. */
3252
3253 if (val && !is_overflow_infinity (val) && !sop)
3254 {
3255 /* Since this expression was found on the RHS of an assignment,
3256 its type may be different from _Bool. Convert VAL to EXPR's
3257 type. */
3258 val = fold_convert (type, val);
3259 if (is_gimple_min_invariant (val))
3260 set_value_range_to_value (vr, val, vr->equiv);
3261 else
3262 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3263 }
3264 else
3265 /* The result of a comparison is always true or false. */
3266 set_value_range_to_truthvalue (vr, type);
3267 }
3268
3269 /* Try to derive a nonnegative or nonzero range out of STMT relying
3270 primarily on generic routines in fold in conjunction with range data.
3271 Store the result in *VR. */
3272
3273 static void
3274 extract_range_basic (value_range_t *vr, gimple stmt)
3275 {
3276 bool sop = false;
3277 tree type = gimple_expr_type (stmt);
3278
3279 if (INTEGRAL_TYPE_P (type)
3280 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3281 set_value_range_to_nonnegative (vr, type,
3282 sop || stmt_overflow_infinity (stmt));
3283 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3284 && !sop)
3285 set_value_range_to_nonnull (vr, type);
3286 else
3287 set_value_range_to_varying (vr);
3288 }
3289
3290
3291 /* Try to compute a useful range out of assignment STMT and store it
3292 in *VR. */
3293
3294 static void
3295 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3296 {
3297 enum tree_code code = gimple_assign_rhs_code (stmt);
3298
3299 if (code == ASSERT_EXPR)
3300 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3301 else if (code == SSA_NAME)
3302 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3303 else if (TREE_CODE_CLASS (code) == tcc_binary
3304 || code == TRUTH_AND_EXPR
3305 || code == TRUTH_OR_EXPR
3306 || code == TRUTH_XOR_EXPR)
3307 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3308 gimple_expr_type (stmt),
3309 gimple_assign_rhs1 (stmt),
3310 gimple_assign_rhs2 (stmt));
3311 else if (TREE_CODE_CLASS (code) == tcc_unary)
3312 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3313 gimple_expr_type (stmt),
3314 gimple_assign_rhs1 (stmt));
3315 else if (code == COND_EXPR)
3316 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3317 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3318 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3319 gimple_expr_type (stmt),
3320 gimple_assign_rhs1 (stmt),
3321 gimple_assign_rhs2 (stmt));
3322 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3323 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3324 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3325 else
3326 set_value_range_to_varying (vr);
3327
3328 if (vr->type == VR_VARYING)
3329 extract_range_basic (vr, stmt);
3330 }
3331
3332 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3333 would be profitable to adjust VR using scalar evolution information
3334 for VAR. If so, update VR with the new limits. */
3335
3336 static void
3337 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3338 gimple stmt, tree var)
3339 {
3340 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3341 enum ev_direction dir;
3342
3343 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3344 better opportunities than a regular range, but I'm not sure. */
3345 if (vr->type == VR_ANTI_RANGE)
3346 return;
3347
3348 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3349
3350 /* Like in PR19590, scev can return a constant function. */
3351 if (is_gimple_min_invariant (chrec))
3352 {
3353 set_value_range_to_value (vr, chrec, vr->equiv);
3354 return;
3355 }
3356
3357 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3358 return;
3359
3360 init = initial_condition_in_loop_num (chrec, loop->num);
3361 tem = op_with_constant_singleton_value_range (init);
3362 if (tem)
3363 init = tem;
3364 step = evolution_part_in_loop_num (chrec, loop->num);
3365 tem = op_with_constant_singleton_value_range (step);
3366 if (tem)
3367 step = tem;
3368
3369 /* If STEP is symbolic, we can't know whether INIT will be the
3370 minimum or maximum value in the range. Also, unless INIT is
3371 a simple expression, compare_values and possibly other functions
3372 in tree-vrp won't be able to handle it. */
3373 if (step == NULL_TREE
3374 || !is_gimple_min_invariant (step)
3375 || !valid_value_p (init))
3376 return;
3377
3378 dir = scev_direction (chrec);
3379 if (/* Do not adjust ranges if we do not know whether the iv increases
3380 or decreases, ... */
3381 dir == EV_DIR_UNKNOWN
3382 /* ... or if it may wrap. */
3383 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3384 true))
3385 return;
3386
3387 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3388 negative_overflow_infinity and positive_overflow_infinity,
3389 because we have concluded that the loop probably does not
3390 wrap. */
3391
3392 type = TREE_TYPE (var);
3393 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3394 tmin = lower_bound_in_type (type, type);
3395 else
3396 tmin = TYPE_MIN_VALUE (type);
3397 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3398 tmax = upper_bound_in_type (type, type);
3399 else
3400 tmax = TYPE_MAX_VALUE (type);
3401
3402 /* Try to use estimated number of iterations for the loop to constrain the
3403 final value in the evolution. */
3404 if (TREE_CODE (step) == INTEGER_CST
3405 && is_gimple_val (init)
3406 && (TREE_CODE (init) != SSA_NAME
3407 || get_value_range (init)->type == VR_RANGE))
3408 {
3409 double_int nit;
3410
3411 if (estimated_loop_iterations (loop, true, &nit))
3412 {
3413 value_range_t maxvr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3414 double_int dtmp;
3415 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3416 int overflow = 0;
3417
3418 dtmp = double_int_mul_with_sign (tree_to_double_int (step), nit,
3419 unsigned_p, &overflow);
3420 /* If the multiplication overflowed we can't do a meaningful
3421 adjustment. Likewise if the result doesn't fit in the type
3422 of the induction variable. For a signed type we have to
3423 check whether the result has the expected signedness which
3424 is that of the step as number of iterations is unsigned. */
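/* For instance, if INIT is 0, STEP is 4 and the loop is known
to iterate at most 100 times, the evolution is bounded by
0 + 4 * 100 = 400, so for an increasing IV with no previous
range this yields [0, 400]. */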
3425 if (!overflow
3426 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3427 && (unsigned_p
3428 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3429 {
3430 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3431 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3432 TREE_TYPE (init), init, tem);
3433 /* Likewise if the addition did. */
3434 if (maxvr.type == VR_RANGE)
3435 {
3436 tmin = maxvr.min;
3437 tmax = maxvr.max;
3438 }
3439 }
3440 }
3441 }
3442
3443 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3444 {
3445 min = tmin;
3446 max = tmax;
3447
3448 /* For VARYING or UNDEFINED ranges, just about anything we get
3449 from scalar evolutions should be better. */
3450
3451 if (dir == EV_DIR_DECREASES)
3452 max = init;
3453 else
3454 min = init;
3455
3456 /* If we would create an invalid range, then just assume we
3457 know absolutely nothing. This may be over-conservative,
3458 but it's clearly safe, and should happen only in unreachable
3459 parts of code, or for invalid programs. */
3460 if (compare_values (min, max) == 1)
3461 return;
3462
3463 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3464 }
3465 else if (vr->type == VR_RANGE)
3466 {
3467 min = vr->min;
3468 max = vr->max;
3469
3470 if (dir == EV_DIR_DECREASES)
3471 {
3472 /* INIT is the maximum value. If INIT is lower than VR->MAX
3473 but no smaller than VR->MIN, set VR->MAX to INIT. */
3474 if (compare_values (init, max) == -1)
3475 max = init;
3476
3477 /* According to the loop information, the variable does not
3478 overflow. If we think it does, probably because of an
3479 overflow due to arithmetic on a different INF value,
3480 reset now. */
3481 if (is_negative_overflow_infinity (min)
3482 || compare_values (min, tmin) == -1)
3483 min = tmin;
3484
3485 }
3486 else
3487 {
3488 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3489 if (compare_values (init, min) == 1)
3490 min = init;
3491
3492 if (is_positive_overflow_infinity (max)
3493 || compare_values (tmax, max) == -1)
3494 max = tmax;
3495 }
3496
3497 /* If we just created an invalid range with the minimum
3498 greater than the maximum, we fail conservatively.
3499 This should happen only in unreachable
3500 parts of code, or for invalid programs. */
3501 if (compare_values (min, max) == 1)
3502 return;
3503
3504 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3505 }
3506 }
3507
3508 /* Return true if VAR may overflow at STMT. This checks any available
3509 loop information to see if we can determine that VAR does not
3510 overflow. */
3511
3512 static bool
3513 vrp_var_may_overflow (tree var, gimple stmt)
3514 {
3515 struct loop *l;
3516 tree chrec, init, step;
3517
3518 if (current_loops == NULL)
3519 return true;
3520
3521 l = loop_containing_stmt (stmt);
3522 if (l == NULL
3523 || !loop_outer (l))
3524 return true;
3525
3526 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3527 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3528 return true;
3529
3530 init = initial_condition_in_loop_num (chrec, l->num);
3531 step = evolution_part_in_loop_num (chrec, l->num);
3532
3533 if (step == NULL_TREE
3534 || !is_gimple_min_invariant (step)
3535 || !valid_value_p (init))
3536 return true;
3537
3538 /* If we get here, we know something useful about VAR based on the
3539 loop information. If it wraps, it may overflow. */
3540
3541 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3542 true))
3543 return true;
3544
3545 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3546 {
3547 print_generic_expr (dump_file, var, 0);
3548 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3549 }
3550
3551 return false;
3552 }
3553
3554
3555 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3556
3557 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3558 all the values in the ranges.
3559
3560 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3561
3562 - Return NULL_TREE if it is not always possible to determine the
3563 value of the comparison.
3564
3565 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3566 overflow infinity was used in the test. */
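/* For instance, [1, 5] < [10, 20] always evaluates to true and
[10, 20] < [1, 5] always evaluates to false, whereas
[1, 15] < [10, 20] cannot be decided and yields NULL_TREE. */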
3567
3568
3569 static tree
3570 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3571 bool *strict_overflow_p)
3572 {
3573 /* VARYING or UNDEFINED ranges cannot be compared. */
3574 if (vr0->type == VR_VARYING
3575 || vr0->type == VR_UNDEFINED
3576 || vr1->type == VR_VARYING
3577 || vr1->type == VR_UNDEFINED)
3578 return NULL_TREE;
3579
3580 /* Anti-ranges need to be handled separately. */
3581 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3582 {
3583 /* If both are anti-ranges, then we cannot compute any
3584 comparison. */
3585 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3586 return NULL_TREE;
3587
3588 /* These comparisons are never statically computable. */
3589 if (comp == GT_EXPR
3590 || comp == GE_EXPR
3591 || comp == LT_EXPR
3592 || comp == LE_EXPR)
3593 return NULL_TREE;
3594
3595 /* Equality can be computed only between a range and an
3596 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3597 if (vr0->type == VR_RANGE)
3598 {
3599 /* To simplify processing, make VR0 the anti-range. */
3600 value_range_t *tmp = vr0;
3601 vr0 = vr1;
3602 vr1 = tmp;
3603 }
3604
3605 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3606
3607 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3608 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3609 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3610
3611 return NULL_TREE;
3612 }
3613
3614 if (!usable_range_p (vr0, strict_overflow_p)
3615 || !usable_range_p (vr1, strict_overflow_p))
3616 return NULL_TREE;
3617
3618 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3619 operands around and change the comparison code. */
3620 if (comp == GT_EXPR || comp == GE_EXPR)
3621 {
3622 value_range_t *tmp;
3623 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3624 tmp = vr0;
3625 vr0 = vr1;
3626 vr1 = tmp;
3627 }
3628
3629 if (comp == EQ_EXPR)
3630 {
3631 /* Equality may only be computed if both ranges represent
3632 exactly one value. */
3633 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3634 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3635 {
3636 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3637 strict_overflow_p);
3638 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3639 strict_overflow_p);
3640 if (cmp_min == 0 && cmp_max == 0)
3641 return boolean_true_node;
3642 else if (cmp_min != -2 && cmp_max != -2)
3643 return boolean_false_node;
3644 }
3645 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, V0 != V1. */
3646 else if (compare_values_warnv (vr0->min, vr1->max,
3647 strict_overflow_p) == 1
3648 || compare_values_warnv (vr1->min, vr0->max,
3649 strict_overflow_p) == 1)
3650 return boolean_false_node;
3651
3652 return NULL_TREE;
3653 }
3654 else if (comp == NE_EXPR)
3655 {
3656 int cmp1, cmp2;
3657
3658 /* If VR0 is completely to the left or completely to the right
3659 of VR1, they are always different. Notice that we need to
3660 make sure that both comparisons yield similar results to
3661 avoid comparing values that cannot be compared at
3662 compile-time. */
3663 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3664 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3665 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3666 return boolean_true_node;
3667
3668 /* If VR0 and VR1 represent a single value and are identical,
3669 return false. */
3670 else if (compare_values_warnv (vr0->min, vr0->max,
3671 strict_overflow_p) == 0
3672 && compare_values_warnv (vr1->min, vr1->max,
3673 strict_overflow_p) == 0
3674 && compare_values_warnv (vr0->min, vr1->min,
3675 strict_overflow_p) == 0
3676 && compare_values_warnv (vr0->max, vr1->max,
3677 strict_overflow_p) == 0)
3678 return boolean_false_node;
3679
3680 /* Otherwise, they may or may not be different. */
3681 else
3682 return NULL_TREE;
3683 }
3684 else if (comp == LT_EXPR || comp == LE_EXPR)
3685 {
3686 int tst;
3687
3688 /* If VR0 is to the left of VR1, return true. */
3689 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3690 if ((comp == LT_EXPR && tst == -1)
3691 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3692 {
3693 if (overflow_infinity_range_p (vr0)
3694 || overflow_infinity_range_p (vr1))
3695 *strict_overflow_p = true;
3696 return boolean_true_node;
3697 }
3698
3699 /* If VR0 is to the right of VR1, return false. */
3700 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3701 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3702 || (comp == LE_EXPR && tst == 1))
3703 {
3704 if (overflow_infinity_range_p (vr0)
3705 || overflow_infinity_range_p (vr1))
3706 *strict_overflow_p = true;
3707 return boolean_false_node;
3708 }
3709
3710 /* Otherwise, we don't know. */
3711 return NULL_TREE;
3712 }
3713
3714 gcc_unreachable ();
3715 }
3716
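/* Illustrative sketch (hypothetical ranges, added for exposition):
   given VR0 = [1, 5] and VR1 = [10, 20], compare_ranges resolves

     LT_EXPR  -> boolean_true_node   (VR0->max < VR1->min)
     NE_EXPR  -> boolean_true_node   (the ranges do not overlap)
     EQ_EXPR  -> boolean_false_node
     GE_EXPR  -> boolean_false_node  (after swapping to LE_EXPR)

   whereas comparing two anti-ranges, or using LT/LE/GT/GE when either
   operand is an anti-range, always yields NULL_TREE.  */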
3717
3718 /* Given a value range VR, a value VAL and a comparison code COMP, return
3719 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3720 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3721 always returns false. Return NULL_TREE if it is not always
3722 possible to determine the value of the comparison. Also set
3723 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3724 infinity was used in the test. */
3725
3726 static tree
3727 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3728 bool *strict_overflow_p)
3729 {
3730 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3731 return NULL_TREE;
3732
3733 /* Anti-ranges need to be handled separately. */
3734 if (vr->type == VR_ANTI_RANGE)
3735 {
3736 /* For anti-ranges, the only predicates that we can compute at
3737 compile time are equality and inequality. */
3738 if (comp == GT_EXPR
3739 || comp == GE_EXPR
3740 || comp == LT_EXPR
3741 || comp == LE_EXPR)
3742 return NULL_TREE;
3743
3744 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3745 if (value_inside_range (val, vr) == 1)
3746 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3747
3748 return NULL_TREE;
3749 }
3750
3751 if (!usable_range_p (vr, strict_overflow_p))
3752 return NULL_TREE;
3753
3754 if (comp == EQ_EXPR)
3755 {
3756 /* EQ_EXPR may only be computed if VR represents exactly
3757 one value. */
3758 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3759 {
3760 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3761 if (cmp == 0)
3762 return boolean_true_node;
3763 else if (cmp == -1 || cmp == 1 || cmp == 2)
3764 return boolean_false_node;
3765 }
3766 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3767 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3768 return boolean_false_node;
3769
3770 return NULL_TREE;
3771 }
3772 else if (comp == NE_EXPR)
3773 {
3774 /* If VAL is not inside VR, then they are always different. */
3775 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3776 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3777 return boolean_true_node;
3778
3779 /* If VR represents exactly one value equal to VAL, then return
3780 false. */
3781 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3782 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3783 return boolean_false_node;
3784
3785 /* Otherwise, they may or may not be different. */
3786 return NULL_TREE;
3787 }
3788 else if (comp == LT_EXPR || comp == LE_EXPR)
3789 {
3790 int tst;
3791
3792 /* If VR is to the left of VAL, return true. */
3793 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3794 if ((comp == LT_EXPR && tst == -1)
3795 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3796 {
3797 if (overflow_infinity_range_p (vr))
3798 *strict_overflow_p = true;
3799 return boolean_true_node;
3800 }
3801
3802 /* If VR is to the right of VAL, return false. */
3803 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3804 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3805 || (comp == LE_EXPR && tst == 1))
3806 {
3807 if (overflow_infinity_range_p (vr))
3808 *strict_overflow_p = true;
3809 return boolean_false_node;
3810 }
3811
3812 /* Otherwise, we don't know. */
3813 return NULL_TREE;
3814 }
3815 else if (comp == GT_EXPR || comp == GE_EXPR)
3816 {
3817 int tst;
3818
3819 /* If VR is to the right of VAL, return true. */
3820 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3821 if ((comp == GT_EXPR && tst == 1)
3822 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3823 {
3824 if (overflow_infinity_range_p (vr))
3825 *strict_overflow_p = true;
3826 return boolean_true_node;
3827 }
3828
3829 /* If VR is to the left of VAL, return false. */
3830 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3831 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3832 || (comp == GE_EXPR && tst == -1))
3833 {
3834 if (overflow_infinity_range_p (vr))
3835 *strict_overflow_p = true;
3836 return boolean_false_node;
3837 }
3838
3839 /* Otherwise, we don't know. */
3840 return NULL_TREE;
3841 }
3842
3843 gcc_unreachable ();
3844 }
3845
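/* Illustrative sketch (hypothetical range and values, added for
   exposition): with VR = [10, 20] the function resolves

     LT_EXPR against 30 -> boolean_true_node
     GT_EXPR against 30 -> boolean_false_node
     EQ_EXPR against 15 -> NULL_TREE  (VR spans more than one value)

   and for the anti-range ~[0, 0], EQ_EXPR against 0 yields
   boolean_false_node because 0 lies inside the excluded interval.  */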
3846
3847 /* Debugging dumps. */
3848
3849 void dump_value_range (FILE *, value_range_t *);
3850 void debug_value_range (value_range_t *);
3851 void dump_all_value_ranges (FILE *);
3852 void debug_all_value_ranges (void);
3853 void dump_vr_equiv (FILE *, bitmap);
3854 void debug_vr_equiv (bitmap);
3855
3856
3857 /* Dump value range VR to FILE. */
3858
3859 void
3860 dump_value_range (FILE *file, value_range_t *vr)
3861 {
3862 if (vr == NULL)
3863 fprintf (file, "[]");
3864 else if (vr->type == VR_UNDEFINED)
3865 fprintf (file, "UNDEFINED");
3866 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3867 {
3868 tree type = TREE_TYPE (vr->min);
3869
3870 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3871
3872 if (is_negative_overflow_infinity (vr->min))
3873 fprintf (file, "-INF(OVF)");
3874 else if (INTEGRAL_TYPE_P (type)
3875 && !TYPE_UNSIGNED (type)
3876 && vrp_val_is_min (vr->min))
3877 fprintf (file, "-INF");
3878 else
3879 print_generic_expr (file, vr->min, 0);
3880
3881 fprintf (file, ", ");
3882
3883 if (is_positive_overflow_infinity (vr->max))
3884 fprintf (file, "+INF(OVF)");
3885 else if (INTEGRAL_TYPE_P (type)
3886 && vrp_val_is_max (vr->max))
3887 fprintf (file, "+INF");
3888 else
3889 print_generic_expr (file, vr->max, 0);
3890
3891 fprintf (file, "]");
3892
3893 if (vr->equiv)
3894 {
3895 bitmap_iterator bi;
3896 unsigned i, c = 0;
3897
3898 fprintf (file, " EQUIVALENCES: { ");
3899
3900 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3901 {
3902 print_generic_expr (file, ssa_name (i), 0);
3903 fprintf (file, " ");
3904 c++;
3905 }
3906
3907 fprintf (file, "} (%u elements)", c);
3908 }
3909 }
3910 else if (vr->type == VR_VARYING)
3911 fprintf (file, "VARYING");
3912 else
3913 fprintf (file, "INVALID RANGE");
3914 }
3915
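/* Illustrative sketch (hypothetical ranges and SSA name, added for
   exposition): the format produced above looks like

     [0, 42] EQUIVALENCES: { i_3 } (1 elements)
     ~[0, 0]
     [5, +INF]
     VARYING

   where "~" marks an anti-range, -INF/+INF stand for the extreme
   values of the type, and an (OVF) suffix marks an overflow
   infinity.  */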
3916
3917 /* Dump value range VR to stderr. */
3918
3919 DEBUG_FUNCTION void
3920 debug_value_range (value_range_t *vr)
3921 {
3922 dump_value_range (stderr, vr);
3923 fprintf (stderr, "\n");
3924 }
3925
3926
3927 /* Dump value ranges of all SSA_NAMEs to FILE. */
3928
3929 void
3930 dump_all_value_ranges (FILE *file)
3931 {
3932 size_t i;
3933
3934 for (i = 0; i < num_ssa_names; i++)
3935 {
3936 if (vr_value[i])
3937 {
3938 print_generic_expr (file, ssa_name (i), 0);
3939 fprintf (file, ": ");
3940 dump_value_range (file, vr_value[i]);
3941 fprintf (file, "\n");
3942 }
3943 }
3944
3945 fprintf (file, "\n");
3946 }
3947
3948
3949 /* Dump all value ranges to stderr. */
3950
3951 DEBUG_FUNCTION void
3952 debug_all_value_ranges (void)
3953 {
3954 dump_all_value_ranges (stderr);
3955 }
3956
3957
3958 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3959 create a new SSA name N and return the assertion assignment
3960 'N = ASSERT_EXPR <V, V OP W>'. */
3961
3962 static gimple
3963 build_assert_expr_for (tree cond, tree v)
3964 {
3965 tree n;
3966 gimple assertion;
3967
3968 gcc_assert (TREE_CODE (v) == SSA_NAME);
3969 n = duplicate_ssa_name (v, NULL);
3970
3971 if (COMPARISON_CLASS_P (cond))
3972 {
3973 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3974 assertion = gimple_build_assign (n, a);
3975 }
3976 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3977 {
3978 /* Given !V, build the assignment N = false. */
3979 tree op0 = TREE_OPERAND (cond, 0);
3980 gcc_assert (op0 == v);
3981 assertion = gimple_build_assign (n, boolean_false_node);
3982 }
3983 else if (TREE_CODE (cond) == SSA_NAME)
3984 {
3985 /* Given V, build the assignment N = true. */
3986 gcc_assert (v == cond);
3987 assertion = gimple_build_assign (n, boolean_true_node);
3988 }
3989 else
3990 gcc_unreachable ();
3991
3992 SSA_NAME_DEF_STMT (n) = assertion;
3993
3994 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3995 operand of the ASSERT_EXPR. Register the new name and the old one
3996 in the replacement table so that we can fix the SSA web after
3997 adding all the ASSERT_EXPRs. */
3998 register_new_name_mapping (n, v);
3999
4000 return assertion;
4001 }
4002
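/* Illustrative sketch (hypothetical SSA names, added for exposition):
   for COND = 'x_3 > 5' and V = x_3, the function duplicates x_3 into
   a fresh name, say x_7, and returns the assignment

     x_7 = ASSERT_EXPR <x_3, x_3 > 5>;

   The x_7/x_3 pair recorded by register_new_name_mapping lets the SSA
   updater later rewrite uses of x_3 dominated by the assertion.  */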
4003
4004 /* Return true if STMT is a conditional jump whose predicate involves
4005 floating point values. */
4006
4007 static inline bool
4008 fp_predicate (gimple stmt)
4009 {
4010 GIMPLE_CHECK (stmt, GIMPLE_COND);
4011
4012 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4013 }
4014
4015
4016 /* If the range of values taken by OP can be inferred after STMT executes,
4017 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4018 describe the inferred range. Return true if a range could be
4019 inferred. */
4020
4021 static bool
4022 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4023 {
4024 *val_p = NULL_TREE;
4025 *comp_code_p = ERROR_MARK;
4026
4027 /* Do not attempt to infer anything in names that flow through
4028 abnormal edges. */
4029 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4030 return false;
4031
4032 /* Similarly, don't infer anything from statements that may throw
4033 exceptions. */
4034 if (stmt_could_throw_p (stmt))
4035 return false;
4036
4037 /* If STMT is the last statement of a basic block with no
4038 successors, there is no point inferring anything about any of its
4039 operands. We would not be able to find a proper insertion point
4040 for the assertion, anyway. */
4041 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4042 return false;
4043
4044 /* We can only assume that a pointer dereference will yield
4045 non-NULL if -fdelete-null-pointer-checks is enabled. */
4046 if (flag_delete_null_pointer_checks
4047 && POINTER_TYPE_P (TREE_TYPE (op))
4048 && gimple_code (stmt) != GIMPLE_ASM)
4049 {
4050 unsigned num_uses, num_loads, num_stores;
4051
4052 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4053 if (num_loads + num_stores > 0)
4054 {
4055 *val_p = build_int_cst (TREE_TYPE (op), 0);
4056 *comp_code_p = NE_EXPR;
4057 return true;
4058 }
4059 }
4060
4061 return false;
4062 }
4063
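/* Illustrative sketch (hypothetical SSA name, added for exposition):
   for a statement such as '*p_4 = 0', provided the store cannot throw
   and -fdelete-null-pointer-checks is in effect, the function returns
   true with *COMP_CODE_P = NE_EXPR and *VAL_P a null pointer constant,
   i.e. the inferred assertion is p_4 != 0.  */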
4064
4065 void dump_asserts_for (FILE *, tree);
4066 void debug_asserts_for (tree);
4067 void dump_all_asserts (FILE *);
4068 void debug_all_asserts (void);
4069
4070 /* Dump all the registered assertions for NAME to FILE. */
4071
4072 void
4073 dump_asserts_for (FILE *file, tree name)
4074 {
4075 assert_locus_t loc;
4076
4077 fprintf (file, "Assertions to be inserted for ");
4078 print_generic_expr (file, name, 0);
4079 fprintf (file, "\n");
4080
4081 loc = asserts_for[SSA_NAME_VERSION (name)];
4082 while (loc)
4083 {
4084 fprintf (file, "\t");
4085 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4086 fprintf (file, "\n\tBB #%d", loc->bb->index);
4087 if (loc->e)
4088 {
4089 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4090 loc->e->dest->index);
4091 dump_edge_info (file, loc->e, 0);
4092 }
4093 fprintf (file, "\n\tPREDICATE: ");
4094 print_generic_expr (file, name, 0);
4095 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4096 print_generic_expr (file, loc->val, 0);
4097 fprintf (file, "\n\n");
4098 loc = loc->next;
4099 }
4100
4101 fprintf (file, "\n");
4102 }
4103
4104
4105 /* Dump all the registered assertions for NAME to stderr. */
4106
4107 DEBUG_FUNCTION void
4108 debug_asserts_for (tree name)
4109 {
4110 dump_asserts_for (stderr, name);
4111 }
4112
4113
4114 /* Dump all the registered assertions for all the names to FILE. */
4115
4116 void
4117 dump_all_asserts (FILE *file)
4118 {
4119 unsigned i;
4120 bitmap_iterator bi;
4121
4122 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4123 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4124 dump_asserts_for (file, ssa_name (i));
4125 fprintf (file, "\n");
4126 }
4127
4128
4129 /* Dump all the registered assertions for all the names to stderr. */
4130
4131 DEBUG_FUNCTION void
4132 debug_all_asserts (void)
4133 {
4134 dump_all_asserts (stderr);
4135 }
4136
4137
4138 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4139 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4140 E->DEST, then register this location as a possible insertion point
4141 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4142
4143 BB, E and SI provide the exact insertion point for the new
4144 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4145 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4146 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4147 must not be NULL. */
4148
4149 static void
4150 register_new_assert_for (tree name, tree expr,
4151 enum tree_code comp_code,
4152 tree val,
4153 basic_block bb,
4154 edge e,
4155 gimple_stmt_iterator si)
4156 {
4157 assert_locus_t n, loc, last_loc;
4158 basic_block dest_bb;
4159
4160 gcc_checking_assert (bb == NULL || e == NULL);
4161
4162 if (e == NULL)
4163 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4164 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4165
4166 /* Never build an assert comparing against an integer constant with
4167 TREE_OVERFLOW set. This confuses our undefined overflow warning
4168 machinery. */
4169 if (TREE_CODE (val) == INTEGER_CST
4170 && TREE_OVERFLOW (val))
4171 val = build_int_cst_wide (TREE_TYPE (val),
4172 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4173
4174 /* The new assertion A will be inserted at BB or E. We need to
4175 determine if the new location is dominated by a previously
4176 registered location for A. If we are doing an edge insertion,
4177 assume that A will be inserted at E->DEST. Note that this is not
4178 necessarily true.
4179
4180 If E is a critical edge, it will be split. But even if E is
4181 split, the new block will dominate the same set of blocks that
4182 E->DEST dominates.
4183
4184 The reverse, however, is not true: blocks dominated by E->DEST
4185 will not be dominated by the new block created to split E. So,
4186 if the insertion location is on a critical edge, we will not use
4187 the new location to move another assertion previously registered
4188 at a block dominated by E->DEST. */
4189 dest_bb = (bb) ? bb : e->dest;
4190
4191 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4192 VAL at a block dominating DEST_BB, then we don't need to insert a new
4193 one. Similarly, if the same assertion already exists at a block
4194 dominated by DEST_BB and the new location is not on a critical
4195 edge, then update the existing location for the assertion (i.e.,
4196 move the assertion up in the dominance tree).
4197
4198 Note, this is implemented as a simple linked list because there
4199 should not be more than a handful of assertions registered per
4200 name. If this becomes a performance problem, a table hashed by
4201 COMP_CODE and VAL could be implemented. */
4202 loc = asserts_for[SSA_NAME_VERSION (name)];
4203 last_loc = loc;
4204 while (loc)
4205 {
4206 if (loc->comp_code == comp_code
4207 && (loc->val == val
4208 || operand_equal_p (loc->val, val, 0))
4209 && (loc->expr == expr
4210 || operand_equal_p (loc->expr, expr, 0)))
4211 {
4212 /* If the assertion NAME COMP_CODE VAL has already been
4213 registered at a basic block that dominates DEST_BB, then
4214 we don't need to insert the same assertion again. Note
4215 that we don't check strict dominance here to avoid
4216 replicating the same assertion inside the same basic
4217 block more than once (e.g., when a pointer is
4218 dereferenced several times inside a block).
4219
4220 An exception to this rule are edge insertions. If the
4221 new assertion is to be inserted on edge E, then it will
4222 dominate all the other insertions that we may want to
4223 insert in DEST_BB. So, if we are doing an edge
4224 insertion, don't do this dominance check. */
4225 if (e == NULL
4226 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4227 return;
4228
4229 /* Otherwise, if E is not a critical edge and DEST_BB
4230 dominates the existing location for the assertion, move
4231 the assertion up in the dominance tree by updating its
4232 location information. */
4233 if ((e == NULL || !EDGE_CRITICAL_P (e))
4234 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4235 {
4236 loc->bb = dest_bb;
4237 loc->e = e;
4238 loc->si = si;
4239 return;
4240 }
4241 }
4242
4243 /* Update the last node of the list and move to the next one. */
4244 last_loc = loc;
4245 loc = loc->next;
4246 }
4247
4248 /* If we didn't find an assertion already registered for
4249 NAME COMP_CODE VAL, add a new one at the end of the list of
4250 assertions associated with NAME. */
4251 n = XNEW (struct assert_locus_d);
4252 n->bb = dest_bb;
4253 n->e = e;
4254 n->si = si;
4255 n->comp_code = comp_code;
4256 n->val = val;
4257 n->expr = expr;
4258 n->next = NULL;
4259
4260 if (last_loc)
4261 last_loc->next = n;
4262 else
4263 asserts_for[SSA_NAME_VERSION (name)] = n;
4264
4265 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4266 }
4267
4268 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4269 Extract a suitable test code and value and store them into *CODE_P and
4270 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4271
4272 If no extraction was possible, return FALSE, otherwise return TRUE.
4273
4274 If INVERT is true, then we invert the result stored into *CODE_P. */
4275
4276 static bool
4277 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4278 tree cond_op0, tree cond_op1,
4279 bool invert, enum tree_code *code_p,
4280 tree *val_p)
4281 {
4282 enum tree_code comp_code;
4283 tree val;
4284
4285 /* We have a comparison of the form NAME COMP VAL
4286 or VAL COMP NAME. */
4287 if (name == cond_op1)
4288 {
4289 /* If the predicate is of the form VAL COMP NAME, flip
4290 COMP around because we need to register NAME as the
4291 first operand in the predicate. */
4292 comp_code = swap_tree_comparison (cond_code);
4293 val = cond_op0;
4294 }
4295 else
4296 {
4297 /* The comparison is of the form NAME COMP VAL, so the
4298 comparison code remains unchanged. */
4299 comp_code = cond_code;
4300 val = cond_op1;
4301 }
4302
4303 /* Invert the comparison code as necessary. */
4304 if (invert)
4305 comp_code = invert_tree_comparison (comp_code, 0);
4306
4307 /* VRP does not handle float types. */
4308 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4309 return false;
4310
4311 /* Do not register always-false predicates.
4312 FIXME: this works around a limitation in fold() when dealing with
4313 enumerations. Given 'enum { N1, N2 } x;', fold will not
4314 fold 'if (x > N2)' to 'if (0)'. */
4315 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4316 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4317 {
4318 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4319 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4320
4321 if (comp_code == GT_EXPR
4322 && (!max
4323 || compare_values (val, max) == 0))
4324 return false;
4325
4326 if (comp_code == LT_EXPR
4327 && (!min
4328 || compare_values (val, min) == 0))
4329 return false;
4330 }
4331 *code_p = comp_code;
4332 *val_p = val;
4333 return true;
4334 }
4335
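/* Illustrative sketch (hypothetical SSA name, added for exposition):
   for the condition '5 < x_2' with NAME = x_2, the predicate is
   normalized by swapping the comparison, giving *CODE_P = GT_EXPR and
   *VAL_P = 5; with INVERT set (the else edge) the code becomes
   LE_EXPR instead.  A floating-point comparison, or an always-false
   test such as 'x_2 > TYPE_MAX', makes the function return false.  */
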
4336 /* Try to register an edge assertion for SSA name NAME on edge E for
4337 the condition COND contributing to the conditional jump pointed to by BSI.
4338 Invert the condition COND if INVERT is true.
4339 Return true if an assertion for NAME could be registered. */
4340
4341 static bool
4342 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4343 enum tree_code cond_code,
4344 tree cond_op0, tree cond_op1, bool invert)
4345 {
4346 tree val;
4347 enum tree_code comp_code;
4348 bool retval = false;
4349
4350 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4351 cond_op0,
4352 cond_op1,
4353 invert, &comp_code, &val))
4354 return false;
4355
4356 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4357 reachable from E. */
4358 if (live_on_edge (e, name)
4359 && !has_single_use (name))
4360 {
4361 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4362 retval = true;
4363 }
4364
4365 /* In the case of NAME <= CST and NAME being defined as
4366 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4367 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4368 This catches range and anti-range tests. */
4369 if ((comp_code == LE_EXPR
4370 || comp_code == GT_EXPR)
4371 && TREE_CODE (val) == INTEGER_CST
4372 && TYPE_UNSIGNED (TREE_TYPE (val)))
4373 {
4374 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4375 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4376
4377 /* Extract CST2 from the (optional) addition. */
4378 if (is_gimple_assign (def_stmt)
4379 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4380 {
4381 name2 = gimple_assign_rhs1 (def_stmt);
4382 cst2 = gimple_assign_rhs2 (def_stmt);
4383 if (TREE_CODE (name2) == SSA_NAME
4384 && TREE_CODE (cst2) == INTEGER_CST)
4385 def_stmt = SSA_NAME_DEF_STMT (name2);
4386 }
4387
4388 /* Extract NAME2 from the (optional) sign-changing cast. */
4389 if (gimple_assign_cast_p (def_stmt))
4390 {
4391 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4392 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4393 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4394 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4395 name3 = gimple_assign_rhs1 (def_stmt);
4396 }
4397
4398 /* If name3 is used later, create an ASSERT_EXPR for it. */
4399 if (name3 != NULL_TREE
4400 && TREE_CODE (name3) == SSA_NAME
4401 && (cst2 == NULL_TREE
4402 || TREE_CODE (cst2) == INTEGER_CST)
4403 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4404 && live_on_edge (e, name3)
4405 && !has_single_use (name3))
4406 {
4407 tree tmp;
4408
4409 /* Build an expression for the range test. */
4410 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4411 if (cst2 != NULL_TREE)
4412 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4413
4414 if (dump_file)
4415 {
4416 fprintf (dump_file, "Adding assert for ");
4417 print_generic_expr (dump_file, name3, 0);
4418 fprintf (dump_file, " from ");
4419 print_generic_expr (dump_file, tmp, 0);
4420 fprintf (dump_file, "\n");
4421 }
4422
4423 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4424
4425 retval = true;
4426 }
4427
4428 /* If name2 is used later, create an ASSERT_EXPR for it. */
4429 if (name2 != NULL_TREE
4430 && TREE_CODE (name2) == SSA_NAME
4431 && TREE_CODE (cst2) == INTEGER_CST
4432 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4433 && live_on_edge (e, name2)
4434 && !has_single_use (name2))
4435 {
4436 tree tmp;
4437
4438 /* Build an expression for the range test. */
4439 tmp = name2;
4440 if (TREE_TYPE (name) != TREE_TYPE (name2))
4441 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4442 if (cst2 != NULL_TREE)
4443 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4444
4445 if (dump_file)
4446 {
4447 fprintf (dump_file, "Adding assert for ");
4448 print_generic_expr (dump_file, name2, 0);
4449 fprintf (dump_file, " from ");
4450 print_generic_expr (dump_file, tmp, 0);
4451 fprintf (dump_file, "\n");
4452 }
4453
4454 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4455
4456 retval = true;
4457 }
4458 }
4459
4460 return retval;
4461 }
4462
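/* Illustrative sketch (hypothetical SSA names, added for exposition):
   a test like 'if (i_2 >= -10 && i_2 <= 5)' is commonly folded into
   the unsigned form

     x_5 = (unsigned int) i_2;
     y_6 = x_5 + 10;
     if (y_6 <= 15) ...

   On the true edge, besides the assertion for y_6 itself, the code
   above registers an assertion for i_2 whose expression is
   '(unsigned int) i_2 + 10' compared LE against 15, from which the
   propagation phase can recover the range [-10, 5] for i_2.  */
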
4463 /* OP is an operand of a truth value expression which is known to have
4464 a particular value. Register any asserts for OP and for any
4465 operands in OP's defining statement.
4466
4467 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4468 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
4469
4470 static bool
4471 register_edge_assert_for_1 (tree op, enum tree_code code,
4472 edge e, gimple_stmt_iterator bsi)
4473 {
4474 bool retval = false;
4475 gimple op_def;
4476 tree val;
4477 enum tree_code rhs_code;
4478
4479 /* We only care about SSA_NAMEs. */
4480 if (TREE_CODE (op) != SSA_NAME)
4481 return false;
4482
4483 /* We know that OP will have a zero or nonzero value. If OP is used
4484 more than once go ahead and register an assert for OP.
4485
4486 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4487 it will always be set for OP (because OP is used in a COND_EXPR in
4488 the subgraph). */
4489 if (!has_single_use (op))
4490 {
4491 val = build_int_cst (TREE_TYPE (op), 0);
4492 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4493 retval = true;
4494 }
4495
4496 /* Now look at how OP is set. If it's set from a comparison,
4497 a truth operation or some bit operations, then we may be able
4498 to register information about the operands of that assignment. */
4499 op_def = SSA_NAME_DEF_STMT (op);
4500 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4501 return retval;
4502
4503 rhs_code = gimple_assign_rhs_code (op_def);
4504
4505 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4506 {
4507 bool invert = (code == EQ_EXPR ? true : false);
4508 tree op0 = gimple_assign_rhs1 (op_def);
4509 tree op1 = gimple_assign_rhs2 (op_def);
4510
4511 if (TREE_CODE (op0) == SSA_NAME)
4512 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4513 invert);
4514 if (TREE_CODE (op1) == SSA_NAME)
4515 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4516 invert);
4517 }
4518 else if ((code == NE_EXPR
4519 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4520 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4521 || (code == EQ_EXPR
4522 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4523 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4524 {
4525 /* Recurse on each operand. */
4526 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4527 code, e, bsi);
4528 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4529 code, e, bsi);
4530 }
4531 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4532 {
4533 /* Recurse, flipping CODE. */
4534 code = invert_tree_comparison (code, false);
4535 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4536 code, e, bsi);
4537 }
4538 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4539 {
4540 /* Recurse through the copy. */
4541 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4542 code, e, bsi);
4543 }
4544 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4545 {
4546 /* Recurse through the type conversion. */
4547 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4548 code, e, bsi);
4549 }
4550
4551 return retval;
4552 }
4553
4554 /* Try to register an edge assertion for SSA name NAME on edge E for
4555 the condition COND contributing to the conditional jump pointed to by SI.
4556 Return true if an assertion for NAME could be registered. */
4557
4558 static bool
4559 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4560 enum tree_code cond_code, tree cond_op0,
4561 tree cond_op1)
4562 {
4563 tree val;
4564 enum tree_code comp_code;
4565 bool retval = false;
4566 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4567
4568 /* Do not attempt to infer anything in names that flow through
4569 abnormal edges. */
4570 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4571 return false;
4572
4573 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4574 cond_op0, cond_op1,
4575 is_else_edge,
4576 &comp_code, &val))
4577 return false;
4578
4579 /* Register ASSERT_EXPRs for name. */
4580 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4581 cond_op1, is_else_edge);
4582
4583
4584 /* If COND is effectively an equality test of an SSA_NAME against
4585 the value zero or one, then we may be able to assert values
4586 for SSA_NAMEs which flow into COND. */
4587
4588 /* In the case of NAME == 1 or NAME != 0, if the defining statement of
4589 NAME is a TRUTH_AND_EXPR or BIT_AND_EXPR, we can assert that both
4590 of its operands have nonzero value. */
4591 if (((comp_code == EQ_EXPR && integer_onep (val))
4592 || (comp_code == NE_EXPR && integer_zerop (val))))
4593 {
4594 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4595
4596 if (is_gimple_assign (def_stmt)
4597 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4598 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4599 {
4600 tree op0 = gimple_assign_rhs1 (def_stmt);
4601 tree op1 = gimple_assign_rhs2 (def_stmt);
4602 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4603 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4604 }
4605 }
4606
4607 /* In the case of NAME == 0 or NAME != 1, if the defining statement of
4608 NAME is a TRUTH_OR_EXPR, we can assert that both of its operands
4609 have zero value. */
4610 if (((comp_code == EQ_EXPR && integer_zerop (val))
4611 || (comp_code == NE_EXPR && integer_onep (val))))
4612 {
4613 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4614
4615 if (is_gimple_assign (def_stmt)
4616 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4617 /* For BIT_IOR_EXPR, only when NAME == 0 do both operands
4618 necessarily have zero value. */
4619 || (comp_code == EQ_EXPR
4620 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4621 {
4622 tree op0 = gimple_assign_rhs1 (def_stmt);
4623 tree op1 = gimple_assign_rhs2 (def_stmt);
4624 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4625 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4626 }
4627 }
4628
4629 return retval;
4630 }
4631
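/* Illustrative sketch (hypothetical SSA names, added for exposition):
   for 'x_1 = a_2 & b_3; if (x_1 != 0)', the true edge matches the
   'NAME != 0' pattern above, so register_edge_assert_for_1 is invoked
   on a_2 and b_3 and registers 'a_2 != 0' and 'b_3 != 0' (assuming
   each has more than one use).  Dually, on the edge where a
   BIT_IOR_EXPR result is known to be zero, both of its operands are
   asserted to be zero.  */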
4632
4633 /* Determine whether the outgoing edges of BB should receive an
4634 ASSERT_EXPR for each of the operands of BB's LAST statement.
4635 The last statement of BB must be a COND_EXPR.
4636
4637 If any of the sub-graphs rooted at BB have an interesting use of
4638 the predicate operands, an assert location node is added to the
4639 list of assertions for the corresponding operands. */
4640
4641 static bool
4642 find_conditional_asserts (basic_block bb, gimple last)
4643 {
4644 bool need_assert;
4645 gimple_stmt_iterator bsi;
4646 tree op;
4647 edge_iterator ei;
4648 edge e;
4649 ssa_op_iter iter;
4650
4651 need_assert = false;
4652 bsi = gsi_for_stmt (last);
4653
4654 /* Look for uses of the operands in each of the sub-graphs
4655 rooted at BB. We need to check each of the outgoing edges
4656 separately, so that we know what kind of ASSERT_EXPR to
4657 insert. */
4658 FOR_EACH_EDGE (e, ei, bb->succs)
4659 {
4660 if (e->dest == bb)
4661 continue;
4662
4663 /* Register the necessary assertions for each operand in the
4664 conditional predicate. */
4665 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4666 {
4667 need_assert |= register_edge_assert_for (op, e, bsi,
4668 gimple_cond_code (last),
4669 gimple_cond_lhs (last),
4670 gimple_cond_rhs (last));
4671 }
4672 }
4673
4674 return need_assert;
4675 }
4676
4677 struct case_info
4678 {
4679 tree expr;
4680 basic_block bb;
4681 };
4682
4683 /* Compare two case labels sorting first by the destination bb index
4684 and then by the case value. */
4685
4686 static int
4687 compare_case_labels (const void *p1, const void *p2)
4688 {
4689 const struct case_info *ci1 = (const struct case_info *) p1;
4690 const struct case_info *ci2 = (const struct case_info *) p2;
4691 int idx1 = ci1->bb->index;
4692 int idx2 = ci2->bb->index;
4693
4694 if (idx1 < idx2)
4695 return -1;
4696 else if (idx1 == idx2)
4697 {
4698 /* Make sure the default label is first in a group. */
4699 if (!CASE_LOW (ci1->expr))
4700 return -1;
4701 else if (!CASE_LOW (ci2->expr))
4702 return 1;
4703 else
4704 return tree_int_cst_compare (CASE_LOW (ci1->expr),
4705 CASE_LOW (ci2->expr));
4706 }
4707 else
4708 return 1;
4709 }
4710
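/* Illustrative sketch (hypothetical labels, added for exposition):
   for a switch whose labels are

     default -> bb 5,  case 1 -> bb 7,  case 3 -> bb 5,  case 9 -> bb 7

   sorting with compare_case_labels yields default (bb 5), 3 (bb 5),
   1 (bb 7), 9 (bb 7): groups are ordered by destination block index,
   and the default label, having no CASE_LOW, sorts first within its
   group.  */
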
4711 /* Determine whether the outgoing edges of BB should receive an
4712 ASSERT_EXPR for each of the operands of BB's LAST statement.
4713 The last statement of BB must be a SWITCH_EXPR.
4714
4715 If any of the sub-graphs rooted at BB have an interesting use of
4716 the predicate operands, an assert location node is added to the
4717 list of assertions for the corresponding operands. */
4718
4719 static bool
4720 find_switch_asserts (basic_block bb, gimple last)
4721 {
4722 bool need_assert;
4723 gimple_stmt_iterator bsi;
4724 tree op;
4725 edge e;
4726 struct case_info *ci;
4727 size_t n = gimple_switch_num_labels (last);
4728 #if GCC_VERSION >= 4000
4729 unsigned int idx;
4730 #else
4731 /* Work around GCC 3.4 bug (PR 37086). */
4732 volatile unsigned int idx;
4733 #endif
4734
4735 need_assert = false;
4736 bsi = gsi_for_stmt (last);
4737 op = gimple_switch_index (last);
4738 if (TREE_CODE (op) != SSA_NAME)
4739 return false;
4740
4741 /* Build a vector of case labels sorted by destination label. */
4742 ci = XNEWVEC (struct case_info, n);
4743 for (idx = 0; idx < n; ++idx)
4744 {
4745 ci[idx].expr = gimple_switch_label (last, idx);
4746 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
4747 }
4748 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
4749
4750 for (idx = 0; idx < n; ++idx)
4751 {
4752 tree min, max;
4753 tree cl = ci[idx].expr;
4754 basic_block cbb = ci[idx].bb;
4755
4756 min = CASE_LOW (cl);
4757 max = CASE_HIGH (cl);
4758
4759 /* If there are multiple case labels with the same destination
4760 we need to combine them to a single value range for the edge. */
4761 if (idx + 1 < n && cbb == ci[idx + 1].bb)
4762 {
4763 /* Skip labels until the last of the group. */
4764 do {
4765 ++idx;
4766 } while (idx < n && cbb == ci[idx].bb);
4767 --idx;
4768
4769 /* Pick up the maximum of the case label range. */
4770 if (CASE_HIGH (ci[idx].expr))
4771 max = CASE_HIGH (ci[idx].expr);
4772 else
4773 max = CASE_LOW (ci[idx].expr);
4774 }
4775
4776 /* Nothing to do if the range includes the default label until we
4777 can register anti-ranges. */
4778 if (min == NULL_TREE)
4779 continue;
4780
4781 /* Find the edge to register the assert expr on. */
4782 e = find_edge (bb, cbb);
4783
4784 /* Register the necessary assertions for the operand in the
4785 SWITCH_EXPR. */
4786 need_assert |= register_edge_assert_for (op, e, bsi,
4787 max ? GE_EXPR : EQ_EXPR,
4788 op,
4789 fold_convert (TREE_TYPE (op),
4790 min));
4791 if (max)
4792 {
4793 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4794 op,
4795 fold_convert (TREE_TYPE (op),
4796 max));
4797 }
4798 }
4799
4800 XDELETEVEC (ci);
4801 return need_assert;
4802 }
4803
4804
4805 /* Traverse all the statements in block BB looking for statements that
4806 may generate useful assertions for the SSA names in their operand.
4807 If a statement produces a useful assertion A for name N_i, then the
4808 list of assertions already generated for N_i is scanned to
4809 determine if A is actually needed.
4810
4811 If N_i already had the assertion A at a location dominating the
4812 current location, then nothing needs to be done. Otherwise, the
4813 new location for A is recorded instead.
4814
4815 1- For every statement S in BB, all the variables used by S are
4816 added to bitmap FOUND_IN_SUBGRAPH.
4817
4818 2- If statement S uses an operand N in a way that exposes a known
4819 value range for N, then if N was not already generated by an
4820 ASSERT_EXPR, create a new assert location for N. For instance,
4821 if N is a pointer and the statement dereferences it, we can
4822 assume that N is not NULL.
4823
4824 3- COND_EXPRs are a special case of #2. We can derive range
4825 information from the predicate but need to insert different
4826 ASSERT_EXPRs for each of the sub-graphs rooted at the
4827 conditional block. If the last statement of BB is a conditional
4828 expression of the form 'X op Y', then
4829
4830 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4831
4832 b) If the conditional is the only entry point to the sub-graph
4833 corresponding to the THEN_CLAUSE, recurse into it. On
4834 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4835 an ASSERT_EXPR is added for the corresponding variable.
4836
4837 c) Repeat step (b) on the ELSE_CLAUSE.
4838
4839 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4840
4841 For instance,
4842
4843 if (a == 9)
4844 b = a;
4845 else
4846 b = c + 1;
4847
4848 In this case, an assertion on the THEN clause is useful to
4849 determine that 'a' is always 9 on that edge. However, an assertion
4850 on the ELSE clause would be unnecessary.
4851
4852 4- If BB does not end in a conditional expression, then we recurse
4853 into BB's dominator children.
4854
4855 At the end of the recursive traversal, every SSA name will have a
4856 list of locations where ASSERT_EXPRs should be added. When a new
4857 location for name N is found, it is registered by calling
4858 register_new_assert_for. That function keeps track of all the
4859 registered assertions to prevent adding unnecessary assertions.
4860 For instance, if a pointer P_4 is dereferenced more than once in a
4861 dominator tree, only the location dominating all the dereferences of
4862 P_4 will receive an ASSERT_EXPR.
4863
4864 If this function returns true, then it means that there are names
4865 for which we need to generate ASSERT_EXPRs. Those assertions are
4866 inserted by process_assert_insertions. */
4867
4868 static bool
4869 find_assert_locations_1 (basic_block bb, sbitmap live)
4870 {
4871 gimple_stmt_iterator si;
4872 gimple last;
4873 gimple phi;
4874 bool need_assert;
4875
4876 need_assert = false;
4877 last = last_stmt (bb);
4878
4879 /* If BB's last statement is a conditional statement involving integer
4880 operands, determine if we need to add ASSERT_EXPRs. */
4881 if (last
4882 && gimple_code (last) == GIMPLE_COND
4883 && !fp_predicate (last)
4884 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4885 need_assert |= find_conditional_asserts (bb, last);
4886
4887 /* If BB's last statement is a switch statement involving integer
4888 operands, determine if we need to add ASSERT_EXPRs. */
4889 if (last
4890 && gimple_code (last) == GIMPLE_SWITCH
4891 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4892 need_assert |= find_switch_asserts (bb, last);
4893
4894 /* Traverse all the statements in BB marking used names and looking
4895 for statements that may infer assertions for their used operands. */
4896 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4897 {
4898 gimple stmt;
4899 tree op;
4900 ssa_op_iter i;
4901
4902 stmt = gsi_stmt (si);
4903
4904 if (is_gimple_debug (stmt))
4905 continue;
4906
4907 /* See if we can derive an assertion for any of STMT's operands. */
4908 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4909 {
4910 tree value;
4911 enum tree_code comp_code;
4912
4913 /* Mark OP in our live bitmap. */
4914 SET_BIT (live, SSA_NAME_VERSION (op));
4915
4916 /* If OP is used in such a way that we can infer a value
4917 range for it, and we don't find a previous assertion for
4918 it, create a new assertion location node for OP. */
4919 if (infer_value_range (stmt, op, &comp_code, &value))
4920 {
4921 /* If we are able to infer a nonzero value range for OP,
4922 then walk backwards through the use-def chain to see if OP
4923 was set via a typecast.
4924
4925 If so, then we can also infer a nonzero value range
4926 for the operand of the NOP_EXPR. */
4927 if (comp_code == NE_EXPR && integer_zerop (value))
4928 {
4929 tree t = op;
4930 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4931
4932 while (is_gimple_assign (def_stmt)
4933 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4934 && TREE_CODE
4935 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4936 && POINTER_TYPE_P
4937 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4938 {
4939 t = gimple_assign_rhs1 (def_stmt);
4940 def_stmt = SSA_NAME_DEF_STMT (t);
4941
4942 /* Note we want to register the assert for the
4943 operand of the NOP_EXPR after SI, not after the
4944 conversion. */
4945 if (! has_single_use (t))
4946 {
4947 register_new_assert_for (t, t, comp_code, value,
4948 bb, NULL, si);
4949 need_assert = true;
4950 }
4951 }
4952 }
4953
4954 /* If OP is used only once, namely in this STMT, don't
4955 bother creating an ASSERT_EXPR for it. Such an
4956 ASSERT_EXPR would do nothing but increase compile time. */
4957 if (!has_single_use (op))
4958 {
4959 register_new_assert_for (op, op, comp_code, value,
4960 bb, NULL, si);
4961 need_assert = true;
4962 }
4963 }
4964 }
4965 }
4966
4967 /* Traverse all PHI nodes in BB marking used operands. */
4968 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
4969 {
4970 use_operand_p arg_p;
4971 ssa_op_iter i;
4972 phi = gsi_stmt (si);
4973
4974 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4975 {
4976 tree arg = USE_FROM_PTR (arg_p);
4977 if (TREE_CODE (arg) == SSA_NAME)
4978 SET_BIT (live, SSA_NAME_VERSION (arg));
4979 }
4980 }
4981
4982 return need_assert;
4983 }
4984
4985 /* Do an RPO walk over the function computing SSA name liveness
4986 on-the-fly and deciding on assert expressions to insert.
4987 Returns true if there are assert expressions to be inserted. */
4988
4989 static bool
4990 find_assert_locations (void)
4991 {
4992 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4993 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4994 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4995 int rpo_cnt, i;
4996 bool need_asserts;
4997
4998 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4999 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5000 for (i = 0; i < rpo_cnt; ++i)
5001 bb_rpo[rpo[i]] = i;
5002
5003 need_asserts = false;
5004 for (i = rpo_cnt-1; i >= 0; --i)
5005 {
5006 basic_block bb = BASIC_BLOCK (rpo[i]);
5007 edge e;
5008 edge_iterator ei;
5009
5010 if (!live[rpo[i]])
5011 {
5012 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5013 sbitmap_zero (live[rpo[i]]);
5014 }
5015
5016 /* Process BB and update the live information with uses in
5017 this block. */
5018 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5019
5020 /* Merge liveness into the predecessor blocks and free it. */
5021 if (!sbitmap_empty_p (live[rpo[i]]))
5022 {
5023 int pred_rpo = i;
5024 FOR_EACH_EDGE (e, ei, bb->preds)
5025 {
5026 int pred = e->src->index;
5027 if (e->flags & EDGE_DFS_BACK)
5028 continue;
5029
5030 if (!live[pred])
5031 {
5032 live[pred] = sbitmap_alloc (num_ssa_names);
5033 sbitmap_zero (live[pred]);
5034 }
5035 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
5036
5037 if (bb_rpo[pred] < pred_rpo)
5038 pred_rpo = bb_rpo[pred];
5039 }
5040
5041 /* Record the RPO number of the last visited block that needs
5042 live information from this block. */
5043 last_rpo[rpo[i]] = pred_rpo;
5044 }
5045 else
5046 {
5047 sbitmap_free (live[rpo[i]]);
5048 live[rpo[i]] = NULL;
5049 }
5050
5051 /* We can free all successors live bitmaps if all their
5052 predecessors have been visited already. */
5053 FOR_EACH_EDGE (e, ei, bb->succs)
5054 if (last_rpo[e->dest->index] == i
5055 && live[e->dest->index])
5056 {
5057 sbitmap_free (live[e->dest->index]);
5058 live[e->dest->index] = NULL;
5059 }
5060 }
5061
5062 XDELETEVEC (rpo);
5063 XDELETEVEC (bb_rpo);
5064 XDELETEVEC (last_rpo);
5065 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
5066 if (live[i])
5067 sbitmap_free (live[i]);
5068 XDELETEVEC (live);
5069
5070 return need_asserts;
5071 }
5072
5073 /* Create an ASSERT_EXPR for NAME and insert it in the location
5074 indicated by LOC. Return true if we made any edge insertions. */
5075
5076 static bool
5077 process_assert_insertions_for (tree name, assert_locus_t loc)
5078 {
5079 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5080 gimple stmt;
5081 tree cond;
5082 gimple assert_stmt;
5083 edge_iterator ei;
5084 edge e;
5085
5086 /* If we have X <=> X do not insert an assert expr for that. */
5087 if (loc->expr == loc->val)
5088 return false;
5089
5090 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5091 assert_stmt = build_assert_expr_for (cond, name);
5092 if (loc->e)
5093 {
5094 /* We have been asked to insert the assertion on an edge. This
5095 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5096 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5097 || (gimple_code (gsi_stmt (loc->si))
5098 == GIMPLE_SWITCH));
5099
5100 gsi_insert_on_edge (loc->e, assert_stmt);
5101 return true;
5102 }
5103
5104 /* Otherwise, we can insert right after LOC->SI, provided the
5105 statement is not the last statement in the block. */
5106 stmt = gsi_stmt (loc->si);
5107 if (!stmt_ends_bb_p (stmt))
5108 {
5109 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
5110 return false;
5111 }
5112
5113 /* If STMT must be the last statement in BB, we can only insert new
5114 assertions on the non-abnormal edge out of BB. Note that since
5115 STMT is not control flow, there may only be one non-abnormal edge
5116 out of BB. */
5117 FOR_EACH_EDGE (e, ei, loc->bb->succs)
5118 if (!(e->flags & EDGE_ABNORMAL))
5119 {
5120 gsi_insert_on_edge (e, assert_stmt);
5121 return true;
5122 }
5123
5124 gcc_unreachable ();
5125 }
5126
5127
5128 /* Process all the insertions registered for every name N_i registered
5129 in NEED_ASSERT_FOR. The list of assertions to be inserted is
5130 found in ASSERTS_FOR[i]. */
5131
5132 static void
5133 process_assert_insertions (void)
5134 {
5135 unsigned i;
5136 bitmap_iterator bi;
5137 bool update_edges_p = false;
5138 int num_asserts = 0;
5139
5140 if (dump_file && (dump_flags & TDF_DETAILS))
5141 dump_all_asserts (dump_file);
5142
5143 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
5144 {
5145 assert_locus_t loc = asserts_for[i];
5146 gcc_assert (loc);
5147
5148 while (loc)
5149 {
5150 assert_locus_t next = loc->next;
5151 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
5152 free (loc);
5153 loc = next;
5154 num_asserts++;
5155 }
5156 }
5157
5158 if (update_edges_p)
5159 gsi_commit_edge_inserts ();
5160
5161 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5162 num_asserts);
5163 }
5164
5165
5166 /* Traverse the flowgraph looking for conditional jumps to insert range
5167 expressions. These range expressions are meant to provide information
5168 to optimizations that need to reason in terms of value ranges. They
5169 will not be expanded into RTL. For instance, given:
5170
5171 x = ...
5172 y = ...
5173 if (x < y)
5174 y = x - 2;
5175 else
5176 x = y + 3;
5177
5178 this pass will transform the code into:
5179
5180 x = ...
5181 y = ...
5182 if (x < y)
5183 {
5184 x = ASSERT_EXPR <x, x < y>
5185 y = x - 2
5186 }
5187 else
5188 {
5189 y = ASSERT_EXPR <y, x <= y>
5190 x = y + 3
5191 }
5192
5193 The idea is that once copy and constant propagation have run, other
5194 optimizations will be able to determine what ranges of values 'x' can
5195 take in different paths of the code, simply by checking the reaching
5196 definition of 'x'. */
5197
5198 static void
5199 insert_range_assertions (void)
5200 {
5201 need_assert_for = BITMAP_ALLOC (NULL);
5202 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5203
5204 calculate_dominance_info (CDI_DOMINATORS);
5205
5206 if (find_assert_locations ())
5207 {
5208 process_assert_insertions ();
5209 update_ssa (TODO_update_ssa_no_phi);
5210 }
5211
5212 if (dump_file && (dump_flags & TDF_DETAILS))
5213 {
5214 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5215 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5216 }
5217
5218 free (asserts_for);
5219 BITMAP_FREE (need_assert_for);
5220 }
5221
5222 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
5223 and "struct" hacks. If VRP can determine that the
5224 array subscript is a constant, check whether it is outside the valid
5225 range. If the array subscript is a RANGE, warn if it
5226 does not overlap with the valid range.
5227 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
5228
5229 static void
5230 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5231 {
5232 value_range_t* vr = NULL;
5233 tree low_sub, up_sub;
5234 tree low_bound, up_bound, up_bound_p1;
5235 tree base;
5236
5237 if (TREE_NO_WARNING (ref))
5238 return;
5239
5240 low_sub = up_sub = TREE_OPERAND (ref, 1);
5241 up_bound = array_ref_up_bound (ref);
5242
5243 /* Cannot check flexible arrays. */
5244 if (!up_bound
5245 || TREE_CODE (up_bound) != INTEGER_CST)
5246 return;
5247
5248 /* Accesses to trailing arrays via pointers may access storage
5249 beyond the type's array bounds. */
5250 base = get_base_address (ref);
5251 if (base && TREE_CODE (base) == MEM_REF)
5252 {
5253 tree cref, next = NULL_TREE;
5254
5255 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5256 return;
5257
5258 cref = TREE_OPERAND (ref, 0);
5259 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5260 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
5261 next && TREE_CODE (next) != FIELD_DECL;
5262 next = DECL_CHAIN (next))
5263 ;
5264
5265 /* If this is the last field in a struct type or a field in a
5266 union type do not warn. */
5267 if (!next)
5268 return;
5269 }
5270
5271 low_bound = array_ref_low_bound (ref);
5272 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
5273
5274 if (TREE_CODE (low_sub) == SSA_NAME)
5275 {
5276 vr = get_value_range (low_sub);
5277 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5278 {
5279 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5280 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5281 }
5282 }
5283
5284 if (vr && vr->type == VR_ANTI_RANGE)
5285 {
5286 if (TREE_CODE (up_sub) == INTEGER_CST
5287 && tree_int_cst_lt (up_bound, up_sub)
5288 && TREE_CODE (low_sub) == INTEGER_CST
5289 && tree_int_cst_lt (low_sub, low_bound))
5290 {
5291 warning_at (location, OPT_Warray_bounds,
5292 "array subscript is outside array bounds");
5293 TREE_NO_WARNING (ref) = 1;
5294 }
5295 }
5296 else if (TREE_CODE (up_sub) == INTEGER_CST
5297 && (ignore_off_by_one
5298 ? (tree_int_cst_lt (up_bound, up_sub)
5299 && !tree_int_cst_equal (up_bound_p1, up_sub))
5300 : (tree_int_cst_lt (up_bound, up_sub)
5301 || tree_int_cst_equal (up_bound_p1, up_sub))))
5302 {
5303 warning_at (location, OPT_Warray_bounds,
5304 "array subscript is above array bounds");
5305 TREE_NO_WARNING (ref) = 1;
5306 }
5307 else if (TREE_CODE (low_sub) == INTEGER_CST
5308 && tree_int_cst_lt (low_sub, low_bound))
5309 {
5310 warning_at (location, OPT_Warray_bounds,
5311 "array subscript is below array bounds");
5312 TREE_NO_WARNING (ref) = 1;
5313 }
5314 }
5315
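/* Illustrative sketch (hypothetical declaration, added for exposition):
   for 'int a[10];' the subscript in 'a[10] = 1' has up_bound 9 and
   up_bound_p1 10, so the "above array bounds" warning fires; the
   one-past-the-end address '&a[10]', checked with IGNORE_OFF_BY_ONE
   set, is accepted, while '&a[11]' is still diagnosed.  */
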
5316 /* Check whether the expression T, located at LOCATION, computes the
5317 address of an ARRAY_REF, and call check_array_ref on it. */
5318
5319 static void
5320 search_for_addr_array (tree t, location_t location)
5321 {
5322 while (TREE_CODE (t) == SSA_NAME)
5323 {
5324 gimple g = SSA_NAME_DEF_STMT (t);
5325
5326 if (gimple_code (g) != GIMPLE_ASSIGN)
5327 return;
5328
5329 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5330 != GIMPLE_SINGLE_RHS)
5331 return;
5332
5333 t = gimple_assign_rhs1 (g);
5334 }
5335
5336
5337 /* We are only interested in addresses of ARRAY_REF's. */
5338 if (TREE_CODE (t) != ADDR_EXPR)
5339 return;
5340
5341 /* Check each ARRAY_REF in the reference chain. */
5342 do
5343 {
5344 if (TREE_CODE (t) == ARRAY_REF)
5345 check_array_ref (location, t, true /*ignore_off_by_one*/);
5346
5347 t = TREE_OPERAND (t, 0);
5348 }
5349 while (handled_component_p (t));
5350
5351 if (TREE_CODE (t) == MEM_REF
5352 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5353 && !TREE_NO_WARNING (t))
5354 {
5355 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5356 tree low_bound, up_bound, el_sz;
5357 double_int idx;
5358 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5359 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5360 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5361 return;
5362
5363 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5364 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5365 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5366 if (!low_bound
5367 || TREE_CODE (low_bound) != INTEGER_CST
5368 || !up_bound
5369 || TREE_CODE (up_bound) != INTEGER_CST
5370 || !el_sz
5371 || TREE_CODE (el_sz) != INTEGER_CST)
5372 return;
5373
5374 idx = mem_ref_offset (t);
5375 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5376 if (double_int_scmp (idx, double_int_zero) < 0)
5377 {
5378 warning_at (location, OPT_Warray_bounds,
5379 "array subscript is below array bounds");
5380 TREE_NO_WARNING (t) = 1;
5381 }
5382 else if (double_int_scmp (idx,
5383 double_int_add
5384 (double_int_add
5385 (tree_to_double_int (up_bound),
5386 double_int_neg
5387 (tree_to_double_int (low_bound))),
5388 double_int_one)) > 0)
5389 {
5390 warning_at (location, OPT_Warray_bounds,
5391 "array subscript is above array bounds");
5392 TREE_NO_WARNING (t) = 1;
5393 }
5394 }
5395 }
5396
5397 /* walk_tree() callback that checks if *TP is
5398 an ARRAY_REF inside an ADDR_EXPR (in which an array
5399 subscript one outside the valid range is allowed). Call
5400 check_array_ref for each ARRAY_REF found. The location is
5401 passed in DATA. */
5402
5403 static tree
5404 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5405 {
5406 tree t = *tp;
5407 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5408 location_t location;
5409
5410 if (EXPR_HAS_LOCATION (t))
5411 location = EXPR_LOCATION (t);
5412 else
5413 {
5414 location_t *locp = (location_t *) wi->info;
5415 location = *locp;
5416 }
5417
5418 *walk_subtree = TRUE;
5419
5420 if (TREE_CODE (t) == ARRAY_REF)
5421 check_array_ref (location, t, false /*ignore_off_by_one*/);
5422
5423 if (TREE_CODE (t) == MEM_REF
5424 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5425 search_for_addr_array (TREE_OPERAND (t, 0), location);
5426
5427 if (TREE_CODE (t) == ADDR_EXPR)
5428 *walk_subtree = FALSE;
5429
5430 return NULL_TREE;
5431 }
5432
5433 /* Walk over all statements of all reachable BBs and call check_array_bounds
5434 on them. */
5435
5436 static void
5437 check_all_array_refs (void)
5438 {
5439 basic_block bb;
5440 gimple_stmt_iterator si;
5441
5442 FOR_EACH_BB (bb)
5443 {
5444 edge_iterator ei;
5445 edge e;
5446 bool executable = false;
5447
5448 /* Skip blocks that were found to be unreachable. */
5449 FOR_EACH_EDGE (e, ei, bb->preds)
5450 executable |= !!(e->flags & EDGE_EXECUTABLE);
5451 if (!executable)
5452 continue;
5453
5454 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5455 {
5456 gimple stmt = gsi_stmt (si);
5457 struct walk_stmt_info wi;
5458 if (!gimple_has_location (stmt))
5459 continue;
5460
5461 if (is_gimple_call (stmt))
5462 {
5463 size_t i;
5464 size_t n = gimple_call_num_args (stmt);
5465 for (i = 0; i < n; i++)
5466 {
5467 tree arg = gimple_call_arg (stmt, i);
5468 search_for_addr_array (arg, gimple_location (stmt));
5469 }
5470 }
5471 else
5472 {
5473 memset (&wi, 0, sizeof (wi));
5474 wi.info = CONST_CAST (void *, (const void *)
5475 gimple_location_ptr (stmt));
5476
5477 walk_gimple_op (gsi_stmt (si),
5478 check_array_bounds,
5479 &wi);
5480 }
5481 }
5482 }
5483 }
5484
5485 /* Convert range assertion expressions into the implied copies and
5486 copy propagate away the copies. Doing the trivial copy propagation
5487 here avoids the need to run the full copy propagation pass after
5488 VRP.
5489
5490 FIXME, this will eventually lead to copy propagation removing the
5491 names that had useful range information attached to them. For
5492 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5493 then N_i will have the range [3, +INF].
5494
5495 However, by converting the assertion into the implied copy
5496 operation N_i = N_j, we will then copy-propagate N_j into the uses
5497 of N_i and lose the range information. We may want to hold on to
5498 ASSERT_EXPRs a little while longer as the ranges could be used in
5499 things like jump threading.
5500
5501 The problem with keeping ASSERT_EXPRs around is that passes after
5502 VRP need to handle them appropriately.
5503
5504 Another approach would be to make the range information a first
5505 class property of the SSA_NAME so that it can be queried from
5506 any pass. This is made somewhat more complex by the need for
5507 multiple ranges to be associated with one SSA_NAME. */
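/* Illustrative sketch of the transformation (SSA names are made up):

	x_2 = ASSERT_EXPR <x_1, x_1 > 3>;
	foo (x_2);

   becomes, after replacing every use of x_2 with x_1 and deleting the
   now-dead assertion,

	foo (x_1);
*/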
5508
5509 static void
5510 remove_range_assertions (void)
5511 {
5512 basic_block bb;
5513 gimple_stmt_iterator si;
5514
5515 /* Note that the GSI iterator bump happens at the bottom of the
5516 loop and no bump is necessary if we're removing the statement
5517 referenced by the current GSI. */
5518 FOR_EACH_BB (bb)
5519 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5520 {
5521 gimple stmt = gsi_stmt (si);
5522 gimple use_stmt;
5523
5524 if (is_gimple_assign (stmt)
5525 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5526 {
5527 tree rhs = gimple_assign_rhs1 (stmt);
5528 tree var;
5529 tree cond = fold (ASSERT_EXPR_COND (rhs));
5530 use_operand_p use_p;
5531 imm_use_iterator iter;
5532
5533 gcc_assert (cond != boolean_false_node);
5534
5535 /* Propagate the RHS into every use of the LHS. */
5536 var = ASSERT_EXPR_VAR (rhs);
5537 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5538 gimple_assign_lhs (stmt))
5539 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5540 {
5541 SET_USE (use_p, var);
5542 gcc_assert (TREE_CODE (var) == SSA_NAME);
5543 }
5544
5545 /* And finally, remove the copy; it is not needed. */
5546 gsi_remove (&si, true);
5547 release_defs (stmt);
5548 }
5549 else
5550 gsi_next (&si);
5551 }
5552 }
5553
5554
5555 /* Return true if STMT is interesting for VRP. */
5556
5557 static bool
5558 stmt_interesting_for_vrp (gimple stmt)
5559 {
5560 if (gimple_code (stmt) == GIMPLE_PHI
5561 && is_gimple_reg (gimple_phi_result (stmt))
5562 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5563 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5564 return true;
5565 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5566 {
5567 tree lhs = gimple_get_lhs (stmt);
5568
5569 /* In general, assignments with virtual operands are not useful
5570 for deriving ranges, with the obvious exception of calls to
5571 builtin functions. */
5572 if (lhs && TREE_CODE (lhs) == SSA_NAME
5573 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5574 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5575 && ((is_gimple_call (stmt)
5576 && gimple_call_fndecl (stmt) != NULL_TREE
5577 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5578 || !gimple_vuse (stmt)))
5579 return true;
5580 }
5581 else if (gimple_code (stmt) == GIMPLE_COND
5582 || gimple_code (stmt) == GIMPLE_SWITCH)
5583 return true;
5584
5585 return false;
5586 }
5587
5588
5589 /* Initialize local data structures for VRP. */
5590
5591 static void
5592 vrp_initialize (void)
5593 {
5594 basic_block bb;
5595
5596 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5597 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5598
5599 FOR_EACH_BB (bb)
5600 {
5601 gimple_stmt_iterator si;
5602
5603 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5604 {
5605 gimple phi = gsi_stmt (si);
5606 if (!stmt_interesting_for_vrp (phi))
5607 {
5608 tree lhs = PHI_RESULT (phi);
5609 set_value_range_to_varying (get_value_range (lhs));
5610 prop_set_simulate_again (phi, false);
5611 }
5612 else
5613 prop_set_simulate_again (phi, true);
5614 }
5615
5616 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5617 {
5618 gimple stmt = gsi_stmt (si);
5619
5620 /* If the statement is a control insn, then we must make
5621 sure it is simulated at least once. Failing to do so
5622 means that its outgoing edges will never get added. */
5623 if (stmt_ends_bb_p (stmt))
5624 prop_set_simulate_again (stmt, true);
5625 else if (!stmt_interesting_for_vrp (stmt))
5626 {
5627 ssa_op_iter i;
5628 tree def;
5629 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5630 set_value_range_to_varying (get_value_range (def));
5631 prop_set_simulate_again (stmt, false);
5632 }
5633 else
5634 prop_set_simulate_again (stmt, true);
5635 }
5636 }
5637 }
5638
5639 /* Return the singleton value-range for NAME, or NAME itself if it does not have one. */
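/* For instance (illustrative): if the lattice records the singleton range
   [7, 7] for x_1, vrp_valueize (x_1) returns the constant 7, which lets
   gimple_fold_stmt_to_constant fold statements that use x_1; otherwise
   x_1 is returned unchanged.  */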
5640
5641 static inline tree
5642 vrp_valueize (tree name)
5643 {
5644 if (TREE_CODE (name) == SSA_NAME)
5645 {
5646 value_range_t *vr = get_value_range (name);
5647 if (vr->type == VR_RANGE
5648 && (vr->min == vr->max
5649 || operand_equal_p (vr->min, vr->max, 0)))
5650 return vr->min;
5651 }
5652 return name;
5653 }
5654
5655 /* Visit assignment STMT. If it produces an interesting range, record
5656 the SSA name in *OUTPUT_P. */
5657
5658 static enum ssa_prop_result
5659 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5660 {
5661 tree def, lhs;
5662 ssa_op_iter iter;
5663 enum gimple_code code = gimple_code (stmt);
5664 lhs = gimple_get_lhs (stmt);
5665
5666 /* We only keep track of ranges in integral and pointer types. */
5667 if (TREE_CODE (lhs) == SSA_NAME
5668 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5669 /* It is valid to have NULL MIN/MAX values on a type. See
5670 build_range_type. */
5671 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5672 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5673 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5674 {
5675 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5676
5677 /* Try folding the statement to a constant first. */
5678 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
5679 if (tem && !is_overflow_infinity (tem))
5680 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
5681 /* Then dispatch to value-range extracting functions. */
5682 else if (code == GIMPLE_CALL)
5683 extract_range_basic (&new_vr, stmt);
5684 else
5685 extract_range_from_assignment (&new_vr, stmt);
5686
5687 if (update_value_range (lhs, &new_vr))
5688 {
5689 *output_p = lhs;
5690
5691 if (dump_file && (dump_flags & TDF_DETAILS))
5692 {
5693 fprintf (dump_file, "Found new range for ");
5694 print_generic_expr (dump_file, lhs, 0);
5695 fprintf (dump_file, ": ");
5696 dump_value_range (dump_file, &new_vr);
5697 fprintf (dump_file, "\n\n");
5698 }
5699
5700 if (new_vr.type == VR_VARYING)
5701 return SSA_PROP_VARYING;
5702
5703 return SSA_PROP_INTERESTING;
5704 }
5705
5706 return SSA_PROP_NOT_INTERESTING;
5707 }
5708
5709 /* Every other statement produces no useful ranges. */
5710 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5711 set_value_range_to_varying (get_value_range (def));
5712
5713 return SSA_PROP_VARYING;
5714 }
5715
5716 /* Helper that gets the value range of the SSA_NAME with version I,
5717 or a symbolic range containing only that SSA_NAME if its value
5718 range is varying or undefined. */
5719
5720 static inline value_range_t
5721 get_vr_for_comparison (int i)
5722 {
5723 value_range_t vr = *(vr_value[i]);
5724
5725 /* If name N_i does not have a valid range, use N_i as its own
5726 range. This allows us to compare against names that may
5727 have N_i in their ranges. */
5728 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5729 {
5730 vr.type = VR_RANGE;
5731 vr.min = ssa_name (i);
5732 vr.max = ssa_name (i);
5733 }
5734
5735 return vr;
5736 }
5737
5738 /* Compare all the value ranges for names equivalent to VAR with VAL
5739 using comparison code COMP. Return the same value returned by
5740 compare_range_with_value, including the setting of
5741 *STRICT_OVERFLOW_P. */
5742
5743 static tree
5744 compare_name_with_value (enum tree_code comp, tree var, tree val,
5745 bool *strict_overflow_p)
5746 {
5747 bitmap_iterator bi;
5748 unsigned i;
5749 bitmap e;
5750 tree retval, t;
5751 int used_strict_overflow;
5752 bool sop;
5753 value_range_t equiv_vr;
5754
5755 /* Get the set of equivalences for VAR. */
5756 e = get_value_range (var)->equiv;
5757
5758 /* Start at -1. Set it to 0 if we do a comparison without relying
5759 on overflow, or 1 if all comparisons rely on overflow. */
5760 used_strict_overflow = -1;
5761
5762 /* Compare VAR's value range with VAL. */
5763 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5764 sop = false;
5765 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5766 if (retval)
5767 used_strict_overflow = sop ? 1 : 0;
5768
5769 /* If the equiv set is empty we have done all work we need to do. */
5770 if (e == NULL)
5771 {
5772 if (retval
5773 && used_strict_overflow > 0)
5774 *strict_overflow_p = true;
5775 return retval;
5776 }
5777
5778 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5779 {
5780 equiv_vr = get_vr_for_comparison (i);
5781 sop = false;
5782 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5783 if (t)
5784 {
5785 /* If we get different answers from different members
5786 of the equivalence set this check must be in a dead
5787 code region. Folding it to a trap representation
5788 would be correct here. For now just return don't-know. */
5789 if (retval != NULL
5790 && t != retval)
5791 {
5792 retval = NULL_TREE;
5793 break;
5794 }
5795 retval = t;
5796
5797 if (!sop)
5798 used_strict_overflow = 0;
5799 else if (used_strict_overflow < 0)
5800 used_strict_overflow = 1;
5801 }
5802 }
5803
5804 if (retval
5805 && used_strict_overflow > 0)
5806 *strict_overflow_p = true;
5807
5808 return retval;
5809 }
5810
5811
5812 /* Given a comparison code COMP and names N1 and N2, compare all the
5813 ranges equivalent to N1 against all the ranges equivalent to N2
5814 to determine the value of N1 COMP N2. Return the same value
5815 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5816 whether we relied on an overflow infinity in the comparison. */
5817
5818
5819 static tree
5820 compare_names (enum tree_code comp, tree n1, tree n2,
5821 bool *strict_overflow_p)
5822 {
5823 tree t, retval;
5824 bitmap e1, e2;
5825 bitmap_iterator bi1, bi2;
5826 unsigned i1, i2;
5827 int used_strict_overflow;
5828 static bitmap_obstack *s_obstack = NULL;
5829 static bitmap s_e1 = NULL, s_e2 = NULL;
5830
5831 /* Compare the ranges of every name equivalent to N1 against the
5832 ranges of every name equivalent to N2. */
5833 e1 = get_value_range (n1)->equiv;
5834 e2 = get_value_range (n2)->equiv;
5835
5836 /* Use the fake bitmaps if e1 or e2 are not available. */
5837 if (s_obstack == NULL)
5838 {
5839 s_obstack = XNEW (bitmap_obstack);
5840 bitmap_obstack_initialize (s_obstack);
5841 s_e1 = BITMAP_ALLOC (s_obstack);
5842 s_e2 = BITMAP_ALLOC (s_obstack);
5843 }
5844 if (e1 == NULL)
5845 e1 = s_e1;
5846 if (e2 == NULL)
5847 e2 = s_e2;
5848
5849 /* Add N1 and N2 to their own set of equivalences to avoid
5850 duplicating the body of the loop just to check N1 and N2
5851 ranges. */
5852 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5853 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5854
5855 /* If the equivalence sets have a common intersection, then the two
5856 names can be compared without checking their ranges. */
5857 if (bitmap_intersect_p (e1, e2))
5858 {
5859 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5860 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5861
5862 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5863 ? boolean_true_node
5864 : boolean_false_node;
5865 }
5866
5867 /* Start at -1. Set it to 0 if we do a comparison without relying
5868 on overflow, or 1 if all comparisons rely on overflow. */
5869 used_strict_overflow = -1;
5870
5871 /* Otherwise, compare all the equivalent ranges. N1 and N2 were
5872 already added to their own equivalence sets above, so the loops
5873 below cover their ranges as well. */
5874 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5875 {
5876 value_range_t vr1 = get_vr_for_comparison (i1);
5877
5878 t = retval = NULL_TREE;
5879 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5880 {
5881 bool sop = false;
5882
5883 value_range_t vr2 = get_vr_for_comparison (i2);
5884
5885 t = compare_ranges (comp, &vr1, &vr2, &sop);
5886 if (t)
5887 {
5888 /* If we get different answers from different members
5889 of the equivalence set this check must be in a dead
5890 code region. Folding it to a trap representation
5891 would be correct here. For now just return don't-know. */
5892 if (retval != NULL
5893 && t != retval)
5894 {
5895 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5896 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5897 return NULL_TREE;
5898 }
5899 retval = t;
5900
5901 if (!sop)
5902 used_strict_overflow = 0;
5903 else if (used_strict_overflow < 0)
5904 used_strict_overflow = 1;
5905 }
5906 }
5907
5908 if (retval)
5909 {
5910 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5911 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5912 if (used_strict_overflow > 0)
5913 *strict_overflow_p = true;
5914 return retval;
5915 }
5916 }
5917
5918 /* None of the equivalent ranges are useful in computing this
5919 comparison. */
5920 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5921 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5922 return NULL_TREE;
5923 }
5924
5925 /* Helper function for vrp_evaluate_conditional_warnv. */
5926
5927 static tree
5928 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5929 tree op0, tree op1,
5930 bool * strict_overflow_p)
5931 {
5932 value_range_t *vr0, *vr1;
5933
5934 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5935 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5936
5937 if (vr0 && vr1)
5938 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5939 else if (vr0 && vr1 == NULL)
5940 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5941 else if (vr0 == NULL && vr1)
5942 return (compare_range_with_value
5943 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5944 return NULL;
5945 }
5946
5947 /* Helper function for vrp_evaluate_conditional_warnv. */
5948
5949 static tree
5950 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5951 tree op1, bool use_equiv_p,
5952 bool *strict_overflow_p, bool *only_ranges)
5953 {
5954 tree ret;
5955 if (only_ranges)
5956 *only_ranges = true;
5957
5958 /* We only deal with integral and pointer types. */
5959 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5960 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5961 return NULL_TREE;
5962
5963 if (use_equiv_p)
5964 {
5965 if (only_ranges
5966 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5967 (code, op0, op1, strict_overflow_p)))
5968 return ret;
5969 *only_ranges = false;
5970 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5971 return compare_names (code, op0, op1, strict_overflow_p);
5972 else if (TREE_CODE (op0) == SSA_NAME)
5973 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5974 else if (TREE_CODE (op1) == SSA_NAME)
5975 return (compare_name_with_value
5976 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5977 }
5978 else
5979 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5980 strict_overflow_p);
5981 return NULL_TREE;
5982 }
5983
5984 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5985 information. Return NULL if the conditional cannot be evaluated.
5986 The ranges of all the names equivalent with the operands in COND
5987 will be used when trying to compute the value. If the result is
5988 based on undefined signed overflow, issue a warning if
5989 appropriate. */
5990
5991 static tree
5992 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5993 {
5994 bool sop;
5995 tree ret;
5996 bool only_ranges;
5997
5998 /* Some passes and foldings leak constants with overflow flag set
5999 into the IL. Avoid doing wrong things with these and bail out. */
6000 if ((TREE_CODE (op0) == INTEGER_CST
6001 && TREE_OVERFLOW (op0))
6002 || (TREE_CODE (op1) == INTEGER_CST
6003 && TREE_OVERFLOW (op1)))
6004 return NULL_TREE;
6005
6006 sop = false;
6007 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6008 &only_ranges);
6009
6010 if (ret && sop)
6011 {
6012 enum warn_strict_overflow_code wc;
6013 const char* warnmsg;
6014
6015 if (is_gimple_min_invariant (ret))
6016 {
6017 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6018 warnmsg = G_("assuming signed overflow does not occur when "
6019 "simplifying conditional to constant");
6020 }
6021 else
6022 {
6023 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6024 warnmsg = G_("assuming signed overflow does not occur when "
6025 "simplifying conditional");
6026 }
6027
6028 if (issue_strict_overflow_warning (wc))
6029 {
6030 location_t location;
6031
6032 if (!gimple_has_location (stmt))
6033 location = input_location;
6034 else
6035 location = gimple_location (stmt);
6036 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6037 }
6038 }
6039
6040 if (warn_type_limits
6041 && ret && only_ranges
6042 && TREE_CODE_CLASS (code) == tcc_comparison
6043 && TREE_CODE (op0) == SSA_NAME)
6044 {
6045 /* If the comparison is being folded and the operand on the LHS
6046 is being compared against a constant value that is outside of
6047 the natural range of OP0's type, then the predicate will
6048 always fold regardless of the value of OP0. If -Wtype-limits
6049 was specified, emit a warning. */
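	 /* For instance (an illustrative sketch): if OP0 is unsigned, its
	    recorded range is the full [0, TYPE_MAX], and the test is
	    OP0 < 0, the predicate folds to false from the ranges alone
	    regardless of OP0's value, so the "comparison always false"
	    warning is emitted.  */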
6050 tree type = TREE_TYPE (op0);
6051 value_range_t *vr0 = get_value_range (op0);
6052
6053 if (vr0->type != VR_VARYING
6054 && INTEGRAL_TYPE_P (type)
6055 && vrp_val_is_min (vr0->min)
6056 && vrp_val_is_max (vr0->max)
6057 && is_gimple_min_invariant (op1))
6058 {
6059 location_t location;
6060
6061 if (!gimple_has_location (stmt))
6062 location = input_location;
6063 else
6064 location = gimple_location (stmt);
6065
6066 warning_at (location, OPT_Wtype_limits,
6067 integer_zerop (ret)
6068 ? G_("comparison always false "
6069 "due to limited range of data type")
6070 : G_("comparison always true "
6071 "due to limited range of data type"));
6072 }
6073 }
6074
6075 return ret;
6076 }
6077
6078
6079 /* Visit conditional statement STMT. If we can determine which edge
6080 will be taken out of STMT's basic block, record it in
6081 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6082 SSA_PROP_VARYING. */
6083
6084 static enum ssa_prop_result
6085 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
6086 {
6087 tree val;
6088 bool sop;
6089
6090 *taken_edge_p = NULL;
6091
6092 if (dump_file && (dump_flags & TDF_DETAILS))
6093 {
6094 tree use;
6095 ssa_op_iter i;
6096
6097 fprintf (dump_file, "\nVisiting conditional with predicate: ");
6098 print_gimple_stmt (dump_file, stmt, 0, 0);
6099 fprintf (dump_file, "\nWith known ranges\n");
6100
6101 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
6102 {
6103 fprintf (dump_file, "\t");
6104 print_generic_expr (dump_file, use, 0);
6105 fprintf (dump_file, ": ");
6106 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
6107 }
6108
6109 fprintf (dump_file, "\n");
6110 }
6111
6112 /* Compute the value of the predicate COND by checking the known
6113 ranges of each of its operands.
6114
6115 Note that we cannot evaluate all the equivalent ranges here
6116 because those ranges may not yet be final and with the current
6117 propagation strategy, we cannot determine when the value ranges
6118 of the names in the equivalence set have changed.
6119
6120 For instance, given the following code fragment
6121
6122 i_5 = PHI <8, i_13>
6123 ...
6124 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
6125 if (i_14 == 1)
6126 ...
6127
6128 Assume that on the first visit to i_14, i_5 has the temporary
6129 range [8, 8] because the second argument to the PHI function is
6130 not yet executable. We derive the range ~[0, 0] for i_14 and the
6131 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
6132 the first time, since i_14 is equivalent to the range [8, 8], we
6133 determine that the predicate is always false.
6134
6135 On the next round of propagation, i_13 is determined to be
6136 VARYING, which causes i_5 to drop down to VARYING. So, another
6137 visit to i_14 is scheduled. In this second visit, we compute the
6138 exact same range and equivalence set for i_14, namely ~[0, 0] and
6139 { i_5 }. But we did not have the previous range for i_5
6140 registered, so vrp_visit_assignment thinks that the range for
6141 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
6142 is not visited again, which stops propagation from visiting
6143 statements in the THEN clause of that if().
6144
6145 To properly fix this we would need to keep the previous range
6146 value for the names in the equivalence set. This way we would've
6147 discovered that from one visit to the other i_5 changed from
6148 range [8, 8] to VR_VARYING.
6149
6150 However, fixing this apparent limitation may not be worth the
6151 additional checking. Testing on several code bases (GCC, DLV,
6152 MICO, TRAMP3D and SPEC2000) showed that doing this results in
6153 4 more predicates folded in SPEC. */
6154 sop = false;
6155
6156 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
6157 gimple_cond_lhs (stmt),
6158 gimple_cond_rhs (stmt),
6159 false, &sop, NULL);
6160 if (val)
6161 {
6162 if (!sop)
6163 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
6164 else
6165 {
6166 if (dump_file && (dump_flags & TDF_DETAILS))
6167 fprintf (dump_file,
6168 "\nIgnoring predicate evaluation because "
6169 "it assumes that signed overflow is undefined");
6170 val = NULL_TREE;
6171 }
6172 }
6173
6174 if (dump_file && (dump_flags & TDF_DETAILS))
6175 {
6176 fprintf (dump_file, "\nPredicate evaluates to: ");
6177 if (val == NULL_TREE)
6178 fprintf (dump_file, "DON'T KNOW\n");
6179 else
6180 print_generic_stmt (dump_file, val, 0);
6181 }
6182
6183 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6184 }
6185
6186 /* Searches the case labels of switch statement STMT for the index *IDX
6187 of the CASE_LABEL that includes the value VAL. The search is restricted
6188 to the range [START_IDX, n - 1], where n is the number of case labels.
6189
6190 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6191 returned.
6192
6193 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6194 it is placed in IDX and false is returned.
6195
6196 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6197 returned. */
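/* For example (illustrative): for a switch whose case labels, at indices
   1, 2 and 3 (index 0 is the default label), are 1, 5 and 9, searching
   for VAL == 5 stores 2 in *IDX and returns true, while searching for
   VAL == 6 stores 3 in *IDX and returns false.  */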
6198
6199 static bool
6200 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6201 {
6202 size_t n = gimple_switch_num_labels (stmt);
6203 size_t low, high;
6204
6205 /* Find case label for minimum of the value range or the next one.
6206 At each iteration we are searching in [low, high - 1]. */
6207
6208 for (low = start_idx, high = n; high != low; )
6209 {
6210 tree t;
6211 int cmp;
6212 /* Note that i != high, so we never ask for n. */
6213 size_t i = (high + low) / 2;
6214 t = gimple_switch_label (stmt, i);
6215
6216 /* Cache the result of comparing CASE_LOW and val. */
6217 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6218
6219 if (cmp == 0)
6220 {
6221 /* Ranges cannot be empty. */
6222 *idx = i;
6223 return true;
6224 }
6225 else if (cmp > 0)
6226 high = i;
6227 else
6228 {
6229 low = i + 1;
6230 if (CASE_HIGH (t) != NULL
6231 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6232 {
6233 *idx = i;
6234 return true;
6235 }
6236 }
6237 }
6238
6239 *idx = high;
6240 return false;
6241 }
6242
6243 /* Searches the case labels of switch statement STMT for the range of
6244 CASE_LABELs that is used for values between MIN and MAX. The first index is placed in MIN_IDX. The
6245 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6246 then MAX_IDX < MIN_IDX.
6247 Returns true if the default label is not needed. */
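/* Continuing the example above (illustrative): for the labels 1, 5 and 9
   and the value range [5, 9], *MIN_IDX becomes 2 and *MAX_IDX becomes 3;
   since the values 6, 7 and 8 have no label, the default label is still
   needed and false is returned.  */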
6248
6249 static bool
6250 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6251 size_t *max_idx)
6252 {
6253 size_t i, j;
6254 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6255 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6256
6257 if (i == j
6258 && min_take_default
6259 && max_take_default)
6260 {
6261 /* Only the default case label is reached.
6262 Return an empty range. */
6263 *min_idx = 1;
6264 *max_idx = 0;
6265 return false;
6266 }
6267 else
6268 {
6269 bool take_default = min_take_default || max_take_default;
6270 tree low, high;
6271 size_t k;
6272
6273 if (max_take_default)
6274 j--;
6275
6276 /* If the case label range is contiguous, we do not need
6277 the default case label. Verify that. */
6278 high = CASE_LOW (gimple_switch_label (stmt, i));
6279 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6280 high = CASE_HIGH (gimple_switch_label (stmt, i));
6281 for (k = i + 1; k <= j; ++k)
6282 {
6283 low = CASE_LOW (gimple_switch_label (stmt, k));
6284 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
6285 {
6286 take_default = true;
6287 break;
6288 }
6289 high = low;
6290 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6291 high = CASE_HIGH (gimple_switch_label (stmt, k));
6292 }
6293
6294 *min_idx = i;
6295 *max_idx = j;
6296 return !take_default;
6297 }
6298 }
6299
6300 /* Visit switch statement STMT. If we can determine which edge
6301 will be taken out of STMT's basic block, record it in
6302 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6303 SSA_PROP_VARYING. */
6304
6305 static enum ssa_prop_result
6306 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6307 {
6308 tree op, val;
6309 value_range_t *vr;
6310 size_t i = 0, j = 0;
6311 bool take_default;
6312
6313 *taken_edge_p = NULL;
6314 op = gimple_switch_index (stmt);
6315 if (TREE_CODE (op) != SSA_NAME)
6316 return SSA_PROP_VARYING;
6317
6318 vr = get_value_range (op);
6319 if (dump_file && (dump_flags & TDF_DETAILS))
6320 {
6321 fprintf (dump_file, "\nVisiting switch expression with operand ");
6322 print_generic_expr (dump_file, op, 0);
6323 fprintf (dump_file, " with known range ");
6324 dump_value_range (dump_file, vr);
6325 fprintf (dump_file, "\n");
6326 }
6327
6328 if (vr->type != VR_RANGE
6329 || symbolic_range_p (vr))
6330 return SSA_PROP_VARYING;
6331
6332 /* Find the single edge that is taken from the switch expression. */
6333 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6334
6335 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6336 label. */
6337 if (j < i)
6338 {
6339 gcc_assert (take_default);
6340 val = gimple_switch_default_label (stmt);
6341 }
6342 else
6343 {
6344 /* Check if the labels with indices i to j, and possibly the default
6345 label, all reach the same destination label. */
6346
6347 val = gimple_switch_label (stmt, i);
6348 if (take_default
6349 && CASE_LABEL (gimple_switch_default_label (stmt))
6350 != CASE_LABEL (val))
6351 {
6352 if (dump_file && (dump_flags & TDF_DETAILS))
6353 fprintf (dump_file, " not a single destination for this "
6354 "range\n");
6355 return SSA_PROP_VARYING;
6356 }
6357 for (++i; i <= j; ++i)
6358 {
6359 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6360 {
6361 if (dump_file && (dump_flags & TDF_DETAILS))
6362 fprintf (dump_file, " not a single destination for this "
6363 "range\n");
6364 return SSA_PROP_VARYING;
6365 }
6366 }
6367 }
6368
6369 *taken_edge_p = find_edge (gimple_bb (stmt),
6370 label_to_block (CASE_LABEL (val)));
6371
6372 if (dump_file && (dump_flags & TDF_DETAILS))
6373 {
6374 fprintf (dump_file, " will take edge to ");
6375 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6376 }
6377
6378 return SSA_PROP_INTERESTING;
6379 }
6380
6381
6382 /* Evaluate statement STMT. If the statement produces a useful range,
6383 return SSA_PROP_INTERESTING and record the SSA name with the
6384 interesting range into *OUTPUT_P.
6385
6386 If STMT is a conditional branch and we can determine its truth
6387 value, the taken edge is recorded in *TAKEN_EDGE_P.
6388
6389 If STMT produces a varying value, return SSA_PROP_VARYING. */
6390
6391 static enum ssa_prop_result
6392 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6393 {
6394 tree def;
6395 ssa_op_iter iter;
6396
6397 if (dump_file && (dump_flags & TDF_DETAILS))
6398 {
6399 fprintf (dump_file, "\nVisiting statement:\n");
6400 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6401 fprintf (dump_file, "\n");
6402 }
6403
6404 if (!stmt_interesting_for_vrp (stmt))
6405 gcc_assert (stmt_ends_bb_p (stmt));
6406 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6407 {
6408 /* In general, assignments with virtual operands are not useful
6409 for deriving ranges, with the obvious exception of calls to
6410 builtin functions. */
6411 if ((is_gimple_call (stmt)
6412 && gimple_call_fndecl (stmt) != NULL_TREE
6413 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6414 || !gimple_vuse (stmt))
6415 return vrp_visit_assignment_or_call (stmt, output_p);
6416 }
6417 else if (gimple_code (stmt) == GIMPLE_COND)
6418 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6419 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6420 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6421
6422 /* All other statements produce nothing of interest for VRP, so mark
6423 their outputs varying and prevent further simulation. */
6424 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6425 set_value_range_to_varying (get_value_range (def));
6426
6427 return SSA_PROP_VARYING;
6428 }
6429
6430
6431 /* Meet operation for value ranges. Given two value ranges VR0 and
6432 VR1, store in VR0 a range that contains both VR0 and VR1. This
6433 may not be the smallest possible such range. */
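/* For example (an illustrative sketch): the meet of [1, 5] and [10, 20]
   is their convex hull [1, 20]; the meet of VR_UNDEFINED with any range
   is that range; and the meet of anything with VR_VARYING is
   VR_VARYING.  */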
6434
6435 static void
6436 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6437 {
6438 if (vr0->type == VR_UNDEFINED)
6439 {
6440 copy_value_range (vr0, vr1);
6441 return;
6442 }
6443
6444 if (vr1->type == VR_UNDEFINED)
6445 {
6446 /* Nothing to do. VR0 already has the resulting range. */
6447 return;
6448 }
6449
6450 if (vr0->type == VR_VARYING)
6451 {
6452 /* Nothing to do. VR0 already has the resulting range. */
6453 return;
6454 }
6455
6456 if (vr1->type == VR_VARYING)
6457 {
6458 set_value_range_to_varying (vr0);
6459 return;
6460 }
6461
6462 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6463 {
6464 int cmp;
6465 tree min, max;
6466
6467 /* Compute the convex hull of the ranges. The lower limit of
6468 the new range is the minimum of the two ranges. If they
6469 cannot be compared, then give up. */
6470 cmp = compare_values (vr0->min, vr1->min);
6471 if (cmp == 0 || cmp == 1)
6472 min = vr1->min;
6473 else if (cmp == -1)
6474 min = vr0->min;
6475 else
6476 goto give_up;
6477
6478 /* Similarly, the upper limit of the new range is the maximum
6479 of the two ranges. If they cannot be compared, then
6480 give up. */
6481 cmp = compare_values (vr0->max, vr1->max);
6482 if (cmp == 0 || cmp == -1)
6483 max = vr1->max;
6484 else if (cmp == 1)
6485 max = vr0->max;
6486 else
6487 goto give_up;
6488
6489 /* Check for useless ranges. */
6490 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6491 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6492 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6493 goto give_up;
6494
6495 /* The resulting set of equivalences is the intersection of
6496 the two sets. */
6497 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6498 bitmap_and_into (vr0->equiv, vr1->equiv);
6499 else if (vr0->equiv && !vr1->equiv)
6500 bitmap_clear (vr0->equiv);
6501
6502 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6503 }
6504 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6505 {
6506 /* Two anti-ranges meet only if their complements intersect.
6507 Only handle the case of identical ranges. */
6508 if (compare_values (vr0->min, vr1->min) == 0
6509 && compare_values (vr0->max, vr1->max) == 0
6510 && compare_values (vr0->min, vr0->max) == 0)
6511 {
6512 /* The resulting set of equivalences is the intersection of
6513 the two sets. */
6514 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6515 bitmap_and_into (vr0->equiv, vr1->equiv);
6516 else if (vr0->equiv && !vr1->equiv)
6517 bitmap_clear (vr0->equiv);
6518 }
6519 else
6520 goto give_up;
6521 }
6522 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6523 {
6524 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6525 only handle the case where the ranges have an empty intersection.
6526 The result of the meet operation is the anti-range. */
6527 if (!symbolic_range_p (vr0)
6528 && !symbolic_range_p (vr1)
6529 && !value_ranges_intersect_p (vr0, vr1))
6530 {
6531 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6532 set. We need to compute the intersection of the two
6533 equivalence sets. */
6534 if (vr1->type == VR_ANTI_RANGE)
6535 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6536
6537 /* The resulting set of equivalences is the intersection of
6538 the two sets. */
6539 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6540 bitmap_and_into (vr0->equiv, vr1->equiv);
6541 else if (vr0->equiv && !vr1->equiv)
6542 bitmap_clear (vr0->equiv);
6543 }
6544 else
6545 goto give_up;
6546 }
6547 else
6548 gcc_unreachable ();
6549
6550 return;
6551
6552 give_up:
6553 /* Failed to find an efficient meet. Before giving up and setting
6554 the result to VARYING, see if we can at least derive a useful
6555 anti-range. FIXME, all this nonsense about distinguishing
6556 anti-ranges from ranges is necessary because of the odd
6557 semantics of range_includes_zero_p and friends. */
6558 if (!symbolic_range_p (vr0)
6559 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6560 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6561 && !symbolic_range_p (vr1)
6562 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6563 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6564 {
6565 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6566
6567 /* Since this meet operation did not result from the meeting of
6568 two equivalent names, VR0 cannot have any equivalences. */
6569 if (vr0->equiv)
6570 bitmap_clear (vr0->equiv);
6571 }
6572 else
6573 set_value_range_to_varying (vr0);
6574 }
6575
6576
6577 /* Visit all arguments for PHI node PHI that flow through executable
6578 edges. If a valid value range can be derived from all the incoming
6579 value ranges, set a new range for the LHS of PHI. */
6580
6581 static enum ssa_prop_result
6582 vrp_visit_phi_node (gimple phi)
6583 {
6584 size_t i;
6585 tree lhs = PHI_RESULT (phi);
6586 value_range_t *lhs_vr = get_value_range (lhs);
6587 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6588 int edges, old_edges;
6589 struct loop *l;
6590
6591 if (dump_file && (dump_flags & TDF_DETAILS))
6592 {
6593 fprintf (dump_file, "\nVisiting PHI node: ");
6594 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6595 }
6596
6597 edges = 0;
6598 for (i = 0; i < gimple_phi_num_args (phi); i++)
6599 {
6600 edge e = gimple_phi_arg_edge (phi, i);
6601
6602 if (dump_file && (dump_flags & TDF_DETAILS))
6603 {
6604 fprintf (dump_file,
6605 "\n Argument #%d (%d -> %d %sexecutable)\n",
6606 (int) i, e->src->index, e->dest->index,
6607 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6608 }
6609
6610 if (e->flags & EDGE_EXECUTABLE)
6611 {
6612 tree arg = PHI_ARG_DEF (phi, i);
6613 value_range_t vr_arg;
6614
6615 ++edges;
6616
6617 if (TREE_CODE (arg) == SSA_NAME)
6618 {
6619 vr_arg = *(get_value_range (arg));
6620 }
6621 else
6622 {
6623 if (is_overflow_infinity (arg))
6624 {
6625 arg = copy_node (arg);
6626 TREE_OVERFLOW (arg) = 0;
6627 }
6628
6629 vr_arg.type = VR_RANGE;
6630 vr_arg.min = arg;
6631 vr_arg.max = arg;
6632 vr_arg.equiv = NULL;
6633 }
6634
6635 if (dump_file && (dump_flags & TDF_DETAILS))
6636 {
6637 fprintf (dump_file, "\t");
6638 print_generic_expr (dump_file, arg, dump_flags);
6639 fprintf (dump_file, "\n\tValue: ");
6640 dump_value_range (dump_file, &vr_arg);
6641 fprintf (dump_file, "\n");
6642 }
6643
6644 vrp_meet (&vr_result, &vr_arg);
6645
6646 if (vr_result.type == VR_VARYING)
6647 break;
6648 }
6649 }
6650
6651 if (vr_result.type == VR_VARYING)
6652 goto varying;
6653
6654 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6655 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6656
6657 /* To prevent infinite iterations in the algorithm, derive ranges
6658 when the new value is slightly bigger or smaller than the
6659 previous one. We don't do this if we have seen a new executable
6660 edge; this helps us avoid an overflow infinity for conditionals
6661 which are not in a loop. */
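	 /* For example (illustrative): for a loop counter defined as
	    i_1 = PHI <0, i_2> with i_2 = i_1 + 1 in the loop body, instead
	    of iterating through [0, 1], [0, 2], ... the upper bound is
	    pushed straight to +INF here, and possibly tightened again by
	    SCEV below.  */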
6662 if (edges > 0
6663 && gimple_phi_num_args (phi) > 1
6664 && edges == old_edges)
6665 {
6666 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6667 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6668
6669 /* For non-VR_RANGE results or for pointers, fall back to varying
6670 if the range changed. */
6671 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
6672 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6673 && (cmp_min != 0 || cmp_max != 0))
6674 goto varying;
6675
6676 /* If the new minimum is smaller or larger than the previous
6677 one, go all the way to -INF. In the first case, to avoid
6678 iterating millions of times to reach -INF, and in the
6679 other case to avoid infinite bouncing between different
6680 minimums. */
6681 if (cmp_min > 0 || cmp_min < 0)
6682 {
6683 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6684 || !vrp_var_may_overflow (lhs, phi))
6685 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6686 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6687 vr_result.min =
6688 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6689 }
6690
6691 /* Similarly, if the new maximum is smaller or larger than
6692 the previous one, go all the way to +INF. */
6693 if (cmp_max < 0 || cmp_max > 0)
6694 {
6695 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6696 || !vrp_var_may_overflow (lhs, phi))
6697 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6698 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6699 vr_result.max =
6700 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6701 }
6702
6703 /* If we dropped either bound to +-INF then, if this is a loop
6704 PHI node, SCEV may know more about its value-range. */
6705 if ((cmp_min > 0 || cmp_min < 0
6706 || cmp_max < 0 || cmp_max > 0)
6707 && current_loops
6708 && (l = loop_containing_stmt (phi))
6709 && l->header == gimple_bb (phi))
6710 adjust_range_with_scev (&vr_result, l, phi, lhs);
6711
6712 /* If we will end up with a (-INF, +INF) range, set it to
6713 VARYING. Same if the previous max value was invalid for
6714 the type and we end up with vr_result.min > vr_result.max. */
6715 if ((vrp_val_is_max (vr_result.max)
6716 && vrp_val_is_min (vr_result.min))
6717 || compare_values (vr_result.min,
6718 vr_result.max) > 0)
6719 goto varying;
6720 }
6721
6722 /* If the new range is different from the previous value, keep
6723 iterating. */
6724 if (update_value_range (lhs, &vr_result))
6725 {
6726 if (dump_file && (dump_flags & TDF_DETAILS))
6727 {
6728 fprintf (dump_file, "Found new range for ");
6729 print_generic_expr (dump_file, lhs, 0);
6730 fprintf (dump_file, ": ");
6731 dump_value_range (dump_file, &vr_result);
6732 fprintf (dump_file, "\n\n");
6733 }
6734
6735 return SSA_PROP_INTERESTING;
6736 }
6737
6738 /* Nothing changed, don't add outgoing edges. */
6739 return SSA_PROP_NOT_INTERESTING;
6740
6741 /* No match found. Set the LHS to VARYING. */
6742 varying:
6743 set_value_range_to_varying (lhs_vr);
6744 return SSA_PROP_VARYING;
6745 }
6746
6747 /* Simplify boolean operations if the source is known
6748 to be already a boolean. */
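/* Illustrative sketch (SSA names are made up): if b_1 and c_2 are both
   known to lie in [0, 1],

	d_3 = b_1 && c_2;	can be rewritten as   d_3 = b_1 & c_2;
	e_4 = !b_1;		can be rewritten as   e_4 = b_1 ^ 1;
*/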
6749 static bool
6750 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6751 {
6752 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6753 tree val = NULL;
6754 tree op0, op1;
6755 value_range_t *vr;
6756 bool sop = false;
6757 bool need_conversion;
6758
6759 op0 = gimple_assign_rhs1 (stmt);
6760 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6761 {
6762 if (TREE_CODE (op0) != SSA_NAME)
6763 return false;
6764 vr = get_value_range (op0);
6765
6766 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6767 if (!val || !integer_onep (val))
6768 return false;
6769
6770 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6771 if (!val || !integer_onep (val))
6772 return false;
6773 }
6774
6775 if (rhs_code == TRUTH_NOT_EXPR)
6776 {
6777 rhs_code = NE_EXPR;
6778 op1 = build_int_cst (TREE_TYPE (op0), 1);
6779 }
6780 else
6781 {
6782 op1 = gimple_assign_rhs2 (stmt);
6783
6784 /* Reduce number of cases to handle. */
6785 if (is_gimple_min_invariant (op1))
6786 {
6787 /* Exclude anything that should have been already folded. */
6788 if (rhs_code != EQ_EXPR
6789 && rhs_code != NE_EXPR
6790 && rhs_code != TRUTH_XOR_EXPR)
6791 return false;
6792
6793 if (!integer_zerop (op1)
6794 && !integer_onep (op1)
6795 && !integer_all_onesp (op1))
6796 return false;
6797
6798 /* Limit the number of cases we have to consider. */
6799 if (rhs_code == EQ_EXPR)
6800 {
6801 rhs_code = NE_EXPR;
6802 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6803 }
6804 }
6805 else
6806 {
6807 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6808 if (rhs_code == EQ_EXPR)
6809 return false;
6810
6811 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6812 {
6813 vr = get_value_range (op1);
6814 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6815 if (!val || !integer_onep (val))
6816 return false;
6817
6818 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6819 if (!val || !integer_onep (val))
6820 return false;
6821 }
6822 }
6823 }
6824
6825 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6826 {
6827 location_t location;
6828
6829 if (!gimple_has_location (stmt))
6830 location = input_location;
6831 else
6832 location = gimple_location (stmt);
6833
6834 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6835 warning_at (location, OPT_Wstrict_overflow,
6836 _("assuming signed overflow does not occur when "
6837 "simplifying && or || to & or |"));
6838 else
6839 warning_at (location, OPT_Wstrict_overflow,
6840 _("assuming signed overflow does not occur when "
6841 "simplifying ==, != or ! to identity or ^"));
6842 }
6843
6844 need_conversion =
6845 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6846 TREE_TYPE (op0));
6847
6848 /* Make sure to not sign-extend -1 as a boolean value. */
6849 if (need_conversion
6850 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6851 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6852 return false;
6853
6854 switch (rhs_code)
6855 {
6856 case TRUTH_AND_EXPR:
6857 rhs_code = BIT_AND_EXPR;
6858 break;
6859 case TRUTH_OR_EXPR:
6860 rhs_code = BIT_IOR_EXPR;
6861 break;
6862 case TRUTH_XOR_EXPR:
6863 case NE_EXPR:
6864 if (integer_zerop (op1))
6865 {
6866 gimple_assign_set_rhs_with_ops (gsi,
6867 need_conversion ? NOP_EXPR : SSA_NAME,
6868 op0, NULL);
6869 update_stmt (gsi_stmt (*gsi));
6870 return true;
6871 }
6872
6873 rhs_code = BIT_XOR_EXPR;
6874 break;
6875 default:
6876 gcc_unreachable ();
6877 }
6878
6879 if (need_conversion)
6880 return false;
6881
6882 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6883 update_stmt (gsi_stmt (*gsi));
6884 return true;
6885 }
6886
6887 /* Simplify a division or modulo operator to a right shift or
6888 bitwise and if the first operand is unsigned or known to be
6889 nonnegative and the second operand is an exact power of two. */
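/* Illustrative sketch (SSA names are made up): if x_1 is known to be
   nonnegative,

	y_2 = x_1 / 8;	   becomes   y_2 = x_1 >> 3;
	z_3 = x_1 % 8;	   becomes   z_3 = x_1 & 7;
*/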
6890
6891 static bool
6892 simplify_div_or_mod_using_ranges (gimple stmt)
6893 {
6894 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6895 tree val = NULL;
6896 tree op0 = gimple_assign_rhs1 (stmt);
6897 tree op1 = gimple_assign_rhs2 (stmt);
6898 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6899
6900 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6901 {
6902 val = integer_one_node;
6903 }
6904 else
6905 {
6906 bool sop = false;
6907
6908 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6909
6910 if (val
6911 && sop
6912 && integer_onep (val)
6913 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6914 {
6915 location_t location;
6916
6917 if (!gimple_has_location (stmt))
6918 location = input_location;
6919 else
6920 location = gimple_location (stmt);
6921 warning_at (location, OPT_Wstrict_overflow,
6922 "assuming signed overflow does not occur when "
6923 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6924 }
6925 }
6926
6927 if (val && integer_onep (val))
6928 {
6929 tree t;
6930
6931 if (rhs_code == TRUNC_DIV_EXPR)
6932 {
6933 t = build_int_cst (integer_type_node, tree_log2 (op1));
6934 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6935 gimple_assign_set_rhs1 (stmt, op0);
6936 gimple_assign_set_rhs2 (stmt, t);
6937 }
6938 else
6939 {
6940 t = build_int_cst (TREE_TYPE (op1), 1);
6941 t = int_const_binop (MINUS_EXPR, op1, t);
6942 t = fold_convert (TREE_TYPE (op0), t);
6943
6944 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6945 gimple_assign_set_rhs1 (stmt, op0);
6946 gimple_assign_set_rhs2 (stmt, t);
6947 }
6948
6949 update_stmt (stmt);
6950 return true;
6951 }
6952
6953 return false;
6954 }
6955
6956 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6957 ABS_EXPR. If the operand is <= 0, then simplify the
6958 ABS_EXPR into a NEGATE_EXPR. */
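/* Illustrative sketch (SSA names are made up): if x_1 has the range
   [0, 100], then  y_2 = ABS_EXPR <x_1>  becomes  y_2 = x_1;  if x_1 has
   the range [-100, 0], it becomes  y_2 = -x_1.  */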
6959
6960 static bool
6961 simplify_abs_using_ranges (gimple stmt)
6962 {
6963 tree val = NULL;
6964 tree op = gimple_assign_rhs1 (stmt);
6965 tree type = TREE_TYPE (op);
6966 value_range_t *vr = get_value_range (op);
6967
6968 if (TYPE_UNSIGNED (type))
6969 {
6970 val = integer_zero_node;
6971 }
6972 else if (vr)
6973 {
6974 bool sop = false;
6975
6976 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6977 if (!val)
6978 {
6979 sop = false;
6980 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6981 &sop);
6982
6983 if (val)
6984 {
6985 if (integer_zerop (val))
6986 val = integer_one_node;
6987 else if (integer_onep (val))
6988 val = integer_zero_node;
6989 }
6990 }
6991
6992 if (val
6993 && (integer_onep (val) || integer_zerop (val)))
6994 {
6995 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6996 {
6997 location_t location;
6998
6999 if (!gimple_has_location (stmt))
7000 location = input_location;
7001 else
7002 location = gimple_location (stmt);
7003 warning_at (location, OPT_Wstrict_overflow,
7004 "assuming signed overflow does not occur when "
7005 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
7006 }
7007
7008 gimple_assign_set_rhs1 (stmt, op);
7009 if (integer_onep (val))
7010 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
7011 else
7012 gimple_assign_set_rhs_code (stmt, SSA_NAME);
7013 update_stmt (stmt);
7014 return true;
7015 }
7016 }
7017
7018 return false;
7019 }
7020
7021 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
7022 If all the bits that are being cleared by & are already
7023 known to be zero from VR, or all the bits that are being
7024 set by | are already known to be one from VR, the bit
7025 operation is redundant. */
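/* Illustrative sketch (SSA names are made up): if x_1 has the range
   [0, 15], every bit above bit 3 is known to be zero, so

	y_2 = x_1 & 255;    simplifies to   y_2 = x_1;

   and, trivially, z_3 = x_1 | 0 simplifies to z_3 = x_1.  */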
7026
7027 static bool
7028 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
7029 {
7030 tree op0 = gimple_assign_rhs1 (stmt);
7031 tree op1 = gimple_assign_rhs2 (stmt);
7032 tree op = NULL_TREE;
7033 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7034 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
7035 double_int may_be_nonzero0, may_be_nonzero1;
7036 double_int must_be_nonzero0, must_be_nonzero1;
7037 double_int mask;
7038
7039 if (TREE_CODE (op0) == SSA_NAME)
7040 vr0 = *(get_value_range (op0));
7041 else if (is_gimple_min_invariant (op0))
7042 set_value_range_to_value (&vr0, op0, NULL);
7043 else
7044 return false;
7045
7046 if (TREE_CODE (op1) == SSA_NAME)
7047 vr1 = *(get_value_range (op1));
7048 else if (is_gimple_min_invariant (op1))
7049 set_value_range_to_value (&vr1, op1, NULL);
7050 else
7051 return false;
7052
7053 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
7054 return false;
7055 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
7056 return false;
7057
7058 switch (gimple_assign_rhs_code (stmt))
7059 {
7060 case BIT_AND_EXPR:
7061 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7062 if (double_int_zero_p (mask))
7063 {
7064 op = op0;
7065 break;
7066 }
7067 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7068 if (double_int_zero_p (mask))
7069 {
7070 op = op1;
7071 break;
7072 }
7073 break;
7074 case BIT_IOR_EXPR:
7075 mask = double_int_and_not (may_be_nonzero0, must_be_nonzero1);
7076 if (double_int_zero_p (mask))
7077 {
7078 op = op1;
7079 break;
7080 }
7081 mask = double_int_and_not (may_be_nonzero1, must_be_nonzero0);
7082 if (double_int_zero_p (mask))
7083 {
7084 op = op0;
7085 break;
7086 }
7087 break;
7088 default:
7089 gcc_unreachable ();
7090 }
7091
7092 if (op == NULL_TREE)
7093 return false;
7094
7095 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
7096 update_stmt (gsi_stmt (*gsi));
7097 return true;
7098 }
7099
7100 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
7101 a known value range VR.
7102
7103 If there is one and only one value which will satisfy the
7104 conditional, then return that value. Else return NULL. */
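/* For example (illustrative): if OP0 has the range [0, 5] and the
   condition is OP0 < 1, the only value that can satisfy it is 0, so 0 is
   returned; the caller can then rewrite the test as OP0 == 0.  */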
7105
7106 static tree
7107 test_for_singularity (enum tree_code cond_code, tree op0,
7108 tree op1, value_range_t *vr)
7109 {
7110 tree min = NULL;
7111 tree max = NULL;
7112
7113 /* Extract minimum/maximum values which satisfy the
7114 conditional as it was written. */
7115 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
7116 {
7117 /* This should not be negative infinity; there is no overflow
7118 here. */
7119 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
7120
7121 max = op1;
7122 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
7123 {
7124 tree one = build_int_cst (TREE_TYPE (op0), 1);
7125 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
7126 if (EXPR_P (max))
7127 TREE_NO_WARNING (max) = 1;
7128 }
7129 }
7130 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
7131 {
7132 /* This should not be positive infinity; there is no overflow
7133 here. */
7134 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
7135
7136 min = op1;
7137 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
7138 {
7139 tree one = build_int_cst (TREE_TYPE (op0), 1);
7140 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
7141 if (EXPR_P (min))
7142 TREE_NO_WARNING (min) = 1;
7143 }
7144 }
7145
7146 /* Now refine the minimum and maximum values using any
7147 value range information we have for op0. */
7148 if (min && max)
7149 {
7150 if (compare_values (vr->min, min) == 1)
7151 min = vr->min;
7152 if (compare_values (vr->max, max) == -1)
7153 max = vr->max;
7154
7155 /* If the new min/max values have converged to a single value,
7156 then there is only one value which can satisfy the condition,
7157 return that value. */
7158 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
7159 return min;
7160 }
7161 return NULL;
7162 }
7163
7164 /* Simplify a conditional using a relational operator to an equality
7165 test if the range information indicates only one value can satisfy
7166 the original conditional. */
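/* Illustrative sketch: with x_1 known to be in [0, 5],

	if (x_1 > 4)	becomes   if (x_1 == 5)

   and, via the inverted test below,

	if (x_1 < 5)	becomes   if (x_1 != 5)
*/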
7167
7168 static bool
7169 simplify_cond_using_ranges (gimple stmt)
7170 {
7171 tree op0 = gimple_cond_lhs (stmt);
7172 tree op1 = gimple_cond_rhs (stmt);
7173 enum tree_code cond_code = gimple_cond_code (stmt);
7174
7175 if (cond_code != NE_EXPR
7176 && cond_code != EQ_EXPR
7177 && TREE_CODE (op0) == SSA_NAME
7178 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
7179 && is_gimple_min_invariant (op1))
7180 {
7181 value_range_t *vr = get_value_range (op0);
7182
7183 /* If we have range information for OP0, then we might be
7184 able to simplify this conditional. */
7185 if (vr->type == VR_RANGE)
7186 {
7187 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
7188
7189 if (new_tree)
7190 {
7191 if (dump_file)
7192 {
7193 fprintf (dump_file, "Simplified relational ");
7194 print_gimple_stmt (dump_file, stmt, 0, 0);
7195 fprintf (dump_file, " into ");
7196 }
7197
7198 gimple_cond_set_code (stmt, EQ_EXPR);
7199 gimple_cond_set_lhs (stmt, op0);
7200 gimple_cond_set_rhs (stmt, new_tree);
7201
7202 update_stmt (stmt);
7203
7204 if (dump_file)
7205 {
7206 print_gimple_stmt (dump_file, stmt, 0, 0);
7207 fprintf (dump_file, "\n");
7208 }
7209
7210 return true;
7211 }
7212
7213 /* Try again after inverting the condition. We only deal
7214 with integral types here, so no need to worry about
7215 issues with inverting FP comparisons. */
7216 cond_code = invert_tree_comparison (cond_code, false);
7217 new_tree = test_for_singularity (cond_code, op0, op1, vr);
7218
7219 if (new_tree)
7220 {
7221 if (dump_file)
7222 {
7223 fprintf (dump_file, "Simplified relational ");
7224 print_gimple_stmt (dump_file, stmt, 0, 0);
7225 fprintf (dump_file, " into ");
7226 }
7227
7228 gimple_cond_set_code (stmt, NE_EXPR);
7229 gimple_cond_set_lhs (stmt, op0);
7230 gimple_cond_set_rhs (stmt, new_tree);
7231
7232 update_stmt (stmt);
7233
7234 if (dump_file)
7235 {
7236 print_gimple_stmt (dump_file, stmt, 0, 0);
7237 fprintf (dump_file, "\n");
7238 }
7239
7240 return true;
7241 }
7242 }
7243 }
7244
7245 return false;
7246 }
7247
7248 /* Simplify a switch statement using the value range of the switch
7249 argument. */
7250
7251 static bool
7252 simplify_switch_using_ranges (gimple stmt)
7253 {
7254 tree op = gimple_switch_index (stmt);
7255 value_range_t *vr;
7256 bool take_default;
7257 edge e;
7258 edge_iterator ei;
7259 size_t i = 0, j = 0, n, n2;
7260 tree vec2;
7261 switch_update su;
7262
7263 if (TREE_CODE (op) == SSA_NAME)
7264 {
7265 vr = get_value_range (op);
7266
7267 /* We can only handle integer ranges. */
7268 if (vr->type != VR_RANGE
7269 || symbolic_range_p (vr))
7270 return false;
7271
7272 /* Find case label for min/max of the value range. */
7273 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7274 }
7275 else if (TREE_CODE (op) == INTEGER_CST)
7276 {
7277 take_default = !find_case_label_index (stmt, 1, op, &i);
7278 if (take_default)
7279 {
7280 i = 1;
7281 j = 0;
7282 }
7283 else
7284 {
7285 j = i;
7286 }
7287 }
7288 else
7289 return false;
7290
7291 n = gimple_switch_num_labels (stmt);
7292
7293 /* Bail out if every edge may be taken anyway; there is nothing to simplify. */
7294 if (i == 1
7295 && j == n - 1
7296 && take_default)
7297 return false;
7298
7299 /* Build a new vector of taken case labels. */
7300 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7301 n2 = 0;
7302
7303 /* Add the default edge, if necessary. */
7304 if (take_default)
7305 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7306
7307 for (; i <= j; ++i, ++n2)
7308 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7309
7310 /* Mark needed edges. */
7311 for (i = 0; i < n2; ++i)
7312 {
7313 e = find_edge (gimple_bb (stmt),
7314 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7315 e->aux = (void *)-1;
7316 }
7317
7318 /* Queue edges that are not needed for later removal. */
7319 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7320 {
7321 if (e->aux == (void *)-1)
7322 {
7323 e->aux = NULL;
7324 continue;
7325 }
7326
7327 if (dump_file && (dump_flags & TDF_DETAILS))
7328 {
7329 fprintf (dump_file, "removing unreachable case label\n");
7330 }
7331 VEC_safe_push (edge, heap, to_remove_edges, e);
7332 e->flags &= ~EDGE_EXECUTABLE;
7333 }
7334
7335 /* And queue an update for the stmt. */
7336 su.stmt = stmt;
7337 su.vec = vec2;
7338 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7339 return false;
7340 }
7341
7342 /* Simplify an integral conversion from an SSA name in STMT. */
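/* As an illustrative sketch (hypothetical names, shown at source level):
   given

       unsigned char c_3 = ...;       c_3 has range [0, 100]
       short s_5 = (short) c_3;
       int i_7 = (int) s_5;

   the range of c_3 survives the intermediate conversion to short
   unchanged, so the statement can be rewritten as i_7 = (int) c_3.  */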
7343
7344 static bool
7345 simplify_conversion_using_ranges (gimple stmt)
7346 {
7347 tree innerop, middleop, finaltype;
7348 gimple def_stmt;
7349 value_range_t *innervr;
7350 double_int innermin, innermax, middlemin, middlemax;
7351
7352 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
7353 if (!INTEGRAL_TYPE_P (finaltype))
7354 return false;
7355 middleop = gimple_assign_rhs1 (stmt);
7356 def_stmt = SSA_NAME_DEF_STMT (middleop);
7357 if (!is_gimple_assign (def_stmt)
7358 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
7359 return false;
7360 innerop = gimple_assign_rhs1 (def_stmt);
7361 if (TREE_CODE (innerop) != SSA_NAME)
7362 return false;
7363
7364 /* Get the value-range of the inner operand. */
7365 innervr = get_value_range (innerop);
7366 if (innervr->type != VR_RANGE
7367 || TREE_CODE (innervr->min) != INTEGER_CST
7368 || TREE_CODE (innervr->max) != INTEGER_CST)
7369 return false;
7370
7371 /* Simulate the conversion chain to check whether the result stays
7372 the same if the middle conversion is removed. */
7373 innermin = tree_to_double_int (innervr->min);
7374 innermax = tree_to_double_int (innervr->max);
7375 middlemin = double_int_ext (innermin, TYPE_PRECISION (TREE_TYPE (middleop)),
7376 TYPE_UNSIGNED (TREE_TYPE (middleop)));
7377 middlemax = double_int_ext (innermax, TYPE_PRECISION (TREE_TYPE (middleop)),
7378 TYPE_UNSIGNED (TREE_TYPE (middleop)));
7379 /* If the middle values do not represent a proper range, fail. */
7380 if (double_int_cmp (middlemin, middlemax,
7381 TYPE_UNSIGNED (TREE_TYPE (middleop))) > 0)
7382 return false;
7383 if (!double_int_equal_p (double_int_ext (middlemin,
7384 TYPE_PRECISION (finaltype),
7385 TYPE_UNSIGNED (finaltype)),
7386 double_int_ext (innermin,
7387 TYPE_PRECISION (finaltype),
7388 TYPE_UNSIGNED (finaltype)))
7389 || !double_int_equal_p (double_int_ext (middlemax,
7390 TYPE_PRECISION (finaltype),
7391 TYPE_UNSIGNED (finaltype)),
7392 double_int_ext (innermax,
7393 TYPE_PRECISION (finaltype),
7394 TYPE_UNSIGNED (finaltype))))
7395 return false;
7396
7397 gimple_assign_set_rhs1 (stmt, innerop);
7398 update_stmt (stmt);
7399 return true;
7400 }
7401
7402 /* Simplify STMT using ranges if possible. */
7403
7404 static bool
7405 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7406 {
7407 gimple stmt = gsi_stmt (*gsi);
7408 if (is_gimple_assign (stmt))
7409 {
7410 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7411 tree rhs1 = gimple_assign_rhs1 (stmt);
7412
7413 switch (rhs_code)
7414 {
7415 case EQ_EXPR:
7416 case NE_EXPR:
7417 case TRUTH_NOT_EXPR:
7418 case TRUTH_AND_EXPR:
7419 case TRUTH_OR_EXPR:
7420 case TRUTH_XOR_EXPR:
7421 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7422 or identity if the RHS is zero or one and the LHS is known
7423 to be a boolean value. Transform all TRUTH_*_EXPR into
7424 BIT_*_EXPR if both arguments are known to be boolean values. */
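/* For instance (hypothetical names), if b_2 is known to be in
   [0, 1], then b_2 != 0 can simplify to b_2 itself, b_2 == 0
   to b_2 ^ 1, and a TRUTH_AND_EXPR of two such names to a
   BIT_AND_EXPR.  */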
7425 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7426 return simplify_truth_ops_using_ranges (gsi, stmt);
7427 break;
7428
7429 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7430 and BIT_AND_EXPR respectively if the first operand is greater
7431 than zero and the second operand is an exact power of two. */
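/* E.g. (hypothetical name satisfying the range requirement above):
   x_1 / 8 becomes x_1 >> 3 and x_1 % 8 becomes x_1 & 7.  */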
7432 case TRUNC_DIV_EXPR:
7433 case TRUNC_MOD_EXPR:
7434 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
7435 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7436 return simplify_div_or_mod_using_ranges (stmt);
7437 break;
7438
7439 /* Transform ABS (X) into X or -X as appropriate. */
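/* E.g. (hypothetical name): with range [-7, -1], ABS (x_6) becomes
   -x_6; with range [2, 9] it becomes x_6.  */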
7440 case ABS_EXPR:
7441 if (TREE_CODE (rhs1) == SSA_NAME
7442 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7443 return simplify_abs_using_ranges (stmt);
7444 break;
7445
7446 case BIT_AND_EXPR:
7447 case BIT_IOR_EXPR:
7448 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
7449 if all the bits being cleared are already cleared or
7450 all the bits being set are already set. */
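/* E.g. (hypothetical name): if y_4 is known to be in [0, 15], then
   y_4 & 0xff clears no bits that are not already clear and
   simplifies to y_4.  */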
7451 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7452 return simplify_bit_ops_using_ranges (gsi, stmt);
7453 break;
7454
7455 CASE_CONVERT:
7456 if (TREE_CODE (rhs1) == SSA_NAME
7457 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
7458 return simplify_conversion_using_ranges (stmt);
7459 break;
7460
7461 default:
7462 break;
7463 }
7464 }
7465 else if (gimple_code (stmt) == GIMPLE_COND)
7466 return simplify_cond_using_ranges (stmt);
7467 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7468 return simplify_switch_using_ranges (stmt);
7469
7470 return false;
7471 }
7472
7473 /* If the statement pointed to by SI has a predicate whose value can be
7474 computed using the value range information computed by VRP, compute
7475 its value and return true. Otherwise, return false. */
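/* For example (hypothetical names and range): if VRP knows that x_5
   lies in [1, 10], the comparison in

       b_7 = x_5 > 0;

   always evaluates to true, so the statement is folded to b_7 = 1.
   A GIMPLE_COND testing the same predicate is instead turned into an
   unconditionally true (or, in the opposite case, false) condition.  */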
7476
7477 static bool
7478 fold_predicate_in (gimple_stmt_iterator *si)
7479 {
7480 bool assignment_p = false;
7481 tree val;
7482 gimple stmt = gsi_stmt (*si);
7483
7484 if (is_gimple_assign (stmt)
7485 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7486 {
7487 assignment_p = true;
7488 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7489 gimple_assign_rhs1 (stmt),
7490 gimple_assign_rhs2 (stmt),
7491 stmt);
7492 }
7493 else if (gimple_code (stmt) == GIMPLE_COND)
7494 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7495 gimple_cond_lhs (stmt),
7496 gimple_cond_rhs (stmt),
7497 stmt);
7498 else
7499 return false;
7500
7501 if (val)
7502 {
7503 if (assignment_p)
7504 val = fold_convert (gimple_expr_type (stmt), val);
7505
7506 if (dump_file)
7507 {
7508 fprintf (dump_file, "Folding predicate ");
7509 print_gimple_expr (dump_file, stmt, 0, 0);
7510 fprintf (dump_file, " to ");
7511 print_generic_expr (dump_file, val, 0);
7512 fprintf (dump_file, "\n");
7513 }
7514
7515 if (is_gimple_assign (stmt))
7516 gimple_assign_set_rhs_from_tree (si, val);
7517 else
7518 {
7519 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7520 if (integer_zerop (val))
7521 gimple_cond_make_false (stmt);
7522 else if (integer_onep (val))
7523 gimple_cond_make_true (stmt);
7524 else
7525 gcc_unreachable ();
7526 }
7527
7528 return true;
7529 }
7530
7531 return false;
7532 }
7533
7534 /* Callback for substitute_and_fold folding the stmt at *SI. */
7535
7536 static bool
7537 vrp_fold_stmt (gimple_stmt_iterator *si)
7538 {
7539 if (fold_predicate_in (si))
7540 return true;
7541
7542 return simplify_stmt_using_ranges (si);
7543 }
7544
7545 /* Stack of dest,src equivalency pairs that need to be restored after
7546 each attempt to thread a block's incoming edge to an outgoing edge.
7547
7548 A NULL entry is used to mark the end of pairs which need to be
7549 restored. */
7550 static VEC(tree,heap) *stack;
7551
7552 /* A trivial wrapper so that we can present the generic jump threading
7553 code with a simple API for simplifying statements. STMT is the
7554 statement we want to simplify; WITHIN_STMT provides the location
7555 for any overflow warnings. */
7556
7557 static tree
7558 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7559 {
7560 /* We only use VRP information to simplify conditionals. This is
7561 overly conservative, but it's unclear if doing more would be
7562 worth the compile time cost. */
7563 if (gimple_code (stmt) != GIMPLE_COND)
7564 return NULL;
7565
7566 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7567 gimple_cond_lhs (stmt),
7568 gimple_cond_rhs (stmt), within_stmt);
7569 }
7570
7571 /* Blocks which have more than one predecessor and more than
7572 one successor present jump threading opportunities, i.e.,
7573 when the block is reached from a specific predecessor, we
7574 may be able to determine which of the outgoing edges will
7575 be traversed. When this optimization applies, we are able
7576 to avoid conditionals at runtime and we may expose secondary
7577 optimization opportunities.
7578
7579 This routine is effectively a driver for the generic jump
7580 threading code. It basically just presents the generic code
7581 with edges that may be suitable for jump threading.
7582
7583 Unlike DOM, we do not iterate VRP if jump threading was successful.
7584 While iterating may expose new opportunities for VRP, it is expected
7585 those opportunities would be very limited and the compile time cost
7586 to expose those opportunities would be significant.
7587
7588 As jump threading opportunities are discovered, they are registered
7589 for later realization. */
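/* A sketch of the kind of opportunity this finds (hypothetical names):
   suppose block B ends in "if (x_1 > 5)" and one of its incoming edges
   comes from a path on which VRP knows x_1 == 10.  Entered along that
   edge, the condition is necessarily true, so that predecessor can be
   threaded directly to B's true successor, skipping the runtime test.  */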
7590
7591 static void
7592 identify_jump_threads (void)
7593 {
7594 basic_block bb;
7595 gimple dummy;
7596 int i;
7597 edge e;
7598
7599 /* Ugh. When substituting values earlier in this pass we can
7600 wipe the dominance information. So rebuild the dominator
7601 information as we need it within the jump threading code. */
7602 calculate_dominance_info (CDI_DOMINATORS);
7603
7604 /* We do not allow VRP information to be used for jump threading
7605 across a back edge in the CFG. Otherwise it becomes too
7606 difficult to avoid eliminating loop exit tests. Of course
7607 EDGE_DFS_BACK is not accurate at this time so we have to
7608 recompute it. */
7609 mark_dfs_back_edges ();
7610
7611 /* Do not thread across edges we are about to remove. Just marking
7612 them as EDGE_DFS_BACK will do. */
7613 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7614 e->flags |= EDGE_DFS_BACK;
7615
7616 /* Allocate our unwinder stack to unwind any temporary equivalences
7617 that might be recorded. */
7618 stack = VEC_alloc (tree, heap, 20);
7619
7620 /* To avoid lots of silly node creation, we create a single
7621 conditional and just modify it in-place when attempting to
7622 thread jumps. */
7623 dummy = gimple_build_cond (EQ_EXPR,
7624 integer_zero_node, integer_zero_node,
7625 NULL, NULL);
7626
7627 /* Walk through all the blocks finding those which present a
7628 potential jump threading opportunity. We could set this up
7629 as a dominator walker and record data during the walk, but
7630 I doubt it's worth the effort for the classes of jump
7631 threading opportunities we are trying to identify at this
7632 point in compilation. */
7633 FOR_EACH_BB (bb)
7634 {
7635 gimple last;
7636
7637 /* If the generic jump threading code does not find this block
7638 interesting, then there is nothing to do. */
7639 if (! potentially_threadable_block (bb))
7640 continue;
7641
7642 /* We only care about blocks ending in a conditional jump or a
7643 switch statement, both of which are matched by the test on
7644 the last statement below. */
7645 last = gsi_stmt (gsi_last_bb (bb));
7646
7647 /* We're basically looking for a switch or any kind of conditional with
7648 integral or pointer type arguments. Note the type of the second
7649 argument will be the same as the first argument, so no need to
7650 check it explicitly. */
7651 if (gimple_code (last) == GIMPLE_SWITCH
7652 || (gimple_code (last) == GIMPLE_COND
7653 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7654 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7655 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
7656 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7657 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
7658 {
7659 edge_iterator ei;
7660
7661 /* We've got a block with multiple predecessors and multiple
7662 successors which also ends in a suitable conditional or
7663 switch statement. For each predecessor, see if we can thread
7664 it to a specific successor. */
7665 FOR_EACH_EDGE (e, ei, bb->preds)
7666 {
7667 /* Do not thread across back edges or abnormal edges
7668 in the CFG. */
7669 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7670 continue;
7671
7672 thread_across_edge (dummy, e, true, &stack,
7673 simplify_stmt_for_jump_threading);
7674 }
7675 }
7676 }
7677
7678 /* We do not actually update the CFG or SSA graphs at this point as
7679 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7680 handle ASSERT_EXPRs gracefully. */
7681 }
7682
7683 /* We identified all the jump threading opportunities earlier, but could
7684 not transform the CFG at that time. This routine transforms the
7685 CFG and arranges for the dominator tree to be rebuilt if necessary.
7686
7687 Note the SSA graph update will occur during the normal TODO
7688 processing by the pass manager. */
7689 static void
7690 finalize_jump_threads (void)
7691 {
7692 thread_through_all_blocks (false);
7693 VEC_free (tree, heap, stack);
7694 }
7695
7696
7697 /* Traverse all the blocks folding conditionals with known ranges. */
7698
7699 static void
7700 vrp_finalize (void)
7701 {
7702 size_t i;
7703 unsigned num = num_ssa_names;
7704
7705 if (dump_file)
7706 {
7707 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7708 dump_all_value_ranges (dump_file);
7709 fprintf (dump_file, "\n");
7710 }
7711
7712 substitute_and_fold (op_with_constant_singleton_value_range,
7713 vrp_fold_stmt, false);
7714
7715 if (warn_array_bounds)
7716 check_all_array_refs ();
7717
7718 /* We must identify jump threading opportunities before we release
7719 the data structures built by VRP. */
7720 identify_jump_threads ();
7721
7722 /* Free allocated memory. */
7723 for (i = 0; i < num; i++)
7724 if (vr_value[i])
7725 {
7726 BITMAP_FREE (vr_value[i]->equiv);
7727 free (vr_value[i]);
7728 }
7729
7730 free (vr_value);
7731 free (vr_phi_edge_counts);
7732
7733 /* So that we can distinguish between VRP data being available
7734 and not available. */
7735 vr_value = NULL;
7736 vr_phi_edge_counts = NULL;
7737 }
7738
7739
7740 /* Main entry point to VRP (Value Range Propagation). This pass is
7741 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7742 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7743 Programming Language Design and Implementation, pp. 67-78, 1995.
7744 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7745
7746 This is essentially an SSA-CCP pass modified to deal with ranges
7747 instead of constants.
7748
7749 While propagating ranges, we may find that two or more SSA names
7750 have equivalent, though distinct, ranges. For instance,
7751
7752 1 x_9 = p_3->a;
7753 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7754 3 if (p_4 == q_2)
7755 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7756 5 endif
7757 6 if (q_2)
7758
7759 In the code above, pointer p_5 has range [q_2, q_2], but from the
7760 code we can also determine that p_5 cannot be NULL and, if q_2 had
7761 a non-varying range, p_5's range should also be compatible with it.
7762
7763 These equivalences are created by two expressions: ASSERT_EXPR and
7764 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7765 result of another assertion, we can use the fact that p_5 and
7766 p_4 are equivalent when evaluating p_5's range.
7767
7768 Together with value ranges, we also propagate these equivalences
7769 between names so that we can take advantage of information from
7770 multiple ranges when doing final replacement. Note that this
7771 equivalency relation is transitive but not symmetric.
7772
7773 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7774 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7775 in contexts where that assertion does not hold (e.g., in line 6).
7776
7777 TODO: the main difference between this pass and Patterson's is that
7778 we do not propagate edge probabilities. We only compute whether
7779 edges can be taken or not. That is, instead of having a spectrum
7780 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7781 DON'T KNOW. In the future, it may be worthwhile to propagate
7782 probabilities to aid branch prediction. */
7783
7784 static unsigned int
7785 execute_vrp (void)
7786 {
7787 int i;
7788 edge e;
7789 switch_update *su;
7790
7791 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7792 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7793 scev_initialize ();
7794
7795 insert_range_assertions ();
7796
7797 /* Estimate the number of iterations - but do not use undefined behavior
7798 for this. We can't do this lazily, as other functions may compute
7799 the estimates using undefined behavior. */
7800 free_numbers_of_iterations_estimates ();
7801 estimate_numbers_of_iterations (false);
7802
7803 to_remove_edges = VEC_alloc (edge, heap, 10);
7804 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7805 threadedge_initialize_values ();
7806
7807 vrp_initialize ();
7808 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7809 vrp_finalize ();
7810
7811 free_numbers_of_iterations_estimates ();
7812
7813 /* ASSERT_EXPRs must be removed before finalizing jump threads,
7814 as finalizing jump threads calls the CFG cleanup code, which
7815 does not properly handle ASSERT_EXPRs. */
7816 remove_range_assertions ();
7817
7818 /* If we exposed any new variables, go ahead and put them into
7819 SSA form now, before we handle jump threading. This simplifies
7820 interactions between rewriting of _DECL nodes into SSA form
7821 and rewriting SSA_NAME nodes into SSA form after block
7822 duplication and CFG manipulation. */
7823 update_ssa (TODO_update_ssa);
7824
7825 finalize_jump_threads ();
7826
7827 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7828 CFG in a broken state and requires a cfg_cleanup run. */
7829 FOR_EACH_VEC_ELT (edge, to_remove_edges, i, e)
7830 remove_edge (e);
7831 /* Update SWITCH_EXPR case label vector. */
7832 FOR_EACH_VEC_ELT (switch_update, to_update_switch_stmts, i, su)
7833 {
7834 size_t j;
7835 size_t n = TREE_VEC_LENGTH (su->vec);
7836 tree label;
7837 gimple_switch_set_num_labels (su->stmt, n);
7838 for (j = 0; j < n; j++)
7839 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7840 /* As we may have replaced the default label with a regular one,
7841 make sure to make it a real default label again. This ensures
7842 optimal expansion. */
7843 label = gimple_switch_default_label (su->stmt);
7844 CASE_LOW (label) = NULL_TREE;
7845 CASE_HIGH (label) = NULL_TREE;
7846 }
7847
7848 if (VEC_length (edge, to_remove_edges) > 0)
7849 free_dominance_info (CDI_DOMINATORS);
7850
7851 VEC_free (edge, heap, to_remove_edges);
7852 VEC_free (switch_update, heap, to_update_switch_stmts);
7853 threadedge_finalize_values ();
7854
7855 scev_finalize ();
7856 loop_optimizer_finalize ();
7857 return 0;
7858 }
7859
7860 static bool
7861 gate_vrp (void)
7862 {
7863 return flag_tree_vrp != 0;
7864 }
7865
7866 struct gimple_opt_pass pass_vrp =
7867 {
7868 {
7869 GIMPLE_PASS,
7870 "vrp", /* name */
7871 gate_vrp, /* gate */
7872 execute_vrp, /* execute */
7873 NULL, /* sub */
7874 NULL, /* next */
7875 0, /* static_pass_number */
7876 TV_TREE_VRP, /* tv_id */
7877 PROP_ssa, /* properties_required */
7878 0, /* properties_provided */
7879 0, /* properties_destroyed */
7880 0, /* todo_flags_start */
7881 TODO_cleanup_cfg
7882 | TODO_update_ssa
7883 | TODO_verify_ssa
7884 | TODO_verify_flow
7885 | TODO_ggc_collect /* todo_flags_finish */
7886 }
7887 };