1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "diagnostic.h"
35 #include "tree-pretty-print.h"
36 #include "gimple-pretty-print.h"
37 #include "toplev.h"
38 #include "intl.h"
39 #include "cfgloop.h"
40 #include "tree-scalar-evolution.h"
41 #include "tree-ssa-propagate.h"
42 #include "tree-chrec.h"
43
44
45 /* Set of SSA names found live during the RPO traversal of the function
46 for still active basic-blocks. */
47 static sbitmap *live;
48
49 /* Return true if the SSA name NAME is live on the edge E. */
50
51 static bool
52 live_on_edge (edge e, tree name)
53 {
54 return (live[e->dest->index]
55 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
56 }
57
58 /* Local functions. */
59 static int compare_values (tree val1, tree val2);
60 static int compare_values_warnv (tree val1, tree val2, bool *);
61 static void vrp_meet (value_range_t *, value_range_t *);
62 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
63 tree, tree, bool, bool *,
64 bool *);
65
66 /* Location information for ASSERT_EXPRs. Each instance of this
67 structure describes an ASSERT_EXPR for an SSA name. Since a single
68 SSA name may have more than one assertion associated with it, these
69 locations are kept in a linked list attached to the corresponding
70 SSA name. */
71 struct assert_locus_d
72 {
73 /* Basic block where the assertion would be inserted. */
74 basic_block bb;
75
76 /* Some assertions need to be inserted on an edge (e.g., assertions
77 generated by COND_EXPRs). In those cases, BB will be NULL. */
78 edge e;
79
80 /* Pointer to the statement that generated this assertion. */
81 gimple_stmt_iterator si;
82
83 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
84 enum tree_code comp_code;
85
86 /* Value being compared against. */
87 tree val;
88
89 /* Expression to compare. */
90 tree expr;
91
92 /* Next node in the linked list. */
93 struct assert_locus_d *next;
94 };
95
96 typedef struct assert_locus_d *assert_locus_t;
97
98 /* If bit I is present, it means that SSA name N_i has a list of
99 assertions that should be inserted in the IL. */
100 static bitmap need_assert_for;
101
102 /* Array of lists of assertion locations. ASSERTS_FOR[I]
103 holds a list of ASSERT_LOCUS_T nodes that describe where
104 ASSERT_EXPRs for SSA name N_I should be inserted. */
105 static assert_locus_t *asserts_for;
106
107 /* Value range array. After propagation, VR_VALUE[I] holds the range
108 of values that SSA name N_I may take. */
109 static value_range_t **vr_value;
110
111 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
112 number of executable edges we saw the last time we visited the
113 node. */
114 static int *vr_phi_edge_counts;
115
116 typedef struct {
117 gimple stmt;
118 tree vec;
119 } switch_update;
120
121 static VEC (edge, heap) *to_remove_edges;
122 DEF_VEC_O(switch_update);
123 DEF_VEC_ALLOC_O(switch_update, heap);
124 static VEC (switch_update, heap) *to_update_switch_stmts;
125
126
127 /* Return the maximum value for TYPE. */
128
129 static inline tree
130 vrp_val_max (const_tree type)
131 {
132 if (!INTEGRAL_TYPE_P (type))
133 return NULL_TREE;
134
135 return TYPE_MAX_VALUE (type);
136 }
137
138 /* Return the minimum value for TYPE. */
139
140 static inline tree
141 vrp_val_min (const_tree type)
142 {
143 if (!INTEGRAL_TYPE_P (type))
144 return NULL_TREE;
145
146 return TYPE_MIN_VALUE (type);
147 }
148
149 /* Return whether VAL is equal to the maximum value of its type. This
150 will be true for a positive overflow infinity. We can't do a
151 simple equality comparison with TYPE_MAX_VALUE because C typedefs
152 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
153 to the integer constant with the same value in the type. */
154
155 static inline bool
156 vrp_val_is_max (const_tree val)
157 {
158 tree type_max = vrp_val_max (TREE_TYPE (val));
159 return (val == type_max
160 || (type_max != NULL_TREE
161 && operand_equal_p (val, type_max, 0)));
162 }
163
164 /* Return whether VAL is equal to the minimum value of its type. This
165 will be true for a negative overflow infinity. */
166
167 static inline bool
168 vrp_val_is_min (const_tree val)
169 {
170 tree type_min = vrp_val_min (TREE_TYPE (val));
171 return (val == type_min
172 || (type_min != NULL_TREE
173 && operand_equal_p (val, type_min, 0)));
174 }
175
176
177 /* Return whether TYPE should use an overflow infinity distinct from
178 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
179 represent a signed overflow during VRP computations. An infinity
180 is distinct from a half-range, which will go from some number to
181 TYPE_{MIN,MAX}_VALUE. */
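/* For illustration: for a 32-bit signed int, [100, INT_MAX] is an
   ordinary half-range, whereas a bound of +INF(OVF) -- TYPE_MAX_VALUE
   with TREE_OVERFLOW set, see make_overflow_infinity below -- records
   that the bound was only reached by assuming that signed overflow
   does not occur.  */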
182
183 static inline bool
184 needs_overflow_infinity (const_tree type)
185 {
186 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
187 }
188
189 /* Return whether TYPE can support our overflow infinity
190 representation: we use the TREE_OVERFLOW flag, which only exists
191 for constants. If TYPE doesn't support this, we don't optimize
192 cases which would require signed overflow--we drop them to
193 VARYING. */
194
195 static inline bool
196 supports_overflow_infinity (const_tree type)
197 {
198 tree min = vrp_val_min (type), max = vrp_val_max (type);
199 #ifdef ENABLE_CHECKING
200 gcc_assert (needs_overflow_infinity (type));
201 #endif
202 return (min != NULL_TREE
203 && CONSTANT_CLASS_P (min)
204 && max != NULL_TREE
205 && CONSTANT_CLASS_P (max));
206 }
207
208 /* VAL is the maximum or minimum value of a type. Return a
209 corresponding overflow infinity. */
210
211 static inline tree
212 make_overflow_infinity (tree val)
213 {
214 #ifdef ENABLE_CHECKING
215 gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
216 #endif
217 val = copy_node (val);
218 TREE_OVERFLOW (val) = 1;
219 return val;
220 }
221
222 /* Return a negative overflow infinity for TYPE. */
223
224 static inline tree
225 negative_overflow_infinity (tree type)
226 {
227 #ifdef ENABLE_CHECKING
228 gcc_assert (supports_overflow_infinity (type));
229 #endif
230 return make_overflow_infinity (vrp_val_min (type));
231 }
232
233 /* Return a positive overflow infinity for TYPE. */
234
235 static inline tree
236 positive_overflow_infinity (tree type)
237 {
238 #ifdef ENABLE_CHECKING
239 gcc_assert (supports_overflow_infinity (type));
240 #endif
241 return make_overflow_infinity (vrp_val_max (type));
242 }
243
244 /* Return whether VAL is a negative overflow infinity. */
245
246 static inline bool
247 is_negative_overflow_infinity (const_tree val)
248 {
249 return (needs_overflow_infinity (TREE_TYPE (val))
250 && CONSTANT_CLASS_P (val)
251 && TREE_OVERFLOW (val)
252 && vrp_val_is_min (val));
253 }
254
255 /* Return whether VAL is a positive overflow infinity. */
256
257 static inline bool
258 is_positive_overflow_infinity (const_tree val)
259 {
260 return (needs_overflow_infinity (TREE_TYPE (val))
261 && CONSTANT_CLASS_P (val)
262 && TREE_OVERFLOW (val)
263 && vrp_val_is_max (val));
264 }
265
266 /* Return whether VAL is a positive or negative overflow infinity. */
267
268 static inline bool
269 is_overflow_infinity (const_tree val)
270 {
271 return (needs_overflow_infinity (TREE_TYPE (val))
272 && CONSTANT_CLASS_P (val)
273 && TREE_OVERFLOW (val)
274 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
275 }
276
277 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
278
279 static inline bool
280 stmt_overflow_infinity (gimple stmt)
281 {
282 if (is_gimple_assign (stmt)
283 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
284 GIMPLE_SINGLE_RHS)
285 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
286 return false;
287 }
288
289 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
290 the same value with TREE_OVERFLOW clear. This can be used to avoid
291 confusing a regular value with an overflow value. */
292
293 static inline tree
294 avoid_overflow_infinity (tree val)
295 {
296 if (!is_overflow_infinity (val))
297 return val;
298
299 if (vrp_val_is_max (val))
300 return vrp_val_max (TREE_TYPE (val));
301 else
302 {
303 #ifdef ENABLE_CHECKING
304 gcc_assert (vrp_val_is_min (val));
305 #endif
306 return vrp_val_min (TREE_TYPE (val));
307 }
308 }
309
310
311 /* Return true if ARG is marked with the nonnull attribute in the
312 current function signature. */
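/* For example, given a current function declared as

     void f (int *p, int *q) __attribute__ ((nonnull (2)));

   this returns true for Q (and for both P and Q with a bare "nonnull").
   get_value_range below uses this to give such parameters the
   anti-range ~[0, 0].  */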
313
314 static bool
315 nonnull_arg_p (const_tree arg)
316 {
317 tree t, attrs, fntype;
318 unsigned HOST_WIDE_INT arg_num;
319
320 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
321
322 /* The static chain decl is always non null. */
323 if (arg == cfun->static_chain_decl)
324 return true;
325
326 fntype = TREE_TYPE (current_function_decl);
327 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
328
329 /* If "nonnull" wasn't specified, we know nothing about the argument. */
330 if (attrs == NULL_TREE)
331 return false;
332
333 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
334 if (TREE_VALUE (attrs) == NULL_TREE)
335 return true;
336
337 /* Get the position number for ARG in the function signature. */
338 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
339 t;
340 t = TREE_CHAIN (t), arg_num++)
341 {
342 if (t == arg)
343 break;
344 }
345
346 gcc_assert (t == arg);
347
348 /* Now see if ARG_NUM is mentioned in the nonnull list. */
349 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
350 {
351 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
352 return true;
353 }
354
355 return false;
356 }
357
358
359 /* Set value range VR to VR_VARYING. */
360
361 static inline void
362 set_value_range_to_varying (value_range_t *vr)
363 {
364 vr->type = VR_VARYING;
365 vr->min = vr->max = NULL_TREE;
366 if (vr->equiv)
367 bitmap_clear (vr->equiv);
368 }
369
370
371 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
372
373 static void
374 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
375 tree max, bitmap equiv)
376 {
377 #if defined ENABLE_CHECKING
378 /* Check the validity of the range. */
379 if (t == VR_RANGE || t == VR_ANTI_RANGE)
380 {
381 int cmp;
382
383 gcc_assert (min && max);
384
385 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
386 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
387
388 cmp = compare_values (min, max);
389 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
390
391 if (needs_overflow_infinity (TREE_TYPE (min)))
392 gcc_assert (!is_overflow_infinity (min)
393 || !is_overflow_infinity (max));
394 }
395
396 if (t == VR_UNDEFINED || t == VR_VARYING)
397 gcc_assert (min == NULL_TREE && max == NULL_TREE);
398
399 if (t == VR_UNDEFINED || t == VR_VARYING)
400 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
401 #endif
402
403 vr->type = t;
404 vr->min = min;
405 vr->max = max;
406
407 /* Since updating the equivalence set involves deep copying the
408 bitmaps, only do it if absolutely necessary. */
409 if (vr->equiv == NULL
410 && equiv != NULL)
411 vr->equiv = BITMAP_ALLOC (NULL);
412
413 if (equiv != vr->equiv)
414 {
415 if (equiv && !bitmap_empty_p (equiv))
416 bitmap_copy (vr->equiv, equiv);
417 else
418 bitmap_clear (vr->equiv);
419 }
420 }
421
422
423 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
424 This means adjusting T, MIN and MAX representing the case of a
425 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
426 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
427 In corner cases where MAX+1 or MIN-1 wraps this will fall back
428 to varying.
429 This routine exists to ease canonicalization in the case where we
430 extract ranges from var + CST op limit. */
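/* As an illustration, for an 8-bit unsigned type the wrapping range
   [250, 5] is canonicalized to the anti-range ~[6, 249], the anti-range
   ~[0, 250] becomes the range [251, 255], and the empty anti-range
   ~[0, 255] drops to VR_VARYING; the non-null anti-range ~[0, 0] is
   preserved as-is.  */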
431
432 static void
433 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
434 tree min, tree max, bitmap equiv)
435 {
436 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
437 if ((t != VR_RANGE
438 && t != VR_ANTI_RANGE)
439 || TREE_CODE (min) != INTEGER_CST
440 || TREE_CODE (max) != INTEGER_CST)
441 {
442 set_value_range (vr, t, min, max, equiv);
443 return;
444 }
445
446 /* MIN and MAX are in the wrong order. To represent the same set of
447 values, adjust the bounds to [MAX + 1, MIN - 1] and flip the range type. */
448 if (tree_int_cst_lt (max, min))
449 {
450 tree one = build_int_cst (TREE_TYPE (min), 1);
451 tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
452 max = int_const_binop (MINUS_EXPR, min, one, 0);
453 min = tmp;
454
455 /* There's one corner case, if we had [C+1, C] before we now have
456 that again. But this represents an empty value range, so drop
457 to varying in this case. */
458 if (tree_int_cst_lt (max, min))
459 {
460 set_value_range_to_varying (vr);
461 return;
462 }
463
464 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
465 }
466
467 /* Anti-ranges that can be represented as ranges should be so. */
468 if (t == VR_ANTI_RANGE)
469 {
470 bool is_min = vrp_val_is_min (min);
471 bool is_max = vrp_val_is_max (max);
472
473 if (is_min && is_max)
474 {
475 /* We cannot deal with empty ranges, drop to varying. */
476 set_value_range_to_varying (vr);
477 return;
478 }
479 else if (is_min
480 /* As a special exception preserve non-null ranges. */
481 && !(TYPE_UNSIGNED (TREE_TYPE (min))
482 && integer_zerop (max)))
483 {
484 tree one = build_int_cst (TREE_TYPE (max), 1);
485 min = int_const_binop (PLUS_EXPR, max, one, 0);
486 max = vrp_val_max (TREE_TYPE (max));
487 t = VR_RANGE;
488 }
489 else if (is_max)
490 {
491 tree one = build_int_cst (TREE_TYPE (min), 1);
492 max = int_const_binop (MINUS_EXPR, min, one, 0);
493 min = vrp_val_min (TREE_TYPE (min));
494 t = VR_RANGE;
495 }
496 }
497
498 set_value_range (vr, t, min, max, equiv);
499 }
500
501 /* Copy value range FROM into value range TO. */
502
503 static inline void
504 copy_value_range (value_range_t *to, value_range_t *from)
505 {
506 set_value_range (to, from->type, from->min, from->max, from->equiv);
507 }
508
509 /* Set value range VR to a single value. This function is only called
510 with values we get from statements, and exists to clear the
511 TREE_OVERFLOW flag so that we don't think we have an overflow
512 infinity when we shouldn't. */
513
514 static inline void
515 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
516 {
517 gcc_assert (is_gimple_min_invariant (val));
518 val = avoid_overflow_infinity (val);
519 set_value_range (vr, VR_RANGE, val, val, equiv);
520 }
521
522 /* Set value range VR to a non-negative range of type TYPE.
523 OVERFLOW_INFINITY indicates whether to use an overflow infinity
524 rather than TYPE_MAX_VALUE; this should be true if we determine
525 that the range is nonnegative based on the assumption that signed
526 overflow does not occur. */
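/* For example, for a 32-bit signed int this yields [0, INT_MAX], or
   [0, +INF(OVF)] when OVERFLOW_INFINITY is true; if the type cannot
   represent an overflow infinity the range drops to VR_VARYING.  */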
527
528 static inline void
529 set_value_range_to_nonnegative (value_range_t *vr, tree type,
530 bool overflow_infinity)
531 {
532 tree zero;
533
534 if (overflow_infinity && !supports_overflow_infinity (type))
535 {
536 set_value_range_to_varying (vr);
537 return;
538 }
539
540 zero = build_int_cst (type, 0);
541 set_value_range (vr, VR_RANGE, zero,
542 (overflow_infinity
543 ? positive_overflow_infinity (type)
544 : TYPE_MAX_VALUE (type)),
545 vr->equiv);
546 }
547
548 /* Set value range VR to a non-NULL range of type TYPE. */
549
550 static inline void
551 set_value_range_to_nonnull (value_range_t *vr, tree type)
552 {
553 tree zero = build_int_cst (type, 0);
554 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
555 }
556
557
558 /* Set value range VR to a NULL range of type TYPE. */
559
560 static inline void
561 set_value_range_to_null (value_range_t *vr, tree type)
562 {
563 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
564 }
565
566
567 /* Set value range VR to a range of a truthvalue of type TYPE. */
568
569 static inline void
570 set_value_range_to_truthvalue (value_range_t *vr, tree type)
571 {
572 if (TYPE_PRECISION (type) == 1)
573 set_value_range_to_varying (vr);
574 else
575 set_value_range (vr, VR_RANGE,
576 build_int_cst (type, 0), build_int_cst (type, 1),
577 vr->equiv);
578 }
579
580
581 /* Set value range VR to VR_UNDEFINED. */
582
583 static inline void
584 set_value_range_to_undefined (value_range_t *vr)
585 {
586 vr->type = VR_UNDEFINED;
587 vr->min = vr->max = NULL_TREE;
588 if (vr->equiv)
589 bitmap_clear (vr->equiv);
590 }
591
592
593 /* If abs (min) < abs (max), set VR to [-max, max], if
594 abs (min) >= abs (max), set VR to [-min, min]. */
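/* For example, [-3, 7] yields [-7, 7] and [-9, 4] yields [-9, 9].  */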
595
596 static void
597 abs_extent_range (value_range_t *vr, tree min, tree max)
598 {
599 int cmp;
600
601 gcc_assert (TREE_CODE (min) == INTEGER_CST);
602 gcc_assert (TREE_CODE (max) == INTEGER_CST);
603 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
604 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
605 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
606 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
607 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
608 {
609 set_value_range_to_varying (vr);
610 return;
611 }
612 cmp = compare_values (min, max);
613 if (cmp == -1)
614 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
615 else if (cmp == 0 || cmp == 1)
616 {
617 max = min;
618 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
619 }
620 else
621 {
622 set_value_range_to_varying (vr);
623 return;
624 }
625 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
626 }
627
628
629 /* Return value range information for VAR.
630
631 If we have no value ranges recorded (i.e., VRP is not running), then
632 return NULL. Otherwise create an empty range if none existed for VAR. */
633
634 static value_range_t *
635 get_value_range (const_tree var)
636 {
637 value_range_t *vr;
638 tree sym;
639 unsigned ver = SSA_NAME_VERSION (var);
640
641 /* If we have no recorded ranges, then return NULL. */
642 if (! vr_value)
643 return NULL;
644
645 vr = vr_value[ver];
646 if (vr)
647 return vr;
648
649 /* Create a default value range. */
650 vr_value[ver] = vr = XCNEW (value_range_t);
651
652 /* Defer allocating the equivalence set. */
653 vr->equiv = NULL;
654
655 /* If VAR is a default definition, the variable can take any value
656 in VAR's type. */
657 sym = SSA_NAME_VAR (var);
658 if (SSA_NAME_IS_DEFAULT_DEF (var))
659 {
660 /* Try to use the "nonnull" attribute to create ~[0, 0]
661 anti-ranges for pointers. Note that this is only valid with
662 default definitions of PARM_DECLs. */
663 if (TREE_CODE (sym) == PARM_DECL
664 && POINTER_TYPE_P (TREE_TYPE (sym))
665 && nonnull_arg_p (sym))
666 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
667 else
668 set_value_range_to_varying (vr);
669 }
670
671 return vr;
672 }
673
674 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
675
676 static inline bool
677 vrp_operand_equal_p (const_tree val1, const_tree val2)
678 {
679 if (val1 == val2)
680 return true;
681 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
682 return false;
683 if (is_overflow_infinity (val1))
684 return is_overflow_infinity (val2);
685 return true;
686 }
687
688 /* Return true, if the bitmaps B1 and B2 are equal. */
689
690 static inline bool
691 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
692 {
693 return (b1 == b2
694 || (b1 && b2
695 && bitmap_equal_p (b1, b2)));
696 }
697
698 /* Update the value range and equivalence set for variable VAR to
699 NEW_VR. Return true if NEW_VR is different from VAR's previous
700 value.
701
702 NOTE: This function assumes that NEW_VR is a temporary value range
703 object created for the sole purpose of updating VAR's range. The
704 storage used by the equivalence set from NEW_VR will be freed by
705 this function. Do not call update_value_range when NEW_VR
706 is the range object associated with another SSA name. */
707
708 static inline bool
709 update_value_range (const_tree var, value_range_t *new_vr)
710 {
711 value_range_t *old_vr;
712 bool is_new;
713
714 /* Update the value range, if necessary. */
715 old_vr = get_value_range (var);
716 is_new = old_vr->type != new_vr->type
717 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
718 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
719 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
720
721 if (is_new)
722 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
723 new_vr->equiv);
724
725 BITMAP_FREE (new_vr->equiv);
726
727 return is_new;
728 }
729
730
731 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
732 point where equivalence processing can be turned on/off. */
733
734 static void
735 add_equivalence (bitmap *equiv, const_tree var)
736 {
737 unsigned ver = SSA_NAME_VERSION (var);
738 value_range_t *vr = vr_value[ver];
739
740 if (*equiv == NULL)
741 *equiv = BITMAP_ALLOC (NULL);
742 bitmap_set_bit (*equiv, ver);
743 if (vr && vr->equiv)
744 bitmap_ior_into (*equiv, vr->equiv);
745 }
746
747
748 /* Return true if VR is ~[0, 0]. */
749
750 static inline bool
751 range_is_nonnull (value_range_t *vr)
752 {
753 return vr->type == VR_ANTI_RANGE
754 && integer_zerop (vr->min)
755 && integer_zerop (vr->max);
756 }
757
758
759 /* Return true if VR is [0, 0]. */
760
761 static inline bool
762 range_is_null (value_range_t *vr)
763 {
764 return vr->type == VR_RANGE
765 && integer_zerop (vr->min)
766 && integer_zerop (vr->max);
767 }
768
769 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
770 a singleton. */
771
772 static inline bool
773 range_int_cst_p (value_range_t *vr)
774 {
775 return (vr->type == VR_RANGE
776 && TREE_CODE (vr->max) == INTEGER_CST
777 && TREE_CODE (vr->min) == INTEGER_CST
778 && !TREE_OVERFLOW (vr->max)
779 && !TREE_OVERFLOW (vr->min));
780 }
781
782 /* Return true if VR is an INTEGER_CST singleton. */
783
784 static inline bool
785 range_int_cst_singleton_p (value_range_t *vr)
786 {
787 return (range_int_cst_p (vr)
788 && tree_int_cst_equal (vr->min, vr->max));
789 }
790
791 /* Return true if value range VR involves at least one symbol. */
792
793 static inline bool
794 symbolic_range_p (value_range_t *vr)
795 {
796 return (!is_gimple_min_invariant (vr->min)
797 || !is_gimple_min_invariant (vr->max));
798 }
799
800 /* Return true if value range VR uses an overflow infinity. */
801
802 static inline bool
803 overflow_infinity_range_p (value_range_t *vr)
804 {
805 return (vr->type == VR_RANGE
806 && (is_overflow_infinity (vr->min)
807 || is_overflow_infinity (vr->max)));
808 }
809
810 /* Return false if we cannot make a valid comparison based on VR;
811 this will be the case if it uses an overflow infinity and overflow
812 is not undefined (i.e., -fno-strict-overflow is in effect).
813 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
814 uses an overflow infinity. */
815
816 static bool
817 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
818 {
819 gcc_assert (vr->type == VR_RANGE);
820 if (is_overflow_infinity (vr->min))
821 {
822 *strict_overflow_p = true;
823 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
824 return false;
825 }
826 if (is_overflow_infinity (vr->max))
827 {
828 *strict_overflow_p = true;
829 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
830 return false;
831 }
832 return true;
833 }
834
835
836 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
837 ranges obtained so far. */
838
839 static bool
840 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
841 {
842 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
843 || (TREE_CODE (expr) == SSA_NAME
844 && ssa_name_nonnegative_p (expr)));
845 }
846
847 /* Return true if the result of assignment STMT is known to be non-negative.
848 If the return value is based on the assumption that signed overflow is
849 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
850 *STRICT_OVERFLOW_P. */
851
852 static bool
853 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
854 {
855 enum tree_code code = gimple_assign_rhs_code (stmt);
856 switch (get_gimple_rhs_class (code))
857 {
858 case GIMPLE_UNARY_RHS:
859 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
860 gimple_expr_type (stmt),
861 gimple_assign_rhs1 (stmt),
862 strict_overflow_p);
863 case GIMPLE_BINARY_RHS:
864 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
865 gimple_expr_type (stmt),
866 gimple_assign_rhs1 (stmt),
867 gimple_assign_rhs2 (stmt),
868 strict_overflow_p);
869 case GIMPLE_SINGLE_RHS:
870 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
871 strict_overflow_p);
872 case GIMPLE_INVALID_RHS:
873 gcc_unreachable ();
874 default:
875 gcc_unreachable ();
876 }
877 }
878
879 /* Return true if the return value of call STMT is known to be non-negative.
880 If the return value is based on the assumption that signed overflow is
881 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
882 *STRICT_OVERFLOW_P. */
883
884 static bool
885 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
886 {
887 tree arg0 = gimple_call_num_args (stmt) > 0 ?
888 gimple_call_arg (stmt, 0) : NULL_TREE;
889 tree arg1 = gimple_call_num_args (stmt) > 1 ?
890 gimple_call_arg (stmt, 1) : NULL_TREE;
891
892 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
893 gimple_call_fndecl (stmt),
894 arg0,
895 arg1,
896 strict_overflow_p);
897 }
898
899 /* Return true if STMT is known to compute a non-negative value.
900 If the return value is based on the assumption that signed overflow is
901 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
902 *STRICT_OVERFLOW_P. */
903
904 static bool
905 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
906 {
907 switch (gimple_code (stmt))
908 {
909 case GIMPLE_ASSIGN:
910 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
911 case GIMPLE_CALL:
912 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
913 default:
914 gcc_unreachable ();
915 }
916 }
917
918 /* Return true if the result of assignment STMT is known to be non-zero.
919 If the return value is based on the assumption that signed overflow is
920 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
921 *STRICT_OVERFLOW_P. */
922
923 static bool
924 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
925 {
926 enum tree_code code = gimple_assign_rhs_code (stmt);
927 switch (get_gimple_rhs_class (code))
928 {
929 case GIMPLE_UNARY_RHS:
930 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
931 gimple_expr_type (stmt),
932 gimple_assign_rhs1 (stmt),
933 strict_overflow_p);
934 case GIMPLE_BINARY_RHS:
935 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
936 gimple_expr_type (stmt),
937 gimple_assign_rhs1 (stmt),
938 gimple_assign_rhs2 (stmt),
939 strict_overflow_p);
940 case GIMPLE_SINGLE_RHS:
941 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
942 strict_overflow_p);
943 case GIMPLE_INVALID_RHS:
944 gcc_unreachable ();
945 default:
946 gcc_unreachable ();
947 }
948 }
949
950 /* Return true if STMT is known to compute a non-zero value.
951 If the return value is based on the assumption that signed overflow is
952 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
953 *STRICT_OVERFLOW_P. */
954
955 static bool
956 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
957 {
958 switch (gimple_code (stmt))
959 {
960 case GIMPLE_ASSIGN:
961 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
962 case GIMPLE_CALL:
963 return gimple_alloca_call_p (stmt);
964 default:
965 gcc_unreachable ();
966 }
967 }
968
969 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
970 obtained so far. */
971
972 static bool
973 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
974 {
975 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
976 return true;
977
978 /* If we have an expression of the form &X->a, then the expression
979 is nonnull if X is nonnull. */
980 if (is_gimple_assign (stmt)
981 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
982 {
983 tree expr = gimple_assign_rhs1 (stmt);
984 tree base = get_base_address (TREE_OPERAND (expr, 0));
985
986 if (base != NULL_TREE
987 && TREE_CODE (base) == INDIRECT_REF
988 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
989 {
990 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
991 if (range_is_nonnull (vr))
992 return true;
993 }
994 }
995
996 return false;
997 }
998
999 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1000 a gimple invariant, or SSA_NAME +- CST. */
1001
1002 static bool
1003 valid_value_p (tree expr)
1004 {
1005 if (TREE_CODE (expr) == SSA_NAME)
1006 return true;
1007
1008 if (TREE_CODE (expr) == PLUS_EXPR
1009 || TREE_CODE (expr) == MINUS_EXPR)
1010 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1011 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1012
1013 return is_gimple_min_invariant (expr);
1014 }
1015
1016 /* Return
1017 1 if VAL < VAL2
1018 0 if !(VAL < VAL2)
1019 -2 if those are incomparable. */
1020 static inline int
1021 operand_less_p (tree val, tree val2)
1022 {
1023 /* LT is folded faster than GE and others. Inline the common case. */
1024 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1025 {
1026 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1027 return INT_CST_LT_UNSIGNED (val, val2);
1028 else
1029 {
1030 if (INT_CST_LT (val, val2))
1031 return 1;
1032 }
1033 }
1034 else
1035 {
1036 tree tcmp;
1037
1038 fold_defer_overflow_warnings ();
1039
1040 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1041
1042 fold_undefer_and_ignore_overflow_warnings ();
1043
1044 if (!tcmp
1045 || TREE_CODE (tcmp) != INTEGER_CST)
1046 return -2;
1047
1048 if (!integer_zerop (tcmp))
1049 return 1;
1050 }
1051
1052 /* val >= val2, not considering overflow infinity. */
1053 if (is_negative_overflow_infinity (val))
1054 return is_negative_overflow_infinity (val2) ? 0 : 1;
1055 else if (is_positive_overflow_infinity (val2))
1056 return is_positive_overflow_infinity (val) ? 0 : 1;
1057
1058 return 0;
1059 }
1060
1061 /* Compare two values VAL1 and VAL2. Return
1062
1063 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1064 -1 if VAL1 < VAL2,
1065 0 if VAL1 == VAL2,
1066 +1 if VAL1 > VAL2, and
1067 +2 if VAL1 != VAL2
1068
1069 This is similar to tree_int_cst_compare but supports pointer values
1070 and values that cannot be compared at compile time.
1071
1072 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1073 true if the return value is only valid if we assume that signed
1074 overflow is undefined. */
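/* For instance, for a signed SSA name n_1 with undefined overflow,
   comparing n_1 + 4 against n_1 + 10 returns -1 and sets
   *STRICT_OVERFLOW_P, since the ordering only holds if the additions
   do not wrap; operands based on different SSA names yield -2.  */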
1075
1076 static int
1077 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1078 {
1079 if (val1 == val2)
1080 return 0;
1081
1082 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1083 both integers. */
1084 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1085 == POINTER_TYPE_P (TREE_TYPE (val2)));
1086 /* Convert the two values into the same type. This is needed because
1087 sizetype causes sign extension even for unsigned types. */
1088 val2 = fold_convert (TREE_TYPE (val1), val2);
1089 STRIP_USELESS_TYPE_CONVERSION (val2);
1090
1091 if ((TREE_CODE (val1) == SSA_NAME
1092 || TREE_CODE (val1) == PLUS_EXPR
1093 || TREE_CODE (val1) == MINUS_EXPR)
1094 && (TREE_CODE (val2) == SSA_NAME
1095 || TREE_CODE (val2) == PLUS_EXPR
1096 || TREE_CODE (val2) == MINUS_EXPR))
1097 {
1098 tree n1, c1, n2, c2;
1099 enum tree_code code1, code2;
1100
1101 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1102 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1103 same name, return -2. */
1104 if (TREE_CODE (val1) == SSA_NAME)
1105 {
1106 code1 = SSA_NAME;
1107 n1 = val1;
1108 c1 = NULL_TREE;
1109 }
1110 else
1111 {
1112 code1 = TREE_CODE (val1);
1113 n1 = TREE_OPERAND (val1, 0);
1114 c1 = TREE_OPERAND (val1, 1);
1115 if (tree_int_cst_sgn (c1) == -1)
1116 {
1117 if (is_negative_overflow_infinity (c1))
1118 return -2;
1119 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1120 if (!c1)
1121 return -2;
1122 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1123 }
1124 }
1125
1126 if (TREE_CODE (val2) == SSA_NAME)
1127 {
1128 code2 = SSA_NAME;
1129 n2 = val2;
1130 c2 = NULL_TREE;
1131 }
1132 else
1133 {
1134 code2 = TREE_CODE (val2);
1135 n2 = TREE_OPERAND (val2, 0);
1136 c2 = TREE_OPERAND (val2, 1);
1137 if (tree_int_cst_sgn (c2) == -1)
1138 {
1139 if (is_negative_overflow_infinity (c2))
1140 return -2;
1141 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1142 if (!c2)
1143 return -2;
1144 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1145 }
1146 }
1147
1148 /* Both values must use the same name. */
1149 if (n1 != n2)
1150 return -2;
1151
1152 if (code1 == SSA_NAME
1153 && code2 == SSA_NAME)
1154 /* NAME == NAME */
1155 return 0;
1156
1157 /* If overflow is defined we cannot simplify more. */
1158 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1159 return -2;
1160
1161 if (strict_overflow_p != NULL
1162 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1163 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1164 *strict_overflow_p = true;
1165
1166 if (code1 == SSA_NAME)
1167 {
1168 if (code2 == PLUS_EXPR)
1169 /* NAME < NAME + CST */
1170 return -1;
1171 else if (code2 == MINUS_EXPR)
1172 /* NAME > NAME - CST */
1173 return 1;
1174 }
1175 else if (code1 == PLUS_EXPR)
1176 {
1177 if (code2 == SSA_NAME)
1178 /* NAME + CST > NAME */
1179 return 1;
1180 else if (code2 == PLUS_EXPR)
1181 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1182 return compare_values_warnv (c1, c2, strict_overflow_p);
1183 else if (code2 == MINUS_EXPR)
1184 /* NAME + CST1 > NAME - CST2 */
1185 return 1;
1186 }
1187 else if (code1 == MINUS_EXPR)
1188 {
1189 if (code2 == SSA_NAME)
1190 /* NAME - CST < NAME */
1191 return -1;
1192 else if (code2 == PLUS_EXPR)
1193 /* NAME - CST1 < NAME + CST2 */
1194 return -1;
1195 else if (code2 == MINUS_EXPR)
1196 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1197 C1 and C2 are swapped in the call to compare_values. */
1198 return compare_values_warnv (c2, c1, strict_overflow_p);
1199 }
1200
1201 gcc_unreachable ();
1202 }
1203
1204 /* We cannot compare non-constants. */
1205 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1206 return -2;
1207
1208 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1209 {
1210 /* We cannot compare overflowed values, except for overflow
1211 infinities. */
1212 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1213 {
1214 if (strict_overflow_p != NULL)
1215 *strict_overflow_p = true;
1216 if (is_negative_overflow_infinity (val1))
1217 return is_negative_overflow_infinity (val2) ? 0 : -1;
1218 else if (is_negative_overflow_infinity (val2))
1219 return 1;
1220 else if (is_positive_overflow_infinity (val1))
1221 return is_positive_overflow_infinity (val2) ? 0 : 1;
1222 else if (is_positive_overflow_infinity (val2))
1223 return -1;
1224 return -2;
1225 }
1226
1227 return tree_int_cst_compare (val1, val2);
1228 }
1229 else
1230 {
1231 tree t;
1232
1233 /* First see if VAL1 and VAL2 are not the same. */
1234 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1235 return 0;
1236
1237 /* If VAL1 is a lower address than VAL2, return -1. */
1238 if (operand_less_p (val1, val2) == 1)
1239 return -1;
1240
1241 /* If VAL1 is a higher address than VAL2, return +1. */
1242 if (operand_less_p (val2, val1) == 1)
1243 return 1;
1244
1245 /* If VAL1 is different than VAL2, return +2.
1246 For integer constants we either have already returned -1 or 1
1247 or they are equivalent. We still might succeed in proving
1248 something about non-trivial operands. */
1249 if (TREE_CODE (val1) != INTEGER_CST
1250 || TREE_CODE (val2) != INTEGER_CST)
1251 {
1252 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1253 if (t && integer_onep (t))
1254 return 2;
1255 }
1256
1257 return -2;
1258 }
1259 }
1260
1261 /* Compare values like compare_values_warnv, but treat comparisons of
1262 nonconstants which rely on undefined overflow as incomparable. */
1263
1264 static int
1265 compare_values (tree val1, tree val2)
1266 {
1267 bool sop;
1268 int ret;
1269
1270 sop = false;
1271 ret = compare_values_warnv (val1, val2, &sop);
1272 if (sop
1273 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1274 ret = -2;
1275 return ret;
1276 }
1277
1278
1279 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1280 0 if VAL is not inside VR,
1281 -2 if we cannot tell either way.
1282
1283 FIXME, the current semantics of this function are a bit quirky
1284 when taken in the context of VRP. Here we do not care
1285 about VR's type. If VR is the anti-range ~[3, 5] the call
1286 value_inside_range (4, VR) will return 1.
1287
1288 This is counter-intuitive in a strict sense, but the callers
1289 currently expect this. They are calling the function
1290 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1291 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1292 themselves.
1293
1294 This also applies to value_ranges_intersect_p and
1295 range_includes_zero_p. The semantics of VR_RANGE and
1296 VR_ANTI_RANGE should be encoded here, but that also means
1297 adapting the users of these functions to the new semantics.
1298
1299 Benchmark compile/20001226-1.c compilation time after changing this
1300 function. */
1301
1302 static inline int
1303 value_inside_range (tree val, value_range_t * vr)
1304 {
1305 int cmp1, cmp2;
1306
1307 cmp1 = operand_less_p (val, vr->min);
1308 if (cmp1 == -2)
1309 return -2;
1310 if (cmp1 == 1)
1311 return 0;
1312
1313 cmp2 = operand_less_p (vr->max, val);
1314 if (cmp2 == -2)
1315 return -2;
1316
1317 return !cmp2;
1318 }
1319
1320
1321 /* Return true if value ranges VR0 and VR1 have a non-empty
1322 intersection.
1323
1324 Benchmark compile/20001226-1.c compilation time after changing this
1325 function.
1326 */
1327
1328 static inline bool
1329 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1330 {
1331 /* The value ranges do not intersect if the maximum of the first range is
1332 less than the minimum of the second range or vice versa.
1333 When those relations are unknown, we can't do any better. */
1334 if (operand_less_p (vr0->max, vr1->min) != 0)
1335 return false;
1336 if (operand_less_p (vr1->max, vr0->min) != 0)
1337 return false;
1338 return true;
1339 }
1340
1341
1342 /* Return true if VR includes the value zero, false otherwise. FIXME,
1343 currently this will return false for an anti-range like ~[-4, 3].
1344 This will be wrong when the semantics of value_inside_range are
1345 modified (currently the users of this function expect these
1346 semantics). */
1347
1348 static inline bool
1349 range_includes_zero_p (value_range_t *vr)
1350 {
1351 tree zero;
1352
1353 gcc_assert (vr->type != VR_UNDEFINED
1354 && vr->type != VR_VARYING
1355 && !symbolic_range_p (vr));
1356
1357 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1358 return (value_inside_range (zero, vr) == 1);
1359 }
1360
1361 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1362 false otherwise or if no value range information is available. */
1363
1364 bool
1365 ssa_name_nonnegative_p (const_tree t)
1366 {
1367 value_range_t *vr = get_value_range (t);
1368
1369 if (INTEGRAL_TYPE_P (t)
1370 && TYPE_UNSIGNED (t))
1371 return true;
1372
1373 if (!vr)
1374 return false;
1375
1376 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1377 which would return a useful value should be encoded as a VR_RANGE. */
1378 if (vr->type == VR_RANGE)
1379 {
1380 int result = compare_values (vr->min, integer_zero_node);
1381
1382 return (result == 0 || result == 1);
1383 }
1384 return false;
1385 }
1386
1387 /* If OP has a value range with a single constant value return that,
1388 otherwise return NULL_TREE. This returns OP itself if OP is a
1389 constant. */
1390
1391 static tree
1392 op_with_constant_singleton_value_range (tree op)
1393 {
1394 value_range_t *vr;
1395
1396 if (is_gimple_min_invariant (op))
1397 return op;
1398
1399 if (TREE_CODE (op) != SSA_NAME)
1400 return NULL_TREE;
1401
1402 vr = get_value_range (op);
1403 if (vr->type == VR_RANGE
1404 && operand_equal_p (vr->min, vr->max, 0)
1405 && is_gimple_min_invariant (vr->min))
1406 return vr->min;
1407
1408 return NULL_TREE;
1409 }
1410
1411
1412 /* Extract value range information from an ASSERT_EXPR EXPR and store
1413 it in *VR_P. */
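/* For example, ASSERT_EXPR <x_2, x_2 > 10> for a signed x_2 yields the
   range [11, +INF] together with an equivalence to x_2 (assuming x_2
   had no narrower range already), while ASSERT_EXPR <p_3, p_3 != 0>
   for a pointer p_3 yields the anti-range ~[0, 0].  */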
1414
1415 static void
1416 extract_range_from_assert (value_range_t *vr_p, tree expr)
1417 {
1418 tree var, cond, limit, min, max, type;
1419 value_range_t *var_vr, *limit_vr;
1420 enum tree_code cond_code;
1421
1422 var = ASSERT_EXPR_VAR (expr);
1423 cond = ASSERT_EXPR_COND (expr);
1424
1425 gcc_assert (COMPARISON_CLASS_P (cond));
1426
1427 /* Find VAR in the ASSERT_EXPR conditional. */
1428 if (var == TREE_OPERAND (cond, 0)
1429 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1430 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1431 {
1432 /* If the predicate is of the form VAR COMP LIMIT, then we just
1433 take LIMIT from the RHS and use the same comparison code. */
1434 cond_code = TREE_CODE (cond);
1435 limit = TREE_OPERAND (cond, 1);
1436 cond = TREE_OPERAND (cond, 0);
1437 }
1438 else
1439 {
1440 /* If the predicate is of the form LIMIT COMP VAR, then we need
1441 to flip around the comparison code to create the proper range
1442 for VAR. */
1443 cond_code = swap_tree_comparison (TREE_CODE (cond));
1444 limit = TREE_OPERAND (cond, 0);
1445 cond = TREE_OPERAND (cond, 1);
1446 }
1447
1448 limit = avoid_overflow_infinity (limit);
1449
1450 type = TREE_TYPE (limit);
1451 gcc_assert (limit != var);
1452
1453 /* For pointer arithmetic, we only keep track of pointer equality
1454 and inequality. */
1455 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1456 {
1457 set_value_range_to_varying (vr_p);
1458 return;
1459 }
1460
1461 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1462 try to use LIMIT's range to avoid creating symbolic ranges
1463 unnecessarily. */
1464 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1465
1466 /* LIMIT's range is only interesting if it has any useful information. */
1467 if (limit_vr
1468 && (limit_vr->type == VR_UNDEFINED
1469 || limit_vr->type == VR_VARYING
1470 || symbolic_range_p (limit_vr)))
1471 limit_vr = NULL;
1472
1473 /* Initially, the new range has the same set of equivalences of
1474 VAR's range. This will be revised before returning the final
1475 value. Since assertions may be chained via mutually exclusive
1476 predicates, we will need to trim the set of equivalences before
1477 we are done. */
1478 gcc_assert (vr_p->equiv == NULL);
1479 add_equivalence (&vr_p->equiv, var);
1480
1481 /* Extract a new range based on the asserted comparison for VAR and
1482 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1483 will only use it for equality comparisons (EQ_EXPR). For any
1484 other kind of assertion, we cannot derive a range from LIMIT's
1485 anti-range that can be used to describe the new range. For
1486 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1487 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1488 no single range for x_2 that could describe LE_EXPR, so we might
1489 as well build the range [b_4, +INF] for it.
1490 One special case we handle is extracting a range from a
1491 range test encoded as (unsigned)var + CST <= limit. */
1492 if (TREE_CODE (cond) == NOP_EXPR
1493 || TREE_CODE (cond) == PLUS_EXPR)
1494 {
1495 if (TREE_CODE (cond) == PLUS_EXPR)
1496 {
1497 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1498 TREE_OPERAND (cond, 1));
1499 max = int_const_binop (PLUS_EXPR, limit, min, 0);
1500 cond = TREE_OPERAND (cond, 0);
1501 }
1502 else
1503 {
1504 min = build_int_cst (TREE_TYPE (var), 0);
1505 max = limit;
1506 }
1507
1508 /* Make sure not to set TREE_OVERFLOW on the final type
1509 conversion. We are deliberately interpreting large positive
1510 unsigned values as negative signed values here. */
1511 min = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (min),
1512 TREE_INT_CST_HIGH (min), 0, false);
1513 max = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (max),
1514 TREE_INT_CST_HIGH (max), 0, false);
1515
1516 /* We can transform a max, min range to an anti-range or
1517 vice-versa. Use set_and_canonicalize_value_range which does
1518 this for us. */
1519 if (cond_code == LE_EXPR)
1520 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1521 min, max, vr_p->equiv);
1522 else if (cond_code == GT_EXPR)
1523 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1524 min, max, vr_p->equiv);
1525 else
1526 gcc_unreachable ();
1527 }
1528 else if (cond_code == EQ_EXPR)
1529 {
1530 enum value_range_type range_type;
1531
1532 if (limit_vr)
1533 {
1534 range_type = limit_vr->type;
1535 min = limit_vr->min;
1536 max = limit_vr->max;
1537 }
1538 else
1539 {
1540 range_type = VR_RANGE;
1541 min = limit;
1542 max = limit;
1543 }
1544
1545 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1546
1547 /* When asserting the equality VAR == LIMIT and LIMIT is another
1548 SSA name, the new range will also inherit the equivalence set
1549 from LIMIT. */
1550 if (TREE_CODE (limit) == SSA_NAME)
1551 add_equivalence (&vr_p->equiv, limit);
1552 }
1553 else if (cond_code == NE_EXPR)
1554 {
1555 /* As described above, when LIMIT's range is an anti-range and
1556 this assertion is an inequality (NE_EXPR), then we cannot
1557 derive anything from the anti-range. For instance, if
1558 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1559 not imply that VAR's range is [0, 0]. So, in the case of
1560 anti-ranges, we just assert the inequality using LIMIT and
1561 not its anti-range.
1562
1563 If LIMIT_VR is a range, we can only use it to build a new
1564 anti-range if LIMIT_VR is a single-valued range. For
1565 instance, if LIMIT_VR is [0, 1], the predicate
1566 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1567 Rather, it means that for value 0 VAR should be ~[0, 0]
1568 and for value 1, VAR should be ~[1, 1]. We cannot
1569 represent these ranges.
1570
1571 The only situation in which we can build a valid
1572 anti-range is when LIMIT_VR is a single-valued range
1573 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1574 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1575 if (limit_vr
1576 && limit_vr->type == VR_RANGE
1577 && compare_values (limit_vr->min, limit_vr->max) == 0)
1578 {
1579 min = limit_vr->min;
1580 max = limit_vr->max;
1581 }
1582 else
1583 {
1584 /* In any other case, we cannot use LIMIT's range to build a
1585 valid anti-range. */
1586 min = max = limit;
1587 }
1588
1589 /* If MIN and MAX cover the whole range for their type, then
1590 just use the original LIMIT. */
1591 if (INTEGRAL_TYPE_P (type)
1592 && vrp_val_is_min (min)
1593 && vrp_val_is_max (max))
1594 min = max = limit;
1595
1596 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1597 }
1598 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1599 {
1600 min = TYPE_MIN_VALUE (type);
1601
1602 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1603 max = limit;
1604 else
1605 {
1606 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1607 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1608 LT_EXPR. */
1609 max = limit_vr->max;
1610 }
1611
1612 /* If the maximum value forces us to be out of bounds, simply punt.
1613 It would be pointless to try and do anything more since this
1614 all should be optimized away above us. */
1615 if ((cond_code == LT_EXPR
1616 && compare_values (max, min) == 0)
1617 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1618 set_value_range_to_varying (vr_p);
1619 else
1620 {
1621 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1622 if (cond_code == LT_EXPR)
1623 {
1624 tree one = build_int_cst (type, 1);
1625 max = fold_build2 (MINUS_EXPR, type, max, one);
1626 if (EXPR_P (max))
1627 TREE_NO_WARNING (max) = 1;
1628 }
1629
1630 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1631 }
1632 }
1633 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1634 {
1635 max = TYPE_MAX_VALUE (type);
1636
1637 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1638 min = limit;
1639 else
1640 {
1641 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1642 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1643 GT_EXPR. */
1644 min = limit_vr->min;
1645 }
1646
1647 /* If the minimum value forces us to be out of bounds, simply punt.
1648 It would be pointless to try and do anything more since this
1649 all should be optimized away above us. */
1650 if ((cond_code == GT_EXPR
1651 && compare_values (min, max) == 0)
1652 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1653 set_value_range_to_varying (vr_p);
1654 else
1655 {
1656 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1657 if (cond_code == GT_EXPR)
1658 {
1659 tree one = build_int_cst (type, 1);
1660 min = fold_build2 (PLUS_EXPR, type, min, one);
1661 if (EXPR_P (min))
1662 TREE_NO_WARNING (min) = 1;
1663 }
1664
1665 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1666 }
1667 }
1668 else
1669 gcc_unreachable ();
1670
1671 /* If VAR already had a known range, it may happen that the new
1672 range we have computed and VAR's range are not compatible. For
1673 instance,
1674
1675 if (p_5 == NULL)
1676 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1677 x_7 = p_6->fld;
1678 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1679
1680 While the above comes from a faulty program, it will cause an ICE
1681 later because p_8 and p_6 will have incompatible ranges and at
1682 the same time will be considered equivalent. A similar situation
1683 would arise from
1684
1685 if (i_5 > 10)
1686 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1687 if (i_5 < 5)
1688 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1689
1690 Again i_6 and i_7 will have incompatible ranges. It would be
1691 pointless to try and do anything with i_7's range because
1692 anything dominated by 'if (i_5 < 5)' will be optimized away.
1693 Note, due to the way in which simulation proceeds, the statement
1694 i_7 = ASSERT_EXPR <...> would never be visited because the
1695 conditional 'if (i_5 < 5)' always evaluates to false. However,
1696 this extra check does not hurt and may protect against future
1697 changes to VRP that may get into a situation similar to the
1698 NULL pointer dereference example.
1699
1700 Note that these compatibility tests are only needed when dealing
1701 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1702 are both anti-ranges, they will always be compatible, because two
1703 anti-ranges will always have a non-empty intersection. */
1704
1705 var_vr = get_value_range (var);
1706
1707 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1708 ranges or anti-ranges. */
1709 if (vr_p->type == VR_VARYING
1710 || vr_p->type == VR_UNDEFINED
1711 || var_vr->type == VR_VARYING
1712 || var_vr->type == VR_UNDEFINED
1713 || symbolic_range_p (vr_p)
1714 || symbolic_range_p (var_vr))
1715 return;
1716
1717 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1718 {
1719 /* If the two ranges have a non-empty intersection, we can
1720 refine the resulting range. Since the assert expression
1721 creates an equivalency and at the same time it asserts a
1722 predicate, we can take the intersection of the two ranges to
1723 get better precision. */
1724 if (value_ranges_intersect_p (var_vr, vr_p))
1725 {
1726 /* Use the larger of the two minimums. */
1727 if (compare_values (vr_p->min, var_vr->min) == -1)
1728 min = var_vr->min;
1729 else
1730 min = vr_p->min;
1731
1732 /* Use the smaller of the two maximums. */
1733 if (compare_values (vr_p->max, var_vr->max) == 1)
1734 max = var_vr->max;
1735 else
1736 max = vr_p->max;
1737
1738 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1739 }
1740 else
1741 {
1742 /* The two ranges do not intersect, set the new range to
1743 VARYING, because we will not be able to do anything
1744 meaningful with it. */
1745 set_value_range_to_varying (vr_p);
1746 }
1747 }
1748 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1749 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1750 {
1751 /* A range and an anti-range will cancel each other only if
1752 their ends are the same. For instance, in the example above,
1753 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1754 so VR_P should be set to VR_VARYING. */
1755 if (compare_values (var_vr->min, vr_p->min) == 0
1756 && compare_values (var_vr->max, vr_p->max) == 0)
1757 set_value_range_to_varying (vr_p);
1758 else
1759 {
1760 tree min, max, anti_min, anti_max, real_min, real_max;
1761 int cmp;
1762
1763 /* We want to compute the logical AND of the two ranges;
1764 there are three cases to consider.
1765
1766
1767 1. The VR_ANTI_RANGE range is completely within the
1768 VR_RANGE and the endpoints of the ranges are
1769 different. In that case the resulting range
1770 should be whichever range is more precise.
1771 Typically that will be the VR_RANGE.
1772
1773 2. The VR_ANTI_RANGE is completely disjoint from
1774 the VR_RANGE. In this case the resulting range
1775 should be the VR_RANGE.
1776
1777 3. There is some overlap between the VR_ANTI_RANGE
1778 and the VR_RANGE.
1779
1780 3a. If the high limit of the VR_ANTI_RANGE resides
1781 within the VR_RANGE, then the result is a new
1782 VR_RANGE starting at the high limit of the
1783 VR_ANTI_RANGE + 1 and extending to the
1784 high limit of the original VR_RANGE.
1785
1786 3b. If the low limit of the VR_ANTI_RANGE resides
1787 within the VR_RANGE, then the result is a new
1788 VR_RANGE starting at the low limit of the original
1789 VR_RANGE and extending to the low limit of the
1790 VR_ANTI_RANGE - 1. */
1791 if (vr_p->type == VR_ANTI_RANGE)
1792 {
1793 anti_min = vr_p->min;
1794 anti_max = vr_p->max;
1795 real_min = var_vr->min;
1796 real_max = var_vr->max;
1797 }
1798 else
1799 {
1800 anti_min = var_vr->min;
1801 anti_max = var_vr->max;
1802 real_min = vr_p->min;
1803 real_max = vr_p->max;
1804 }
1805
1806
1807 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1808 not including any endpoints. */
1809 if (compare_values (anti_max, real_max) == -1
1810 && compare_values (anti_min, real_min) == 1)
1811 {
1812 /* Keep the anti-range only if the real range covers the whole
1813 valid range of the type; otherwise the VR_RANGE is more precise. */
1814 if (!vrp_val_is_min (real_min)
1815 || !vrp_val_is_max (real_max))
1816 set_value_range (vr_p, VR_RANGE, real_min,
1817 real_max, vr_p->equiv);
1818 }
1819 /* Case 2, VR_ANTI_RANGE completely disjoint from
1820 VR_RANGE. */
1821 else if (compare_values (anti_min, real_max) == 1
1822 || compare_values (anti_max, real_min) == -1)
1823 {
1824 set_value_range (vr_p, VR_RANGE, real_min,
1825 real_max, vr_p->equiv);
1826 }
1827 /* Case 3a, the anti-range extends into the low
1828 part of the real range. Thus creating a new
1829 low bound for the real range. */
1830 else if (((cmp = compare_values (anti_max, real_min)) == 1
1831 || cmp == 0)
1832 && compare_values (anti_max, real_max) == -1)
1833 {
1834 gcc_assert (!is_positive_overflow_infinity (anti_max));
1835 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1836 && vrp_val_is_max (anti_max))
1837 {
1838 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1839 {
1840 set_value_range_to_varying (vr_p);
1841 return;
1842 }
1843 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1844 }
1845 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1846 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1847 anti_max,
1848 build_int_cst (TREE_TYPE (var_vr->min), 1));
1849 else
1850 min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1851 anti_max, size_int (1));
1852 max = real_max;
1853 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1854 }
1855 	      /* Case 3b, the anti-range extends into the high
1856 		 part of the real range, creating a new upper bound
1857 		 for the real range.  */
1858 else if (compare_values (anti_min, real_min) == 1
1859 && ((cmp = compare_values (anti_min, real_max)) == -1
1860 || cmp == 0))
1861 {
1862 gcc_assert (!is_negative_overflow_infinity (anti_min));
1863 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1864 && vrp_val_is_min (anti_min))
1865 {
1866 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1867 {
1868 set_value_range_to_varying (vr_p);
1869 return;
1870 }
1871 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1872 }
1873 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1874 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1875 anti_min,
1876 build_int_cst (TREE_TYPE (var_vr->min), 1));
1877 else
1878 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1879 anti_min,
1880 size_int (-1));
1881 min = real_min;
1882 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1883 }
1884 }
1885 }
1886 }
1887
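/* The case analysis above is easier to follow on concrete numbers.  The
   block below is an illustrative, standalone sketch (plain C, kept out
   of the build and not part of the VRP implementation): it replays
   cases 1, 2, 3a and 3b for closed integer ranges, writing the
   anti-range ~[ALO, AHI] as the pair (alo, ahi).  The special case in
   which the VR_RANGE spans the whole type and the anti-range is kept
   instead is omitted here.  */
#if 0
#include <assert.h>

/* Intersect [rlo, rhi] with ~[alo, ahi].  Returns 1 and stores the
   resulting range in *lo/*hi; returns 0 when no single range results
   (the real code then falls back conservatively).  */
static int
intersect_range_anti_range (long rlo, long rhi, long alo, long ahi,
			    long *lo, long *hi)
{
  if (ahi < rhi && alo > rlo)		/* Case 1: hole strictly inside;   */
    { *lo = rlo; *hi = rhi; return 1; }	/* keep the more precise range.    */
  if (alo > rhi || ahi < rlo)		/* Case 2: disjoint.               */
    { *lo = rlo; *hi = rhi; return 1; }
  if (ahi >= rlo && ahi < rhi)		/* Case 3a: clip the low end.      */
    { *lo = ahi + 1; *hi = rhi; return 1; }
  if (alo > rlo && alo <= rhi)		/* Case 3b: clip the high end.     */
    { *lo = rlo; *hi = alo - 1; return 1; }
  return 0;
}

int
main (void)
{
  long lo, hi;
  intersect_range_anti_range (0, 10, 3, 5, &lo, &hi);	/* Case 1.   */
  assert (lo == 0 && hi == 10);
  intersect_range_anti_range (0, 10, -5, 2, &lo, &hi);	/* Case 3a.  */
  assert (lo == 3 && hi == 10);
  intersect_range_anti_range (0, 10, 7, 20, &lo, &hi);	/* Case 3b.  */
  assert (lo == 0 && hi == 6);
  return 0;
}
#endif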
1888
1889 /* Extract range information from SSA name VAR and store it in VR. If
1890 VAR has an interesting range, use it. Otherwise, create the
1891 range [VAR, VAR] and return it. This is useful in situations where
1892 we may have conditionals testing values of VARYING names. For
1893 instance,
1894
1895 x_3 = y_5;
1896 if (x_3 > y_5)
1897 ...
1898
1899 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1900 always false. */
1901
1902 static void
1903 extract_range_from_ssa_name (value_range_t *vr, tree var)
1904 {
1905 value_range_t *var_vr = get_value_range (var);
1906
1907 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1908 copy_value_range (vr, var_vr);
1909 else
1910 set_value_range (vr, VR_RANGE, var, var, NULL);
1911
1912 add_equivalence (&vr->equiv, var);
1913 }
1914
1915
1916 /* Wrapper around int_const_binop. If the operation overflows and we
1917 are not using wrapping arithmetic, then adjust the result to be
1918 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1919 NULL_TREE if we need to use an overflow infinity representation but
1920 the type does not support it. */
1921
1922 static tree
1923 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1924 {
1925 tree res;
1926
1927 res = int_const_binop (code, val1, val2, 0);
1928
1929 /* If we are using unsigned arithmetic, operate symbolically
1930 on -INF and +INF as int_const_binop only handles signed overflow. */
1931 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1932 {
1933 int checkz = compare_values (res, val1);
1934 bool overflow = false;
1935
1936 /* Ensure that res = val1 [+*] val2 >= val1
1937 or that res = val1 - val2 <= val1. */
1938 if ((code == PLUS_EXPR
1939 && !(checkz == 1 || checkz == 0))
1940 || (code == MINUS_EXPR
1941 && !(checkz == 0 || checkz == -1)))
1942 {
1943 overflow = true;
1944 }
1945 /* Checking for multiplication overflow is done by dividing the
1946 output of the multiplication by the first input of the
1947 multiplication. If the result of that division operation is
1948 not equal to the second input of the multiplication, then the
1949 multiplication overflowed. */
1950 else if (code == MULT_EXPR && !integer_zerop (val1))
1951 {
1952 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1953 res,
1954 val1, 0);
1955 int check = compare_values (tmp, val2);
1956
1957 if (check != 0)
1958 overflow = true;
1959 }
1960
1961 if (overflow)
1962 {
1963 res = copy_node (res);
1964 TREE_OVERFLOW (res) = 1;
1965 }
1966
1967 }
1968 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1969     /* If the signed operation wraps, then int_const_binop has done
1970        everything we want.  */
1971 ;
1972 else if ((TREE_OVERFLOW (res)
1973 && !TREE_OVERFLOW (val1)
1974 && !TREE_OVERFLOW (val2))
1975 || is_overflow_infinity (val1)
1976 || is_overflow_infinity (val2))
1977 {
1978 /* If the operation overflowed but neither VAL1 nor VAL2 are
1979 overflown, return -INF or +INF depending on the operation
1980 and the combination of signs of the operands. */
1981 int sgn1 = tree_int_cst_sgn (val1);
1982 int sgn2 = tree_int_cst_sgn (val2);
1983
1984 if (needs_overflow_infinity (TREE_TYPE (res))
1985 && !supports_overflow_infinity (TREE_TYPE (res)))
1986 return NULL_TREE;
1987
1988 /* We have to punt on adding infinities of different signs,
1989 since we can't tell what the sign of the result should be.
1990 Likewise for subtracting infinities of the same sign. */
1991 if (((code == PLUS_EXPR && sgn1 != sgn2)
1992 || (code == MINUS_EXPR && sgn1 == sgn2))
1993 && is_overflow_infinity (val1)
1994 && is_overflow_infinity (val2))
1995 return NULL_TREE;
1996
1997 /* Don't try to handle division or shifting of infinities. */
1998 if ((code == TRUNC_DIV_EXPR
1999 || code == FLOOR_DIV_EXPR
2000 || code == CEIL_DIV_EXPR
2001 || code == EXACT_DIV_EXPR
2002 || code == ROUND_DIV_EXPR
2003 || code == RSHIFT_EXPR)
2004 && (is_overflow_infinity (val1)
2005 || is_overflow_infinity (val2)))
2006 return NULL_TREE;
2007
2008 /* Notice that we only need to handle the restricted set of
2009 operations handled by extract_range_from_binary_expr.
2010 Among them, only multiplication, addition and subtraction
2011 can yield overflow without overflown operands because we
2012 are working with integral types only... except in the
2013 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2014 for division too. */
2015
2016 /* For multiplication, the sign of the overflow is given
2017 by the comparison of the signs of the operands. */
2018 if ((code == MULT_EXPR && sgn1 == sgn2)
2019 /* For addition, the operands must be of the same sign
2020 to yield an overflow. Its sign is therefore that
2021 of one of the operands, for example the first. For
2022 infinite operands X + -INF is negative, not positive. */
2023 || (code == PLUS_EXPR
2024 && (sgn1 >= 0
2025 ? !is_negative_overflow_infinity (val2)
2026 : is_positive_overflow_infinity (val2)))
2027 /* For subtraction, non-infinite operands must be of
2028 different signs to yield an overflow. Its sign is
2029 therefore that of the first operand or the opposite of
2030 that of the second operand. A first operand of 0 counts
2031 as positive here, for the corner case 0 - (-INF), which
2032 overflows, but must yield +INF. For infinite operands 0
2033 - INF is negative, not positive. */
2034 || (code == MINUS_EXPR
2035 && (sgn1 >= 0
2036 ? !is_positive_overflow_infinity (val2)
2037 : is_negative_overflow_infinity (val2)))
2038 /* We only get in here with positive shift count, so the
2039 overflow direction is the same as the sign of val1.
2040 Actually rshift does not overflow at all, but we only
2041 handle the case of shifting overflowed -INF and +INF. */
2042 || (code == RSHIFT_EXPR
2043 && sgn1 >= 0)
2044 /* For division, the only case is -INF / -1 = +INF. */
2045 || code == TRUNC_DIV_EXPR
2046 || code == FLOOR_DIV_EXPR
2047 || code == CEIL_DIV_EXPR
2048 || code == EXACT_DIV_EXPR
2049 || code == ROUND_DIV_EXPR)
2050 return (needs_overflow_infinity (TREE_TYPE (res))
2051 ? positive_overflow_infinity (TREE_TYPE (res))
2052 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2053 else
2054 return (needs_overflow_infinity (TREE_TYPE (res))
2055 ? negative_overflow_infinity (TREE_TYPE (res))
2056 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2057 }
2058
2059 return res;
2060 }
2061
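/* A minimal standalone illustration of the unsigned overflow checks
   used above (plain C, kept out of the build, not part of GCC):
   addition wrapped iff the result is smaller than an operand,
   subtraction wrapped iff it is larger than the minuend, and
   multiplication wrapped iff dividing the product by one non-zero
   factor does not give back the other.  */
#if 0
#include <assert.h>
#include <limits.h>

static int add_wraps (unsigned a, unsigned b) { return a + b < a; }
static int sub_wraps (unsigned a, unsigned b) { return a - b > a; }
static int mul_wraps (unsigned a, unsigned b)
{
  return a != 0 && (a * b) / a != b;
}

int
main (void)
{
  assert (!add_wraps (100u, 200u));
  assert (add_wraps (UINT_MAX, 1u));	/* UINT_MAX + 1 wraps to 0.  */
  assert (sub_wraps (0u, 1u));		/* 0 - 1 wraps to UINT_MAX.  */
  assert (!sub_wraps (5u, 3u));
  assert (mul_wraps (UINT_MAX, 2u));
  assert (!mul_wraps (100u, 100u));
  assert (!mul_wraps (0u, 5u));		/* A zero factor never wraps.  */
  return 0;
}
#endif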
2062
2063 /* Extract range information from a binary expression EXPR based on
2064 the ranges of each of its operands and the expression code. */
2065
2066 static void
2067 extract_range_from_binary_expr (value_range_t *vr,
2068 enum tree_code code,
2069 tree expr_type, tree op0, tree op1)
2070 {
2071 enum value_range_type type;
2072 tree min, max;
2073 int cmp;
2074 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2075 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2076
2077 /* Not all binary expressions can be applied to ranges in a
2078 meaningful way. Handle only arithmetic operations. */
2079 if (code != PLUS_EXPR
2080 && code != MINUS_EXPR
2081 && code != POINTER_PLUS_EXPR
2082 && code != MULT_EXPR
2083 && code != TRUNC_DIV_EXPR
2084 && code != FLOOR_DIV_EXPR
2085 && code != CEIL_DIV_EXPR
2086 && code != EXACT_DIV_EXPR
2087 && code != ROUND_DIV_EXPR
2088 && code != TRUNC_MOD_EXPR
2089 && code != RSHIFT_EXPR
2090 && code != MIN_EXPR
2091 && code != MAX_EXPR
2092 && code != BIT_AND_EXPR
2093 && code != BIT_IOR_EXPR
2094 && code != TRUTH_AND_EXPR
2095 && code != TRUTH_OR_EXPR)
2096 {
2097 /* We can still do constant propagation here. */
2098 tree const_op0 = op_with_constant_singleton_value_range (op0);
2099 tree const_op1 = op_with_constant_singleton_value_range (op1);
2100 if (const_op0 || const_op1)
2101 {
2102 tree tem = fold_binary (code, expr_type,
2103 const_op0 ? const_op0 : op0,
2104 const_op1 ? const_op1 : op1);
2105 if (tem
2106 && is_gimple_min_invariant (tem)
2107 && !is_overflow_infinity (tem))
2108 {
2109 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2110 return;
2111 }
2112 }
2113 set_value_range_to_varying (vr);
2114 return;
2115 }
2116
2117 /* Get value ranges for each operand. For constant operands, create
2118 a new value range with the operand to simplify processing. */
2119 if (TREE_CODE (op0) == SSA_NAME)
2120 vr0 = *(get_value_range (op0));
2121 else if (is_gimple_min_invariant (op0))
2122 set_value_range_to_value (&vr0, op0, NULL);
2123 else
2124 set_value_range_to_varying (&vr0);
2125
2126 if (TREE_CODE (op1) == SSA_NAME)
2127 vr1 = *(get_value_range (op1));
2128 else if (is_gimple_min_invariant (op1))
2129 set_value_range_to_value (&vr1, op1, NULL);
2130 else
2131 set_value_range_to_varying (&vr1);
2132
2133 /* If either range is UNDEFINED, so is the result. */
2134 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2135 {
2136 set_value_range_to_undefined (vr);
2137 return;
2138 }
2139
2140 /* The type of the resulting value range defaults to VR0.TYPE. */
2141 type = vr0.type;
2142
2143 /* Refuse to operate on VARYING ranges, ranges of different kinds
2144 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2145 because we may be able to derive a useful range even if one of
2146 the operands is VR_VARYING or symbolic range. Similarly for
2147 divisions. TODO, we may be able to derive anti-ranges in
2148 some cases. */
2149 if (code != BIT_AND_EXPR
2150 && code != TRUTH_AND_EXPR
2151 && code != TRUTH_OR_EXPR
2152 && code != TRUNC_DIV_EXPR
2153 && code != FLOOR_DIV_EXPR
2154 && code != CEIL_DIV_EXPR
2155 && code != EXACT_DIV_EXPR
2156 && code != ROUND_DIV_EXPR
2157 && code != TRUNC_MOD_EXPR
2158 && (vr0.type == VR_VARYING
2159 || vr1.type == VR_VARYING
2160 || vr0.type != vr1.type
2161 || symbolic_range_p (&vr0)
2162 || symbolic_range_p (&vr1)))
2163 {
2164 set_value_range_to_varying (vr);
2165 return;
2166 }
2167
2168 /* Now evaluate the expression to determine the new range. */
2169 if (POINTER_TYPE_P (expr_type)
2170 || POINTER_TYPE_P (TREE_TYPE (op0))
2171 || POINTER_TYPE_P (TREE_TYPE (op1)))
2172 {
2173 if (code == MIN_EXPR || code == MAX_EXPR)
2174 {
2175 	  /* For MIN/MAX expressions with pointers, we only care about
2176 	     nullness: if both are non-NULL, then the result is non-NULL.
2177 	     If both are NULL, then the result is NULL.  Otherwise the
2178 	     result is varying.  */
2179 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2180 set_value_range_to_nonnull (vr, expr_type);
2181 else if (range_is_null (&vr0) && range_is_null (&vr1))
2182 set_value_range_to_null (vr, expr_type);
2183 else
2184 set_value_range_to_varying (vr);
2185
2186 return;
2187 }
2188 gcc_assert (code == POINTER_PLUS_EXPR);
2189 /* For pointer types, we are really only interested in asserting
2190 whether the expression evaluates to non-NULL. */
2191 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2192 set_value_range_to_nonnull (vr, expr_type);
2193 else if (range_is_null (&vr0) && range_is_null (&vr1))
2194 set_value_range_to_null (vr, expr_type);
2195 else
2196 set_value_range_to_varying (vr);
2197
2198 return;
2199 }
2200
2201 /* For integer ranges, apply the operation to each end of the
2202 range and see what we end up with. */
2203 if (code == TRUTH_AND_EXPR
2204 || code == TRUTH_OR_EXPR)
2205 {
2206 	  /* If one of the operands is zero, we know that the whole
2207 	     expression evaluates to zero.  */
2208 if (code == TRUTH_AND_EXPR
2209 && ((vr0.type == VR_RANGE
2210 && integer_zerop (vr0.min)
2211 && integer_zerop (vr0.max))
2212 || (vr1.type == VR_RANGE
2213 && integer_zerop (vr1.min)
2214 && integer_zerop (vr1.max))))
2215 {
2216 type = VR_RANGE;
2217 min = max = build_int_cst (expr_type, 0);
2218 }
2219 	  /* If one of the operands is one, we know that the whole
2220 	     expression evaluates to one.  */
2221 else if (code == TRUTH_OR_EXPR
2222 && ((vr0.type == VR_RANGE
2223 && integer_onep (vr0.min)
2224 && integer_onep (vr0.max))
2225 || (vr1.type == VR_RANGE
2226 && integer_onep (vr1.min)
2227 && integer_onep (vr1.max))))
2228 {
2229 type = VR_RANGE;
2230 min = max = build_int_cst (expr_type, 1);
2231 }
2232 else if (vr0.type != VR_VARYING
2233 && vr1.type != VR_VARYING
2234 && vr0.type == vr1.type
2235 && !symbolic_range_p (&vr0)
2236 && !overflow_infinity_range_p (&vr0)
2237 && !symbolic_range_p (&vr1)
2238 && !overflow_infinity_range_p (&vr1))
2239 {
2240 /* Boolean expressions cannot be folded with int_const_binop. */
2241 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2242 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2243 }
2244 else
2245 {
2246 /* The result of a TRUTH_*_EXPR is always true or false. */
2247 set_value_range_to_truthvalue (vr, expr_type);
2248 return;
2249 }
2250 }
2251 else if (code == PLUS_EXPR
2252 || code == MIN_EXPR
2253 || code == MAX_EXPR)
2254 {
2255 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2256 VR_VARYING. It would take more effort to compute a precise
2257 range for such a case. For example, if we have op0 == 1 and
2258 op1 == -1 with their ranges both being ~[0,0], we would have
2259 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2260 Note that we are guaranteed to have vr0.type == vr1.type at
2261 this point. */
2262 if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2263 {
2264 set_value_range_to_varying (vr);
2265 return;
2266 }
2267
2268 /* For operations that make the resulting range directly
2269 proportional to the original ranges, apply the operation to
2270 the same end of each range. */
2271 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2272 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2273
2274 	  /* If both additions overflowed, the range kind is still correct.
2275 This happens regularly with subtracting something in unsigned
2276 arithmetic.
2277 ??? See PR30318 for all the cases we do not handle. */
2278 if (code == PLUS_EXPR
2279 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2280 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2281 {
2282 min = build_int_cst_wide (TREE_TYPE (min),
2283 TREE_INT_CST_LOW (min),
2284 TREE_INT_CST_HIGH (min));
2285 max = build_int_cst_wide (TREE_TYPE (max),
2286 TREE_INT_CST_LOW (max),
2287 TREE_INT_CST_HIGH (max));
2288 }
2289 }
2290 else if (code == MULT_EXPR
2291 || code == TRUNC_DIV_EXPR
2292 || code == FLOOR_DIV_EXPR
2293 || code == CEIL_DIV_EXPR
2294 || code == EXACT_DIV_EXPR
2295 || code == ROUND_DIV_EXPR
2296 || code == RSHIFT_EXPR)
2297 {
2298 tree val[4];
2299 size_t i;
2300 bool sop;
2301
2302 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2303 drop to VR_VARYING. It would take more effort to compute a
2304 precise range for such a case. For example, if we have
2305 op0 == 65536 and op1 == 65536 with their ranges both being
2306 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2307 we cannot claim that the product is in ~[0,0]. Note that we
2308 are guaranteed to have vr0.type == vr1.type at this
2309 point. */
2310 if (code == MULT_EXPR
2311 && vr0.type == VR_ANTI_RANGE
2312 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2313 {
2314 set_value_range_to_varying (vr);
2315 return;
2316 }
2317
2318 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2319 then drop to VR_VARYING. Outside of this range we get undefined
2320 behavior from the shift operation. We cannot even trust
2321 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2322 shifts, and the operation at the tree level may be widened. */
2323 if (code == RSHIFT_EXPR)
2324 {
2325 if (vr1.type == VR_ANTI_RANGE
2326 || !vrp_expr_computes_nonnegative (op1, &sop)
2327 || (operand_less_p
2328 (build_int_cst (TREE_TYPE (vr1.max),
2329 TYPE_PRECISION (expr_type) - 1),
2330 vr1.max) != 0))
2331 {
2332 set_value_range_to_varying (vr);
2333 return;
2334 }
2335 }
2336
2337 else if ((code == TRUNC_DIV_EXPR
2338 || code == FLOOR_DIV_EXPR
2339 || code == CEIL_DIV_EXPR
2340 || code == EXACT_DIV_EXPR
2341 || code == ROUND_DIV_EXPR)
2342 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2343 {
2344 	      /* For division, if op1 has VR_RANGE but op0 does not, something
2345 		 can be deduced just from that range.  For example,
2346 		 [min, max] / [4, max] gives the range [min / 4, max / 4].  */
2347 if (vr1.type == VR_RANGE
2348 && !symbolic_range_p (&vr1)
2349 && !range_includes_zero_p (&vr1))
2350 {
2351 vr0.type = type = VR_RANGE;
2352 vr0.min = vrp_val_min (TREE_TYPE (op0));
2353 	      vr0.max = vrp_val_max (TREE_TYPE (op0));
2354 }
2355 else
2356 {
2357 set_value_range_to_varying (vr);
2358 return;
2359 }
2360 }
2361
2362 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2363 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2364 include 0. */
2365 if ((code == TRUNC_DIV_EXPR
2366 || code == FLOOR_DIV_EXPR
2367 || code == CEIL_DIV_EXPR
2368 || code == EXACT_DIV_EXPR
2369 || code == ROUND_DIV_EXPR)
2370 && vr0.type == VR_RANGE
2371 && (vr1.type != VR_RANGE
2372 || symbolic_range_p (&vr1)
2373 || range_includes_zero_p (&vr1)))
2374 {
2375 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2376 int cmp;
2377
2378 sop = false;
2379 min = NULL_TREE;
2380 max = NULL_TREE;
2381 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2382 {
2383 /* For unsigned division or when divisor is known
2384 to be non-negative, the range has to cover
2385 all numbers from 0 to max for positive max
2386 and all numbers from min to 0 for negative min. */
2387 cmp = compare_values (vr0.max, zero);
2388 if (cmp == -1)
2389 max = zero;
2390 else if (cmp == 0 || cmp == 1)
2391 max = vr0.max;
2392 else
2393 type = VR_VARYING;
2394 cmp = compare_values (vr0.min, zero);
2395 if (cmp == 1)
2396 min = zero;
2397 else if (cmp == 0 || cmp == -1)
2398 min = vr0.min;
2399 else
2400 type = VR_VARYING;
2401 }
2402 else
2403 {
2404 /* Otherwise the range is -max .. max or min .. -min
2405 depending on which bound is bigger in absolute value,
2406 as the division can change the sign. */
2407 abs_extent_range (vr, vr0.min, vr0.max);
2408 return;
2409 }
2410 if (type == VR_VARYING)
2411 {
2412 set_value_range_to_varying (vr);
2413 return;
2414 }
2415 }
2416
2417 	 /* Multiplications and divisions are a bit tricky to handle:
2418 depending on the mix of signs we have in the two ranges, we
2419 need to operate on different values to get the minimum and
2420 maximum values for the new range. One approach is to figure
2421 out all the variations of range combinations and do the
2422 operations.
2423
2424 However, this involves several calls to compare_values and it
2425 is pretty convoluted. It's simpler to do the 4 operations
2426 	 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2427 	 and then figure the smallest and largest values to form
2428 the new range. */
2429 else
2430 {
2431 gcc_assert ((vr0.type == VR_RANGE
2432 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2433 && vr0.type == vr1.type);
2434
2435 /* Compute the 4 cross operations. */
2436 sop = false;
2437 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2438 if (val[0] == NULL_TREE)
2439 sop = true;
2440
2441 if (vr1.max == vr1.min)
2442 val[1] = NULL_TREE;
2443 else
2444 {
2445 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2446 if (val[1] == NULL_TREE)
2447 sop = true;
2448 }
2449
2450 if (vr0.max == vr0.min)
2451 val[2] = NULL_TREE;
2452 else
2453 {
2454 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2455 if (val[2] == NULL_TREE)
2456 sop = true;
2457 }
2458
2459 if (vr0.min == vr0.max || vr1.min == vr1.max)
2460 val[3] = NULL_TREE;
2461 else
2462 {
2463 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2464 if (val[3] == NULL_TREE)
2465 sop = true;
2466 }
2467
2468 if (sop)
2469 {
2470 set_value_range_to_varying (vr);
2471 return;
2472 }
2473
2474 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2475 of VAL[i]. */
2476 min = val[0];
2477 max = val[0];
2478 for (i = 1; i < 4; i++)
2479 {
2480 if (!is_gimple_min_invariant (min)
2481 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2482 || !is_gimple_min_invariant (max)
2483 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2484 break;
2485
2486 if (val[i])
2487 {
2488 if (!is_gimple_min_invariant (val[i])
2489 || (TREE_OVERFLOW (val[i])
2490 && !is_overflow_infinity (val[i])))
2491 {
2492 /* If we found an overflowed value, set MIN and MAX
2493 to it so that we set the resulting range to
2494 VARYING. */
2495 min = max = val[i];
2496 break;
2497 }
2498
2499 if (compare_values (val[i], min) == -1)
2500 min = val[i];
2501
2502 if (compare_values (val[i], max) == 1)
2503 max = val[i];
2504 }
2505 }
2506 }
2507 }
2508 else if (code == TRUNC_MOD_EXPR)
2509 {
2510 bool sop = false;
2511 if (vr1.type != VR_RANGE
2512 || symbolic_range_p (&vr1)
2513 || range_includes_zero_p (&vr1)
2514 || vrp_val_is_min (vr1.min))
2515 {
2516 set_value_range_to_varying (vr);
2517 return;
2518 }
2519 type = VR_RANGE;
2520 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2521 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
2522 if (tree_int_cst_lt (max, vr1.max))
2523 max = vr1.max;
2524 max = int_const_binop (MINUS_EXPR, max, integer_one_node, 0);
2525 /* If the dividend is non-negative the modulus will be
2526 non-negative as well. */
2527 if (TYPE_UNSIGNED (TREE_TYPE (max))
2528 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
2529 min = build_int_cst (TREE_TYPE (max), 0);
2530 else
2531 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
2532 }
2533 else if (code == MINUS_EXPR)
2534 {
2535 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2536 VR_VARYING. It would take more effort to compute a precise
2537 range for such a case. For example, if we have op0 == 1 and
2538 op1 == 1 with their ranges both being ~[0,0], we would have
2539 op0 - op1 == 0, so we cannot claim that the difference is in
2540 ~[0,0]. Note that we are guaranteed to have
2541 vr0.type == vr1.type at this point. */
2542 if (vr0.type == VR_ANTI_RANGE)
2543 {
2544 set_value_range_to_varying (vr);
2545 return;
2546 }
2547
2548 /* For MINUS_EXPR, apply the operation to the opposite ends of
2549 each range. */
2550 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2551 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2552 }
2553 else if (code == BIT_AND_EXPR)
2554 {
2555 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2556
2557 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2558 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2559
2560 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2561 min = max = int_const_binop (code, vr0.max, vr1.max, 0);
2562 else if (vr0_int_cst_singleton_p
2563 && tree_int_cst_sgn (vr0.max) >= 0)
2564 {
2565 min = build_int_cst (expr_type, 0);
2566 max = vr0.max;
2567 }
2568 else if (vr1_int_cst_singleton_p
2569 && tree_int_cst_sgn (vr1.max) >= 0)
2570 {
2571 type = VR_RANGE;
2572 min = build_int_cst (expr_type, 0);
2573 max = vr1.max;
2574 }
2575 else
2576 {
2577 set_value_range_to_varying (vr);
2578 return;
2579 }
2580 }
2581 else if (code == BIT_IOR_EXPR)
2582 {
2583 if (range_int_cst_p (&vr0)
2584 && range_int_cst_p (&vr1)
2585 && tree_int_cst_sgn (vr0.min) >= 0
2586 && tree_int_cst_sgn (vr1.min) >= 0)
2587 {
2588 double_int vr0_max = tree_to_double_int (vr0.max);
2589 double_int vr1_max = tree_to_double_int (vr1.max);
2590 double_int ior_max;
2591
2592 /* Set all bits to the right of the most significant one to 1.
2593 For example, [0, 4] | [4, 4] = [4, 7]. */
2594 ior_max.low = vr0_max.low | vr1_max.low;
2595 ior_max.high = vr0_max.high | vr1_max.high;
2596 if (ior_max.high != 0)
2597 {
2598 ior_max.low = ~(unsigned HOST_WIDE_INT)0u;
2599 ior_max.high |= ((HOST_WIDE_INT) 1
2600 << floor_log2 (ior_max.high)) - 1;
2601 }
2602 else if (ior_max.low != 0)
2603 ior_max.low |= ((unsigned HOST_WIDE_INT) 1u
2604 << floor_log2 (ior_max.low)) - 1;
2605
2606 /* Both of these endpoints are conservative. */
2607 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2608 max = double_int_to_tree (expr_type, ior_max);
2609 }
2610 else
2611 {
2612 set_value_range_to_varying (vr);
2613 return;
2614 }
2615 }
2616 else
2617 gcc_unreachable ();
2618
2619 /* If either MIN or MAX overflowed, then set the resulting range to
2620 VARYING. But we do accept an overflow infinity
2621 representation. */
2622 if (min == NULL_TREE
2623 || !is_gimple_min_invariant (min)
2624 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2625 || max == NULL_TREE
2626 || !is_gimple_min_invariant (max)
2627 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2628 {
2629 set_value_range_to_varying (vr);
2630 return;
2631 }
2632
2633 /* We punt if:
2634 1) [-INF, +INF]
2635 2) [-INF, +-INF(OVF)]
2636 3) [+-INF(OVF), +INF]
2637 4) [+-INF(OVF), +-INF(OVF)]
2638 We learn nothing when we have INF and INF(OVF) on both sides.
2639 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2640 overflow. */
2641 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2642 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2643 {
2644 set_value_range_to_varying (vr);
2645 return;
2646 }
2647
2648 cmp = compare_values (min, max);
2649 if (cmp == -2 || cmp == 1)
2650 {
2651 /* If the new range has its limits swapped around (MIN > MAX),
2652 then the operation caused one of them to wrap around, mark
2653 the new range VARYING. */
2654 set_value_range_to_varying (vr);
2655 }
2656 else
2657 set_value_range (vr, type, min, max, NULL);
2658 }
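
/* The "four cross operations" used above for MULT_EXPR and the other
   codes that reach that branch can be seen in isolation in the
   standalone sketch below (plain C, kept out of the build, not part of
   GCC; overflow is ignored): for [AMIN, AMAX] OP [BMIN, BMAX] the new
   bounds are the minimum and maximum of AMIN OP BMIN, AMIN OP BMAX,
   AMAX OP BMIN and AMAX OP BMAX, e.g. [-2, 3] * [4, 5] = [-10, 15].  */
#if 0
#include <assert.h>

/* Product range of [amin, amax] * [bmin, bmax] via the four cross
   products.  */
static void
mult_range (long amin, long amax, long bmin, long bmax,
	    long *lo, long *hi)
{
  long v[4] = { amin * bmin, amin * bmax, amax * bmin, amax * bmax };
  int i;

  *lo = *hi = v[0];
  for (i = 1; i < 4; i++)
    {
      if (v[i] < *lo)
	*lo = v[i];
      if (v[i] > *hi)
	*hi = v[i];
    }
}

int
main (void)
{
  long lo, hi;
  mult_range (-2, 3, 4, 5, &lo, &hi);
  assert (lo == -10 && hi == 15);
  mult_range (-3, -2, -5, 4, &lo, &hi);
  assert (lo == -12 && hi == 15);	/* -3 * 4 = -12, -3 * -5 = 15.  */
  return 0;
}
#endif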
2659
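/* The BIT_IOR_EXPR bound above is deliberately conservative: for
   non-negative operands it takes MAX0 | MAX1 and then sets every bit
   below its most significant set bit, so [0, 4] | [4, 4] becomes
   [max (0, 4), 7] = [4, 7].  Standalone sketch (plain C, kept out of
   the build, not part of GCC):  */
#if 0
#include <assert.h>

/* Conservative bounds for x | y, x in [xmin, xmax], y in [ymin, ymax],
   all values non-negative.  */
static void
ior_range (unsigned long xmin, unsigned long xmax,
	   unsigned long ymin, unsigned long ymax,
	   unsigned long *lo, unsigned long *hi)
{
  unsigned long m = xmax | ymax;

  *lo = xmin > ymin ? xmin : ymin;
  *hi = m;
  while (m >>= 1)		/* Smear the MSB of m rightwards.  */
    *hi |= m;
}

int
main (void)
{
  unsigned long lo, hi;
  ior_range (0, 4, 4, 4, &lo, &hi);
  assert (lo == 4 && hi == 7);
  ior_range (16, 20, 1, 3, &lo, &hi);
  assert (lo == 16 && hi == 31);	/* 20 | 3 = 23, filled up to 31.  */
  return 0;
}
#endif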
2660
2661 /* Extract range information from a unary expression EXPR based on
2662 the range of its operand and the expression code. */
2663
2664 static void
2665 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2666 tree type, tree op0)
2667 {
2668 tree min, max;
2669 int cmp;
2670 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2671
2672 /* Refuse to operate on certain unary expressions for which we
2673 cannot easily determine a resulting range. */
2674 if (code == FIX_TRUNC_EXPR
2675 || code == FLOAT_EXPR
2676 || code == BIT_NOT_EXPR
2677 || code == CONJ_EXPR)
2678 {
2679 /* We can still do constant propagation here. */
2680 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2681 {
2682 tree tem = fold_unary (code, type, op0);
2683 if (tem
2684 && is_gimple_min_invariant (tem)
2685 && !is_overflow_infinity (tem))
2686 {
2687 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2688 return;
2689 }
2690 }
2691 set_value_range_to_varying (vr);
2692 return;
2693 }
2694
2695 /* Get value ranges for the operand. For constant operands, create
2696 a new value range with the operand to simplify processing. */
2697 if (TREE_CODE (op0) == SSA_NAME)
2698 vr0 = *(get_value_range (op0));
2699 else if (is_gimple_min_invariant (op0))
2700 set_value_range_to_value (&vr0, op0, NULL);
2701 else
2702 set_value_range_to_varying (&vr0);
2703
2704 /* If VR0 is UNDEFINED, so is the result. */
2705 if (vr0.type == VR_UNDEFINED)
2706 {
2707 set_value_range_to_undefined (vr);
2708 return;
2709 }
2710
2711   /* Refuse to operate on symbolic ranges, or if the operand has
2712      neither an integral nor a pointer type.  */
2713 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2714 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2715 || (vr0.type != VR_VARYING
2716 && symbolic_range_p (&vr0)))
2717 {
2718 set_value_range_to_varying (vr);
2719 return;
2720 }
2721
2722 /* If the expression involves pointers, we are only interested in
2723 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2724 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2725 {
2726 bool sop;
2727
2728 sop = false;
2729 if (range_is_nonnull (&vr0)
2730 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2731 && !sop))
2732 set_value_range_to_nonnull (vr, type);
2733 else if (range_is_null (&vr0))
2734 set_value_range_to_null (vr, type);
2735 else
2736 set_value_range_to_varying (vr);
2737
2738 return;
2739 }
2740
2741 /* Handle unary expressions on integer ranges. */
2742 if (CONVERT_EXPR_CODE_P (code)
2743 && INTEGRAL_TYPE_P (type)
2744 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2745 {
2746 tree inner_type = TREE_TYPE (op0);
2747 tree outer_type = type;
2748
2749 /* If VR0 is varying and we increase the type precision, assume
2750 a full range for the following transformation. */
2751 if (vr0.type == VR_VARYING
2752 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2753 {
2754 vr0.type = VR_RANGE;
2755 vr0.min = TYPE_MIN_VALUE (inner_type);
2756 vr0.max = TYPE_MAX_VALUE (inner_type);
2757 }
2758
2759 /* If VR0 is a constant range or anti-range and the conversion is
2760 not truncating we can convert the min and max values and
2761 canonicalize the resulting range. Otherwise we can do the
2762 conversion if the size of the range is less than what the
2763 precision of the target type can represent and the range is
2764 not an anti-range. */
2765 if ((vr0.type == VR_RANGE
2766 || vr0.type == VR_ANTI_RANGE)
2767 && TREE_CODE (vr0.min) == INTEGER_CST
2768 && TREE_CODE (vr0.max) == INTEGER_CST
2769 && (!is_overflow_infinity (vr0.min)
2770 || (vr0.type == VR_RANGE
2771 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2772 && needs_overflow_infinity (outer_type)
2773 && supports_overflow_infinity (outer_type)))
2774 && (!is_overflow_infinity (vr0.max)
2775 || (vr0.type == VR_RANGE
2776 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2777 && needs_overflow_infinity (outer_type)
2778 && supports_overflow_infinity (outer_type)))
2779 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2780 || (vr0.type == VR_RANGE
2781 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2782 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0),
2783 size_int (TYPE_PRECISION (outer_type)), 0)))))
2784 {
2785 tree new_min, new_max;
2786 new_min = force_fit_type_double (outer_type,
2787 TREE_INT_CST_LOW (vr0.min),
2788 TREE_INT_CST_HIGH (vr0.min), 0, 0);
2789 new_max = force_fit_type_double (outer_type,
2790 TREE_INT_CST_LOW (vr0.max),
2791 TREE_INT_CST_HIGH (vr0.max), 0, 0);
2792 if (is_overflow_infinity (vr0.min))
2793 new_min = negative_overflow_infinity (outer_type);
2794 if (is_overflow_infinity (vr0.max))
2795 new_max = positive_overflow_infinity (outer_type);
2796 set_and_canonicalize_value_range (vr, vr0.type,
2797 new_min, new_max, NULL);
2798 return;
2799 }
2800
2801 set_value_range_to_varying (vr);
2802 return;
2803 }
2804
2805 /* Conversion of a VR_VARYING value to a wider type can result
2806 in a usable range. So wait until after we've handled conversions
2807 before dropping the result to VR_VARYING if we had a source
2808 operand that is VR_VARYING. */
2809 if (vr0.type == VR_VARYING)
2810 {
2811 set_value_range_to_varying (vr);
2812 return;
2813 }
2814
2815 /* Apply the operation to each end of the range and see what we end
2816 up with. */
2817 if (code == NEGATE_EXPR
2818 && !TYPE_UNSIGNED (type))
2819 {
2820 /* NEGATE_EXPR flips the range around. We need to treat
2821 TYPE_MIN_VALUE specially. */
2822 if (is_positive_overflow_infinity (vr0.max))
2823 min = negative_overflow_infinity (type);
2824 else if (is_negative_overflow_infinity (vr0.max))
2825 min = positive_overflow_infinity (type);
2826 else if (!vrp_val_is_min (vr0.max))
2827 min = fold_unary_to_constant (code, type, vr0.max);
2828 else if (needs_overflow_infinity (type))
2829 {
2830 if (supports_overflow_infinity (type)
2831 && !is_overflow_infinity (vr0.min)
2832 && !vrp_val_is_min (vr0.min))
2833 min = positive_overflow_infinity (type);
2834 else
2835 {
2836 set_value_range_to_varying (vr);
2837 return;
2838 }
2839 }
2840 else
2841 min = TYPE_MIN_VALUE (type);
2842
2843 if (is_positive_overflow_infinity (vr0.min))
2844 max = negative_overflow_infinity (type);
2845 else if (is_negative_overflow_infinity (vr0.min))
2846 max = positive_overflow_infinity (type);
2847 else if (!vrp_val_is_min (vr0.min))
2848 max = fold_unary_to_constant (code, type, vr0.min);
2849 else if (needs_overflow_infinity (type))
2850 {
2851 if (supports_overflow_infinity (type))
2852 max = positive_overflow_infinity (type);
2853 else
2854 {
2855 set_value_range_to_varying (vr);
2856 return;
2857 }
2858 }
2859 else
2860 max = TYPE_MIN_VALUE (type);
2861 }
2862 else if (code == NEGATE_EXPR
2863 && TYPE_UNSIGNED (type))
2864 {
2865 if (!range_includes_zero_p (&vr0))
2866 {
2867 max = fold_unary_to_constant (code, type, vr0.min);
2868 min = fold_unary_to_constant (code, type, vr0.max);
2869 }
2870 else
2871 {
2872 if (range_is_null (&vr0))
2873 set_value_range_to_null (vr, type);
2874 else
2875 set_value_range_to_varying (vr);
2876 return;
2877 }
2878 }
2879 else if (code == ABS_EXPR
2880 && !TYPE_UNSIGNED (type))
2881 {
2882 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2883 useful range. */
2884 if (!TYPE_OVERFLOW_UNDEFINED (type)
2885 && ((vr0.type == VR_RANGE
2886 && vrp_val_is_min (vr0.min))
2887 || (vr0.type == VR_ANTI_RANGE
2888 && !vrp_val_is_min (vr0.min)
2889 && !range_includes_zero_p (&vr0))))
2890 {
2891 set_value_range_to_varying (vr);
2892 return;
2893 }
2894
2895 /* ABS_EXPR may flip the range around, if the original range
2896 included negative values. */
2897 if (is_overflow_infinity (vr0.min))
2898 min = positive_overflow_infinity (type);
2899 else if (!vrp_val_is_min (vr0.min))
2900 min = fold_unary_to_constant (code, type, vr0.min);
2901 else if (!needs_overflow_infinity (type))
2902 min = TYPE_MAX_VALUE (type);
2903 else if (supports_overflow_infinity (type))
2904 min = positive_overflow_infinity (type);
2905 else
2906 {
2907 set_value_range_to_varying (vr);
2908 return;
2909 }
2910
2911 if (is_overflow_infinity (vr0.max))
2912 max = positive_overflow_infinity (type);
2913 else if (!vrp_val_is_min (vr0.max))
2914 max = fold_unary_to_constant (code, type, vr0.max);
2915 else if (!needs_overflow_infinity (type))
2916 max = TYPE_MAX_VALUE (type);
2917 else if (supports_overflow_infinity (type)
2918 /* We shouldn't generate [+INF, +INF] as set_value_range
2919 doesn't like this and ICEs. */
2920 && !is_positive_overflow_infinity (min))
2921 max = positive_overflow_infinity (type);
2922 else
2923 {
2924 set_value_range_to_varying (vr);
2925 return;
2926 }
2927
2928 cmp = compare_values (min, max);
2929
2930       /* If a VR_ANTI_RANGE contains zero, then we have
2931 	 ~[-INF, min(MIN, MAX)].  */
2932 if (vr0.type == VR_ANTI_RANGE)
2933 {
2934 if (range_includes_zero_p (&vr0))
2935 {
2936 /* Take the lower of the two values. */
2937 if (cmp != 1)
2938 max = min;
2939
2940 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2941 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2942 flag_wrapv is set and the original anti-range doesn't include
2943 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2944 if (TYPE_OVERFLOW_WRAPS (type))
2945 {
2946 tree type_min_value = TYPE_MIN_VALUE (type);
2947
2948 min = (vr0.min != type_min_value
2949 ? int_const_binop (PLUS_EXPR, type_min_value,
2950 integer_one_node, 0)
2951 : type_min_value);
2952 }
2953 else
2954 {
2955 if (overflow_infinity_range_p (&vr0))
2956 min = negative_overflow_infinity (type);
2957 else
2958 min = TYPE_MIN_VALUE (type);
2959 }
2960 }
2961 else
2962 {
2963 /* All else has failed, so create the range [0, INF], even for
2964 flag_wrapv since TYPE_MIN_VALUE is in the original
2965 anti-range. */
2966 vr0.type = VR_RANGE;
2967 min = build_int_cst (type, 0);
2968 if (needs_overflow_infinity (type))
2969 {
2970 if (supports_overflow_infinity (type))
2971 max = positive_overflow_infinity (type);
2972 else
2973 {
2974 set_value_range_to_varying (vr);
2975 return;
2976 }
2977 }
2978 else
2979 max = TYPE_MAX_VALUE (type);
2980 }
2981 }
2982
2983 /* If the range contains zero then we know that the minimum value in the
2984 range will be zero. */
2985 else if (range_includes_zero_p (&vr0))
2986 {
2987 if (cmp == 1)
2988 max = min;
2989 min = build_int_cst (type, 0);
2990 }
2991 else
2992 {
2993 /* If the range was reversed, swap MIN and MAX. */
2994 if (cmp == 1)
2995 {
2996 tree t = min;
2997 min = max;
2998 max = t;
2999 }
3000 }
3001 }
3002 else
3003 {
3004 /* Otherwise, operate on each end of the range. */
3005 min = fold_unary_to_constant (code, type, vr0.min);
3006 max = fold_unary_to_constant (code, type, vr0.max);
3007
3008 if (needs_overflow_infinity (type))
3009 {
3010 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
3011
3012 /* If both sides have overflowed, we don't know
3013 anything. */
3014 if ((is_overflow_infinity (vr0.min)
3015 || TREE_OVERFLOW (min))
3016 && (is_overflow_infinity (vr0.max)
3017 || TREE_OVERFLOW (max)))
3018 {
3019 set_value_range_to_varying (vr);
3020 return;
3021 }
3022
3023 if (is_overflow_infinity (vr0.min))
3024 min = vr0.min;
3025 else if (TREE_OVERFLOW (min))
3026 {
3027 if (supports_overflow_infinity (type))
3028 min = (tree_int_cst_sgn (min) >= 0
3029 ? positive_overflow_infinity (TREE_TYPE (min))
3030 : negative_overflow_infinity (TREE_TYPE (min)));
3031 else
3032 {
3033 set_value_range_to_varying (vr);
3034 return;
3035 }
3036 }
3037
3038 if (is_overflow_infinity (vr0.max))
3039 max = vr0.max;
3040 else if (TREE_OVERFLOW (max))
3041 {
3042 if (supports_overflow_infinity (type))
3043 max = (tree_int_cst_sgn (max) >= 0
3044 ? positive_overflow_infinity (TREE_TYPE (max))
3045 : negative_overflow_infinity (TREE_TYPE (max)));
3046 else
3047 {
3048 set_value_range_to_varying (vr);
3049 return;
3050 }
3051 }
3052 }
3053 }
3054
3055 cmp = compare_values (min, max);
3056 if (cmp == -2 || cmp == 1)
3057 {
3058 /* If the new range has its limits swapped around (MIN > MAX),
3059 then the operation caused one of them to wrap around, mark
3060 the new range VARYING. */
3061 set_value_range_to_varying (vr);
3062 }
3063 else
3064 set_value_range (vr, vr0.type, min, max, NULL);
3065 }
3066
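/* The truncating-conversion test above accepts a range whose extent
   fits in the narrower type, i.e. (MAX - MIN) >> precision is zero,
   and then wraps the endpoints into the new type (the subsequent
   re-canonicalization, which turns a wrapped pair with MIN > MAX into
   an anti-range, is omitted from this sketch).  Standalone plain-C
   illustration, kept out of the build, not part of GCC; PREC is
   assumed smaller than the width of unsigned long.  */
#if 0
#include <assert.h>

/* Return 1 if [min, max] survives truncation to a PREC-bit unsigned
   type, storing the wrapped endpoints in *lo and *hi; 0 otherwise.  */
static int
convert_range (unsigned long min, unsigned long max, int prec,
	       unsigned long *lo, unsigned long *hi)
{
  unsigned long mask = (1ul << prec) - 1;

  if ((max - min) >> prec)	/* Extent does not fit in PREC bits.  */
    return 0;
  *lo = min & mask;
  *hi = max & mask;
  return 1;
}

int
main (void)
{
  unsigned long lo, hi;
  assert (convert_range (300, 400, 8, &lo, &hi));   /* Fits: extent 100.  */
  assert (lo == 44 && hi == 144);		    /* 300 % 256, 400 % 256.  */
  assert (!convert_range (0, 300, 8, &lo, &hi));    /* Extent 300 >= 256.  */
  return 0;
}
#endif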
3067
3068 /* Extract range information from a conditional expression EXPR based on
3069 the ranges of each of its operands and the expression code. */
3070
3071 static void
3072 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3073 {
3074 tree op0, op1;
3075 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3076 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3077
3078 /* Get value ranges for each operand. For constant operands, create
3079 a new value range with the operand to simplify processing. */
3080 op0 = COND_EXPR_THEN (expr);
3081 if (TREE_CODE (op0) == SSA_NAME)
3082 vr0 = *(get_value_range (op0));
3083 else if (is_gimple_min_invariant (op0))
3084 set_value_range_to_value (&vr0, op0, NULL);
3085 else
3086 set_value_range_to_varying (&vr0);
3087
3088 op1 = COND_EXPR_ELSE (expr);
3089 if (TREE_CODE (op1) == SSA_NAME)
3090 vr1 = *(get_value_range (op1));
3091 else if (is_gimple_min_invariant (op1))
3092 set_value_range_to_value (&vr1, op1, NULL);
3093 else
3094 set_value_range_to_varying (&vr1);
3095
3096   /* The resulting value range is the union of the operand ranges.  */
3097 vrp_meet (&vr0, &vr1);
3098 copy_value_range (vr, &vr0);
3099 }
3100
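/* For a COND_EXPR the two arms are merged with vrp_meet; for two plain
   constant ranges that is their convex hull, e.g. meeting [1, 5] with
   [10, 20] yields [1, 20].  Minimal standalone sketch (plain C, kept
   out of the build, not part of GCC):  */
#if 0
#include <assert.h>

/* Convex hull of [amin, amax] and [bmin, bmax].  */
static void
union_ranges (long amin, long amax, long bmin, long bmax,
	      long *lo, long *hi)
{
  *lo = amin < bmin ? amin : bmin;
  *hi = amax > bmax ? amax : bmax;
}

int
main (void)
{
  long lo, hi;
  union_ranges (1, 5, 10, 20, &lo, &hi);	/* x = cond ? a : b  */
  assert (lo == 1 && hi == 20);
  return 0;
}
#endif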
3101
3102 /* Extract range information from a comparison expression EXPR based
3103 on the range of its operand and the expression code. */
3104
3105 static void
3106 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3107 tree type, tree op0, tree op1)
3108 {
3109 bool sop = false;
3110 tree val;
3111
3112 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3113 NULL);
3114
3115 /* A disadvantage of using a special infinity as an overflow
3116 representation is that we lose the ability to record overflow
3117 when we don't have an infinity. So we have to ignore a result
3118 which relies on overflow. */
3119
3120 if (val && !is_overflow_infinity (val) && !sop)
3121 {
3122 /* Since this expression was found on the RHS of an assignment,
3123 its type may be different from _Bool. Convert VAL to EXPR's
3124 type. */
3125 val = fold_convert (type, val);
3126 if (is_gimple_min_invariant (val))
3127 set_value_range_to_value (vr, val, vr->equiv);
3128 else
3129 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3130 }
3131 else
3132 /* The result of a comparison is always true or false. */
3133 set_value_range_to_truthvalue (vr, type);
3134 }
3135
3136 /* Try to derive a nonnegative or nonzero range out of STMT relying
3137 primarily on generic routines in fold in conjunction with range data.
3138    Store the result in *VR.  */
3139
3140 static void
3141 extract_range_basic (value_range_t *vr, gimple stmt)
3142 {
3143 bool sop = false;
3144 tree type = gimple_expr_type (stmt);
3145
3146 if (INTEGRAL_TYPE_P (type)
3147 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3148 set_value_range_to_nonnegative (vr, type,
3149 sop || stmt_overflow_infinity (stmt));
3150 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3151 && !sop)
3152 set_value_range_to_nonnull (vr, type);
3153 else
3154 set_value_range_to_varying (vr);
3155 }
3156
3157
3158 /* Try to compute a useful range out of assignment STMT and store it
3159 in *VR. */
3160
3161 static void
3162 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3163 {
3164 enum tree_code code = gimple_assign_rhs_code (stmt);
3165
3166 if (code == ASSERT_EXPR)
3167 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3168 else if (code == SSA_NAME)
3169 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3170 else if (TREE_CODE_CLASS (code) == tcc_binary
3171 || code == TRUTH_AND_EXPR
3172 || code == TRUTH_OR_EXPR
3173 || code == TRUTH_XOR_EXPR)
3174 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3175 gimple_expr_type (stmt),
3176 gimple_assign_rhs1 (stmt),
3177 gimple_assign_rhs2 (stmt));
3178 else if (TREE_CODE_CLASS (code) == tcc_unary)
3179 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3180 gimple_expr_type (stmt),
3181 gimple_assign_rhs1 (stmt));
3182 else if (code == COND_EXPR)
3183 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3184 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3185 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3186 gimple_expr_type (stmt),
3187 gimple_assign_rhs1 (stmt),
3188 gimple_assign_rhs2 (stmt));
3189 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3190 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3191 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3192 else
3193 set_value_range_to_varying (vr);
3194
3195 if (vr->type == VR_VARYING)
3196 extract_range_basic (vr, stmt);
3197 }
3198
3199 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3200 would be profitable to adjust VR using scalar evolution information
3201 for VAR. If so, update VR with the new limits. */
3202
3203 static void
3204 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3205 gimple stmt, tree var)
3206 {
3207 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3208 enum ev_direction dir;
3209
3210 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3211 better opportunities than a regular range, but I'm not sure. */
3212 if (vr->type == VR_ANTI_RANGE)
3213 return;
3214
3215 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3216
3217 /* Like in PR19590, scev can return a constant function. */
3218 if (is_gimple_min_invariant (chrec))
3219 {
3220 set_value_range_to_value (vr, chrec, vr->equiv);
3221 return;
3222 }
3223
3224 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3225 return;
3226
3227 init = initial_condition_in_loop_num (chrec, loop->num);
3228 tem = op_with_constant_singleton_value_range (init);
3229 if (tem)
3230 init = tem;
3231 step = evolution_part_in_loop_num (chrec, loop->num);
3232 tem = op_with_constant_singleton_value_range (step);
3233 if (tem)
3234 step = tem;
3235
3236 /* If STEP is symbolic, we can't know whether INIT will be the
3237 minimum or maximum value in the range. Also, unless INIT is
3238 a simple expression, compare_values and possibly other functions
3239 in tree-vrp won't be able to handle it. */
3240 if (step == NULL_TREE
3241 || !is_gimple_min_invariant (step)
3242 || !valid_value_p (init))
3243 return;
3244
3245 dir = scev_direction (chrec);
3246 if (/* Do not adjust ranges if we do not know whether the iv increases
3247 or decreases, ... */
3248 dir == EV_DIR_UNKNOWN
3249 /* ... or if it may wrap. */
3250 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3251 true))
3252 return;
3253
3254 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3255 negative_overflow_infinity and positive_overflow_infinity,
3256 because we have concluded that the loop probably does not
3257 wrap. */
3258
3259 type = TREE_TYPE (var);
3260 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3261 tmin = lower_bound_in_type (type, type);
3262 else
3263 tmin = TYPE_MIN_VALUE (type);
3264 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3265 tmax = upper_bound_in_type (type, type);
3266 else
3267 tmax = TYPE_MAX_VALUE (type);
3268
3269 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3270 {
3271 min = tmin;
3272 max = tmax;
3273
3274 /* For VARYING or UNDEFINED ranges, just about anything we get
3275 from scalar evolutions should be better. */
3276
3277 if (dir == EV_DIR_DECREASES)
3278 max = init;
3279 else
3280 min = init;
3281
3282 /* If we would create an invalid range, then just assume we
3283 know absolutely nothing. This may be over-conservative,
3284 but it's clearly safe, and should happen only in unreachable
3285 parts of code, or for invalid programs. */
3286 if (compare_values (min, max) == 1)
3287 return;
3288
3289 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3290 }
3291 else if (vr->type == VR_RANGE)
3292 {
3293 min = vr->min;
3294 max = vr->max;
3295
3296 if (dir == EV_DIR_DECREASES)
3297 {
3298 /* INIT is the maximum value. If INIT is lower than VR->MAX
3299 but no smaller than VR->MIN, set VR->MAX to INIT. */
3300 if (compare_values (init, max) == -1)
3301 {
3302 max = init;
3303
3304 /* If we just created an invalid range with the minimum
3305 greater than the maximum, we fail conservatively.
3306 This should happen only in unreachable
3307 parts of code, or for invalid programs. */
3308 if (compare_values (min, max) == 1)
3309 return;
3310 }
3311
3312 /* According to the loop information, the variable does not
3313 overflow. If we think it does, probably because of an
3314 overflow due to arithmetic on a different INF value,
3315 reset now. */
3316 if (is_negative_overflow_infinity (min))
3317 min = tmin;
3318 }
3319 else
3320 {
3321 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3322 if (compare_values (init, min) == 1)
3323 {
3324 min = init;
3325
3326 /* Again, avoid creating invalid range by failing. */
3327 if (compare_values (min, max) == 1)
3328 return;
3329 }
3330
3331 if (is_positive_overflow_infinity (max))
3332 max = tmax;
3333 }
3334
3335 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3336 }
3337 }
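
/* The refinement above can be summarized as: when the evolution of VAR
   is {INIT, +, STEP} and the iv provably does not wrap, a decreasing
   iv never exceeds INIT and an increasing one never drops below it.
   Standalone sketch (plain C, kept out of the build, not part of GCC):  */
#if 0
#include <assert.h>
#include <limits.h>

/* Sharpen [*min, *max] for a non-wrapping induction variable with
   initial value INIT.  Returns 0 if the refined range would be empty
   (unreachable code), mirroring the conservative bail-out above.  */
static int
refine_iv_range (long init, int decreases, long *min, long *max)
{
  if (decreases)
    {
      if (init < *max)
	*max = init;
    }
  else if (init > *min)
    *min = init;
  return *min <= *max;
}

int
main (void)
{
  long min = LONG_MIN, max = LONG_MAX;

  /* for (i = 0; i != n; i++)  -->  i is in [0, +INF].  */
  assert (refine_iv_range (0, 0, &min, &max));
  assert (min == 0 && max == LONG_MAX);
  return 0;
}
#endif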
3338
3339 /* Return true if VAR may overflow at STMT. This checks any available
3340 loop information to see if we can determine that VAR does not
3341 overflow. */
3342
3343 static bool
3344 vrp_var_may_overflow (tree var, gimple stmt)
3345 {
3346 struct loop *l;
3347 tree chrec, init, step;
3348
3349 if (current_loops == NULL)
3350 return true;
3351
3352 l = loop_containing_stmt (stmt);
3353 if (l == NULL
3354 || !loop_outer (l))
3355 return true;
3356
3357 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3358 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3359 return true;
3360
3361 init = initial_condition_in_loop_num (chrec, l->num);
3362 step = evolution_part_in_loop_num (chrec, l->num);
3363
3364 if (step == NULL_TREE
3365 || !is_gimple_min_invariant (step)
3366 || !valid_value_p (init))
3367 return true;
3368
3369 /* If we get here, we know something useful about VAR based on the
3370 loop information. If it wraps, it may overflow. */
3371
3372 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3373 true))
3374 return true;
3375
3376 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3377 {
3378 print_generic_expr (dump_file, var, 0);
3379       fprintf (dump_file, ": loop information indicates it does not overflow\n");
3380 }
3381
3382 return false;
3383 }
3384
3385
3386 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3387
3388 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3389 all the values in the ranges.
3390
3391 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3392
3393 - Return NULL_TREE if it is not always possible to determine the
3394 value of the comparison.
3395
3396 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3397 overflow infinity was used in the test. */
3398
3399
3400 static tree
3401 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3402 bool *strict_overflow_p)
3403 {
3404 /* VARYING or UNDEFINED ranges cannot be compared. */
3405 if (vr0->type == VR_VARYING
3406 || vr0->type == VR_UNDEFINED
3407 || vr1->type == VR_VARYING
3408 || vr1->type == VR_UNDEFINED)
3409 return NULL_TREE;
3410
3411 /* Anti-ranges need to be handled separately. */
3412 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3413 {
3414 /* If both are anti-ranges, then we cannot compute any
3415 comparison. */
3416 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3417 return NULL_TREE;
3418
3419 /* These comparisons are never statically computable. */
3420 if (comp == GT_EXPR
3421 || comp == GE_EXPR
3422 || comp == LT_EXPR
3423 || comp == LE_EXPR)
3424 return NULL_TREE;
3425
3426 /* Equality can be computed only between a range and an
3427 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3428 if (vr0->type == VR_RANGE)
3429 {
3430 /* To simplify processing, make VR0 the anti-range. */
3431 value_range_t *tmp = vr0;
3432 vr0 = vr1;
3433 vr1 = tmp;
3434 }
3435
3436 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3437
3438 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3439 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3440 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3441
3442 return NULL_TREE;
3443 }
3444
3445 if (!usable_range_p (vr0, strict_overflow_p)
3446 || !usable_range_p (vr1, strict_overflow_p))
3447 return NULL_TREE;
3448
3449 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3450 operands around and change the comparison code. */
3451 if (comp == GT_EXPR || comp == GE_EXPR)
3452 {
3453 value_range_t *tmp;
3454 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3455 tmp = vr0;
3456 vr0 = vr1;
3457 vr1 = tmp;
3458 }
3459
3460 if (comp == EQ_EXPR)
3461 {
3462 /* Equality may only be computed if both ranges represent
3463 exactly one value. */
3464 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3465 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3466 {
3467 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3468 strict_overflow_p);
3469 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3470 strict_overflow_p);
3471 if (cmp_min == 0 && cmp_max == 0)
3472 return boolean_true_node;
3473 else if (cmp_min != -2 && cmp_max != -2)
3474 return boolean_false_node;
3475 }
3476       /* If [V0_MIN, V0_MAX] is disjoint from [V1_MIN, V1_MAX], then V0 != V1.  */
3477 else if (compare_values_warnv (vr0->min, vr1->max,
3478 strict_overflow_p) == 1
3479 || compare_values_warnv (vr1->min, vr0->max,
3480 strict_overflow_p) == 1)
3481 return boolean_false_node;
3482
3483 return NULL_TREE;
3484 }
3485 else if (comp == NE_EXPR)
3486 {
3487 int cmp1, cmp2;
3488
3489 /* If VR0 is completely to the left or completely to the right
3490 of VR1, they are always different. Notice that we need to
3491 make sure that both comparisons yield similar results to
3492 avoid comparing values that cannot be compared at
3493 compile-time. */
3494 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3495 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3496 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3497 return boolean_true_node;
3498
3499 /* If VR0 and VR1 represent a single value and are identical,
3500 return false. */
3501 else if (compare_values_warnv (vr0->min, vr0->max,
3502 strict_overflow_p) == 0
3503 && compare_values_warnv (vr1->min, vr1->max,
3504 strict_overflow_p) == 0
3505 && compare_values_warnv (vr0->min, vr1->min,
3506 strict_overflow_p) == 0
3507 && compare_values_warnv (vr0->max, vr1->max,
3508 strict_overflow_p) == 0)
3509 return boolean_false_node;
3510
3511 /* Otherwise, they may or may not be different. */
3512 else
3513 return NULL_TREE;
3514 }
3515 else if (comp == LT_EXPR || comp == LE_EXPR)
3516 {
3517 int tst;
3518
3519 /* If VR0 is to the left of VR1, return true. */
3520 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3521 if ((comp == LT_EXPR && tst == -1)
3522 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3523 {
3524 if (overflow_infinity_range_p (vr0)
3525 || overflow_infinity_range_p (vr1))
3526 *strict_overflow_p = true;
3527 return boolean_true_node;
3528 }
3529
3530 /* If VR0 is to the right of VR1, return false. */
3531 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3532 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3533 || (comp == LE_EXPR && tst == 1))
3534 {
3535 if (overflow_infinity_range_p (vr0)
3536 || overflow_infinity_range_p (vr1))
3537 *strict_overflow_p = true;
3538 return boolean_false_node;
3539 }
3540
3541 /* Otherwise, we don't know. */
3542 return NULL_TREE;
3543 }
3544
3545 gcc_unreachable ();
3546 }
3547
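/* A standalone illustration of the LT_EXPR case above (plain C, kept
   out of the build, not part of GCC): [AMIN, AMAX] < [BMIN, BMAX] is
   known to be true when AMAX < BMIN, known to be false when
   AMIN >= BMAX, and undecidable otherwise.  */
#if 0
#include <assert.h>

/* Evaluate [amin, amax] < [bmin, bmax]: 1 if always true, 0 if always
   false, -1 if it cannot be decided at compile time.  */
static int
range_lt (long amin, long amax, long bmin, long bmax)
{
  if (amax < bmin)
    return 1;		/* VR0 lies entirely to the left of VR1.  */
  if (amin >= bmax)
    return 0;		/* VR0 lies entirely to the right of, or touches, VR1.  */
  return -1;		/* The ranges overlap.  */
}

int
main (void)
{
  assert (range_lt (1, 5, 10, 20) == 1);
  assert (range_lt (10, 20, 1, 5) == 0);
  assert (range_lt (1, 5, 3, 8) == -1);
  return 0;
}
#endif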
3548
3549 /* Given a value range VR, a value VAL and a comparison code COMP, return
3550 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3551 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3552 always returns false. Return NULL_TREE if it is not always
3553 possible to determine the value of the comparison. Also set
3554 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3555 infinity was used in the test. */
3556
3557 static tree
3558 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3559 bool *strict_overflow_p)
3560 {
3561 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3562 return NULL_TREE;
3563
3564 /* Anti-ranges need to be handled separately. */
3565 if (vr->type == VR_ANTI_RANGE)
3566 {
3567 /* For anti-ranges, the only predicates that we can compute at
3568 compile time are equality and inequality. */
3569 if (comp == GT_EXPR
3570 || comp == GE_EXPR
3571 || comp == LT_EXPR
3572 || comp == LE_EXPR)
3573 return NULL_TREE;
3574
3575 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3576 if (value_inside_range (val, vr) == 1)
3577 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3578
3579 return NULL_TREE;
3580 }
3581
3582 if (!usable_range_p (vr, strict_overflow_p))
3583 return NULL_TREE;
3584
3585 if (comp == EQ_EXPR)
3586 {
3587 /* EQ_EXPR may only be computed if VR represents exactly
3588 one value. */
3589 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3590 {
3591 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3592 if (cmp == 0)
3593 return boolean_true_node;
3594 else if (cmp == -1 || cmp == 1 || cmp == 2)
3595 return boolean_false_node;
3596 }
3597 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3598 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3599 return boolean_false_node;
3600
3601 return NULL_TREE;
3602 }
3603 else if (comp == NE_EXPR)
3604 {
3605 /* If VAL is not inside VR, then they are always different. */
3606 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3607 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3608 return boolean_true_node;
3609
3610 /* If VR represents exactly one value equal to VAL, then return
3611 false. */
3612 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3613 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3614 return boolean_false_node;
3615
3616 /* Otherwise, they may or may not be different. */
3617 return NULL_TREE;
3618 }
3619 else if (comp == LT_EXPR || comp == LE_EXPR)
3620 {
3621 int tst;
3622
3623 /* If VR is to the left of VAL, return true. */
3624 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3625 if ((comp == LT_EXPR && tst == -1)
3626 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3627 {
3628 if (overflow_infinity_range_p (vr))
3629 *strict_overflow_p = true;
3630 return boolean_true_node;
3631 }
3632
3633 /* If VR is to the right of VAL, return false. */
3634 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3635 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3636 || (comp == LE_EXPR && tst == 1))
3637 {
3638 if (overflow_infinity_range_p (vr))
3639 *strict_overflow_p = true;
3640 return boolean_false_node;
3641 }
3642
3643 /* Otherwise, we don't know. */
3644 return NULL_TREE;
3645 }
3646 else if (comp == GT_EXPR || comp == GE_EXPR)
3647 {
3648 int tst;
3649
3650 /* If VR is to the right of VAL, return true. */
3651 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3652 if ((comp == GT_EXPR && tst == 1)
3653 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3654 {
3655 if (overflow_infinity_range_p (vr))
3656 *strict_overflow_p = true;
3657 return boolean_true_node;
3658 }
3659
3660 /* If VR is to the left of VAL, return false. */
3661 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3662 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3663 || (comp == GE_EXPR && tst == -1))
3664 {
3665 if (overflow_infinity_range_p (vr))
3666 *strict_overflow_p = true;
3667 return boolean_false_node;
3668 }
3669
3670 /* Otherwise, we don't know. */
3671 return NULL_TREE;
3672 }
3673
3674 gcc_unreachable ();
3675 }
3676
3677
3678 /* Debugging dumps. */
3679
3680 void dump_value_range (FILE *, value_range_t *);
3681 void debug_value_range (value_range_t *);
3682 void dump_all_value_ranges (FILE *);
3683 void debug_all_value_ranges (void);
3684 void dump_vr_equiv (FILE *, bitmap);
3685 void debug_vr_equiv (bitmap);
3686
3687
3688 /* Dump value range VR to FILE. */
3689
3690 void
3691 dump_value_range (FILE *file, value_range_t *vr)
3692 {
3693 if (vr == NULL)
3694 fprintf (file, "[]");
3695 else if (vr->type == VR_UNDEFINED)
3696 fprintf (file, "UNDEFINED");
3697 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3698 {
3699 tree type = TREE_TYPE (vr->min);
3700
3701 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3702
3703 if (is_negative_overflow_infinity (vr->min))
3704 fprintf (file, "-INF(OVF)");
3705 else if (INTEGRAL_TYPE_P (type)
3706 && !TYPE_UNSIGNED (type)
3707 && vrp_val_is_min (vr->min))
3708 fprintf (file, "-INF");
3709 else
3710 print_generic_expr (file, vr->min, 0);
3711
3712 fprintf (file, ", ");
3713
3714 if (is_positive_overflow_infinity (vr->max))
3715 fprintf (file, "+INF(OVF)");
3716 else if (INTEGRAL_TYPE_P (type)
3717 && vrp_val_is_max (vr->max))
3718 fprintf (file, "+INF");
3719 else
3720 print_generic_expr (file, vr->max, 0);
3721
3722 fprintf (file, "]");
3723
3724 if (vr->equiv)
3725 {
3726 bitmap_iterator bi;
3727 unsigned i, c = 0;
3728
3729 fprintf (file, " EQUIVALENCES: { ");
3730
3731 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3732 {
3733 print_generic_expr (file, ssa_name (i), 0);
3734 fprintf (file, " ");
3735 c++;
3736 }
3737
3738 fprintf (file, "} (%u elements)", c);
3739 }
3740 }
3741 else if (vr->type == VR_VARYING)
3742 fprintf (file, "VARYING");
3743 else
3744 fprintf (file, "INVALID RANGE");
3745 }
3746
3747
3748 /* Dump value range VR to stderr. */
3749
3750 void
3751 debug_value_range (value_range_t *vr)
3752 {
3753 dump_value_range (stderr, vr);
3754 fprintf (stderr, "\n");
3755 }
3756
3757
3758 /* Dump value ranges of all SSA_NAMEs to FILE. */
3759
3760 void
3761 dump_all_value_ranges (FILE *file)
3762 {
3763 size_t i;
3764
3765 for (i = 0; i < num_ssa_names; i++)
3766 {
3767 if (vr_value[i])
3768 {
3769 print_generic_expr (file, ssa_name (i), 0);
3770 fprintf (file, ": ");
3771 dump_value_range (file, vr_value[i]);
3772 fprintf (file, "\n");
3773 }
3774 }
3775
3776 fprintf (file, "\n");
3777 }
3778
3779
3780 /* Dump all value ranges to stderr. */
3781
3782 void
3783 debug_all_value_ranges (void)
3784 {
3785 dump_all_value_ranges (stderr);
3786 }
3787
3788
3789 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3790 create a new SSA name N and return the assertion assignment
3791 'V = ASSERT_EXPR <V, V OP W>'. */
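/* For instance (with made-up SSA names), given the condition 'x_3 > 10'
   and V = x_3, this builds the new name x_5 and the assignment
   x_5 = ASSERT_EXPR <x_3, x_3 > 10>, and records the x_5 -> x_3
   mapping so the SSA web can be fixed up later.  */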
3792
3793 static gimple
3794 build_assert_expr_for (tree cond, tree v)
3795 {
3796 tree n;
3797 gimple assertion;
3798
3799 gcc_assert (TREE_CODE (v) == SSA_NAME);
3800 n = duplicate_ssa_name (v, NULL);
3801
3802 if (COMPARISON_CLASS_P (cond))
3803 {
3804 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3805 assertion = gimple_build_assign (n, a);
3806 }
3807 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3808 {
3809 /* Given !V, build the assignment N = false. */
3810 tree op0 = TREE_OPERAND (cond, 0);
3811 gcc_assert (op0 == v);
3812 assertion = gimple_build_assign (n, boolean_false_node);
3813 }
3814 else if (TREE_CODE (cond) == SSA_NAME)
3815 {
3816 /* Given V, build the assignment N = true. */
3817 gcc_assert (v == cond);
3818 assertion = gimple_build_assign (n, boolean_true_node);
3819 }
3820 else
3821 gcc_unreachable ();
3822
3823 SSA_NAME_DEF_STMT (n) = assertion;
3824
3825 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3826 operand of the ASSERT_EXPR. Register the new name and the old one
3827 in the replacement table so that we can fix the SSA web after
3828 adding all the ASSERT_EXPRs. */
3829 register_new_name_mapping (n, v);
3830
3831 return assertion;
3832 }
3833
3834
3835 /* Return true if STMT is a predicate expression involving floating
3836 point values. */
3837
3838 static inline bool
3839 fp_predicate (gimple stmt)
3840 {
3841 GIMPLE_CHECK (stmt, GIMPLE_COND);
3842
3843 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
3844 }
3845
3846
3847 /* If the range of values taken by OP can be inferred after STMT executes,
3848 return the comparison code (COMP_CODE_P) and value (VAL_P) that
3849 describe the inferred range. Return true if a range could be
3850 inferred. */
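/* For instance, if STMT is the store '*p_1 = 5' and
   -fdelete-null-pointer-checks is in effect, we can infer 'p_1 != 0'
   (a NE_EXPR against zero) for the uses of p_1 that follow STMT.  */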
3851
3852 static bool
3853 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
3854 {
3855 *val_p = NULL_TREE;
3856 *comp_code_p = ERROR_MARK;
3857
3858 /* Do not attempt to infer anything in names that flow through
3859 abnormal edges. */
3860 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
3861 return false;
3862
3863 /* Similarly, don't infer anything from statements that may throw
3864 exceptions. */
3865 if (stmt_could_throw_p (stmt))
3866 return false;
3867
3868 /* If STMT is the last statement of a basic block with no
3869 successors, there is no point inferring anything about any of its
3870 operands. We would not be able to find a proper insertion point
3871 for the assertion, anyway. */
3872 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
3873 return false;
3874
3875 /* We can only assume that a pointer dereference will yield
3876 non-NULL if -fdelete-null-pointer-checks is enabled. */
3877 if (flag_delete_null_pointer_checks
3878 && POINTER_TYPE_P (TREE_TYPE (op))
3879 && gimple_code (stmt) != GIMPLE_ASM)
3880 {
3881 unsigned num_uses, num_loads, num_stores;
3882
3883 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
3884 if (num_loads + num_stores > 0)
3885 {
3886 *val_p = build_int_cst (TREE_TYPE (op), 0);
3887 *comp_code_p = NE_EXPR;
3888 return true;
3889 }
3890 }
3891
3892 return false;
3893 }
3894
3895
3896 void dump_asserts_for (FILE *, tree);
3897 void debug_asserts_for (tree);
3898 void dump_all_asserts (FILE *);
3899 void debug_all_asserts (void);
3900
3901 /* Dump all the registered assertions for NAME to FILE. */
3902
3903 void
3904 dump_asserts_for (FILE *file, tree name)
3905 {
3906 assert_locus_t loc;
3907
3908 fprintf (file, "Assertions to be inserted for ");
3909 print_generic_expr (file, name, 0);
3910 fprintf (file, "\n");
3911
3912 loc = asserts_for[SSA_NAME_VERSION (name)];
3913 while (loc)
3914 {
3915 fprintf (file, "\t");
3916 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
3917 fprintf (file, "\n\tBB #%d", loc->bb->index);
3918 if (loc->e)
3919 {
3920 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
3921 loc->e->dest->index);
3922 dump_edge_info (file, loc->e, 0);
3923 }
3924 fprintf (file, "\n\tPREDICATE: ");
3925 print_generic_expr (file, name, 0);
3926 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
3927 print_generic_expr (file, loc->val, 0);
3928 fprintf (file, "\n\n");
3929 loc = loc->next;
3930 }
3931
3932 fprintf (file, "\n");
3933 }
3934
3935
3936 /* Dump all the registered assertions for NAME to stderr. */
3937
3938 void
3939 debug_asserts_for (tree name)
3940 {
3941 dump_asserts_for (stderr, name);
3942 }
3943
3944
3945 /* Dump all the registered assertions for all the names to FILE. */
3946
3947 void
3948 dump_all_asserts (FILE *file)
3949 {
3950 unsigned i;
3951 bitmap_iterator bi;
3952
3953 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
3954 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3955 dump_asserts_for (file, ssa_name (i));
3956 fprintf (file, "\n");
3957 }
3958
3959
3960 /* Dump all the registered assertions for all the names to stderr. */
3961
3962 void
3963 debug_all_asserts (void)
3964 {
3965 dump_all_asserts (stderr);
3966 }
3967
3968
3969 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
3970 'EXPR COMP_CODE VAL' at a location that dominates block BB or
3971 E->DEST, then register this location as a possible insertion point
3972 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
3973
3974 BB, E and SI provide the exact insertion point for the new
3975 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
3976 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
3977 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
3978 must not be NULL. */
3979
3980 static void
3981 register_new_assert_for (tree name, tree expr,
3982 enum tree_code comp_code,
3983 tree val,
3984 basic_block bb,
3985 edge e,
3986 gimple_stmt_iterator si)
3987 {
3988 assert_locus_t n, loc, last_loc;
3989 basic_block dest_bb;
3990
3991 #if defined ENABLE_CHECKING
3992 gcc_assert (bb == NULL || e == NULL);
3993
3994 if (e == NULL)
3995 gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
3996 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
3997 #endif
3998
3999 /* Never build an assert comparing against an integer constant with
4000 TREE_OVERFLOW set. This confuses our undefined overflow warning
4001 machinery. */
4002 if (TREE_CODE (val) == INTEGER_CST
4003 && TREE_OVERFLOW (val))
4004 val = build_int_cst_wide (TREE_TYPE (val),
4005 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4006
4007 /* The new assertion A will be inserted at BB or E. We need to
4008 determine if the new location is dominated by a previously
4009 registered location for A. If we are doing an edge insertion,
4010 assume that A will be inserted at E->DEST. Note that this is not
4011 necessarily true.
4012
4013 If E is a critical edge, it will be split. But even if E is
4014 split, the new block will dominate the same set of blocks that
4015 E->DEST dominates.
4016
4017 The reverse, however, is not true: blocks dominated by E->DEST
4018 will not be dominated by the new block created to split E. So,
4019 if the insertion location is on a critical edge, we will not use
4020 the new location to move another assertion previously registered
4021 at a block dominated by E->DEST. */
4022 dest_bb = (bb) ? bb : e->dest;
4023
4024 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4025 VAL at a block dominating DEST_BB, then we don't need to insert a new
4026 one. Similarly, if the same assertion already exists at a block
4027 dominated by DEST_BB and the new location is not on a critical
4028 edge, then update the existing location for the assertion (i.e.,
4029 move the assertion up in the dominance tree).
4030
4031 Note, this is implemented as a simple linked list because there
4032 should not be more than a handful of assertions registered per
4033 name. If this becomes a performance problem, a table hashed by
4034 COMP_CODE and VAL could be implemented. */
4035 loc = asserts_for[SSA_NAME_VERSION (name)];
4036 last_loc = loc;
4037 while (loc)
4038 {
4039 if (loc->comp_code == comp_code
4040 && (loc->val == val
4041 || operand_equal_p (loc->val, val, 0))
4042 && (loc->expr == expr
4043 || operand_equal_p (loc->expr, expr, 0)))
4044 {
4045 /* If the assertion NAME COMP_CODE VAL has already been
4046 registered at a basic block that dominates DEST_BB, then
4047 we don't need to insert the same assertion again. Note
4048 that we don't check strict dominance here to avoid
4049 replicating the same assertion inside the same basic
4050 block more than once (e.g., when a pointer is
4051 dereferenced several times inside a block).
4052
4053 An exception to this rule is edge insertions. If the
4054 new assertion is to be inserted on edge E, then it will
4055 dominate all the other insertions that we may want to
4056 insert in DEST_BB. So, if we are doing an edge
4057 insertion, don't do this dominance check. */
4058 if (e == NULL
4059 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4060 return;
4061
4062 /* Otherwise, if E is not a critical edge and DEST_BB
4063 dominates the existing location for the assertion, move
4064 the assertion up in the dominance tree by updating its
4065 location information. */
4066 if ((e == NULL || !EDGE_CRITICAL_P (e))
4067 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4068 {
4069 loc->bb = dest_bb;
4070 loc->e = e;
4071 loc->si = si;
4072 return;
4073 }
4074 }
4075
4076 /* Update the last node of the list and move to the next one. */
4077 last_loc = loc;
4078 loc = loc->next;
4079 }
4080
4081 /* If we didn't find an assertion already registered for
4082 NAME COMP_CODE VAL, add a new one at the end of the list of
4083 assertions associated with NAME. */
4084 n = XNEW (struct assert_locus_d);
4085 n->bb = dest_bb;
4086 n->e = e;
4087 n->si = si;
4088 n->comp_code = comp_code;
4089 n->val = val;
4090 n->expr = expr;
4091 n->next = NULL;
4092
4093 if (last_loc)
4094 last_loc->next = n;
4095 else
4096 asserts_for[SSA_NAME_VERSION (name)] = n;
4097
4098 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4099 }
4100
4101 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4102 Extract a suitable test code and value and store them into *CODE_P and
4103 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4104
4105 If no extraction was possible, return FALSE, otherwise return TRUE.
4106
4107 If INVERT is true, then we invert the result stored into *CODE_P. */
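/* For instance, for the predicate '10 < x_2' with NAME = x_2 this
   stores GT_EXPR in *CODE_P and 10 in *VAL_P (the comparison is
   flipped so that NAME becomes the first operand); if INVERT is true,
   LE_EXPR is stored instead.  */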
4108
4109 static bool
4110 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4111 tree cond_op0, tree cond_op1,
4112 bool invert, enum tree_code *code_p,
4113 tree *val_p)
4114 {
4115 enum tree_code comp_code;
4116 tree val;
4117
4118 /* We have a comparison of the form NAME COMP VAL
4119 or VAL COMP NAME. */
4120 if (name == cond_op1)
4121 {
4122 /* If the predicate is of the form VAL COMP NAME, flip
4123 COMP around because we need to register NAME as the
4124 first operand in the predicate. */
4125 comp_code = swap_tree_comparison (cond_code);
4126 val = cond_op0;
4127 }
4128 else
4129 {
4130 /* The comparison is of the form NAME COMP VAL, so the
4131 comparison code remains unchanged. */
4132 comp_code = cond_code;
4133 val = cond_op1;
4134 }
4135
4136 /* Invert the comparison code as necessary. */
4137 if (invert)
4138 comp_code = invert_tree_comparison (comp_code, 0);
4139
4140 /* VRP does not handle float types. */
4141 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4142 return false;
4143
4144 /* Do not register always-false predicates.
4145 FIXME: this works around a limitation in fold() when dealing with
4146 enumerations. Given 'enum { N1, N2 } x;', fold will not
4147 fold 'if (x > N2)' to 'if (0)'. */
4148 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4149 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4150 {
4151 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4152 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4153
4154 if (comp_code == GT_EXPR
4155 && (!max
4156 || compare_values (val, max) == 0))
4157 return false;
4158
4159 if (comp_code == LT_EXPR
4160 && (!min
4161 || compare_values (val, min) == 0))
4162 return false;
4163 }
4164 *code_p = comp_code;
4165 *val_p = val;
4166 return true;
4167 }
4168
4169 /* Try to register an edge assertion for SSA name NAME on edge E for
4170 the condition COND contributing to the conditional jump pointed to by BSI.
4171 Invert the condition COND if INVERT is true.
4172 Return true if an assertion for NAME could be registered. */
4173
4174 static bool
4175 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4176 enum tree_code cond_code,
4177 tree cond_op0, tree cond_op1, bool invert)
4178 {
4179 tree val;
4180 enum tree_code comp_code;
4181 bool retval = false;
4182
4183 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4184 cond_op0,
4185 cond_op1,
4186 invert, &comp_code, &val))
4187 return false;
4188
4189 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4190 reachable from E. */
4191 if (live_on_edge (e, name)
4192 && !has_single_use (name))
4193 {
4194 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4195 retval = true;
4196 }
4197
4198 /* In the case of NAME <= CST and NAME being defined as
4199 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4200 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4201 This catches range and anti-range tests. */
4202 if ((comp_code == LE_EXPR
4203 || comp_code == GT_EXPR)
4204 && TREE_CODE (val) == INTEGER_CST
4205 && TYPE_UNSIGNED (TREE_TYPE (val)))
4206 {
4207 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4208 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4209
4210 /* Extract CST2 from the (optional) addition. */
4211 if (is_gimple_assign (def_stmt)
4212 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4213 {
4214 name2 = gimple_assign_rhs1 (def_stmt);
4215 cst2 = gimple_assign_rhs2 (def_stmt);
4216 if (TREE_CODE (name2) == SSA_NAME
4217 && TREE_CODE (cst2) == INTEGER_CST)
4218 def_stmt = SSA_NAME_DEF_STMT (name2);
4219 }
4220
4221 /* Extract NAME2 from the (optional) sign-changing cast. */
4222 if (gimple_assign_cast_p (def_stmt))
4223 {
4224 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4225 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4226 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4227 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4228 name3 = gimple_assign_rhs1 (def_stmt);
4229 }
4230
4231 /* If name3 is used later, create an ASSERT_EXPR for it. */
4232 if (name3 != NULL_TREE
4233 && TREE_CODE (name3) == SSA_NAME
4234 && (cst2 == NULL_TREE
4235 || TREE_CODE (cst2) == INTEGER_CST)
4236 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4237 && live_on_edge (e, name3)
4238 && !has_single_use (name3))
4239 {
4240 tree tmp;
4241
4242 /* Build an expression for the range test. */
4243 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4244 if (cst2 != NULL_TREE)
4245 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4246
4247 if (dump_file)
4248 {
4249 fprintf (dump_file, "Adding assert for ");
4250 print_generic_expr (dump_file, name3, 0);
4251 fprintf (dump_file, " from ");
4252 print_generic_expr (dump_file, tmp, 0);
4253 fprintf (dump_file, "\n");
4254 }
4255
4256 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4257
4258 retval = true;
4259 }
4260
4261 /* If name2 is used later, create an ASSERT_EXPR for it. */
4262 if (name2 != NULL_TREE
4263 && TREE_CODE (name2) == SSA_NAME
4264 && TREE_CODE (cst2) == INTEGER_CST
4265 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4266 && live_on_edge (e, name2)
4267 && !has_single_use (name2))
4268 {
4269 tree tmp;
4270
4271 /* Build an expression for the range test. */
4272 tmp = name2;
4273 if (TREE_TYPE (name) != TREE_TYPE (name2))
4274 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4275 if (cst2 != NULL_TREE)
4276 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4277
4278 if (dump_file)
4279 {
4280 fprintf (dump_file, "Adding assert for ");
4281 print_generic_expr (dump_file, name2, 0);
4282 fprintf (dump_file, " from ");
4283 print_generic_expr (dump_file, tmp, 0);
4284 fprintf (dump_file, "\n");
4285 }
4286
4287 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4288
4289 retval = true;
4290 }
4291 }
4292
4293 return retval;
4294 }
4295
4296 /* OP is an operand of a truth value expression which is known to have
4297 a particular value. Register any asserts for OP and for any
4298 operands in OP's defining statement.
4299
4300 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4301 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
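/* For instance, if OP is defined by 'OP = a_1 < b_2' and CODE is
   NE_EXPR (i.e. OP is known to be true), we can also register
   assertions about a_1 and b_2 derived from that comparison.  */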
4302
4303 static bool
4304 register_edge_assert_for_1 (tree op, enum tree_code code,
4305 edge e, gimple_stmt_iterator bsi)
4306 {
4307 bool retval = false;
4308 gimple op_def;
4309 tree val;
4310 enum tree_code rhs_code;
4311
4312 /* We only care about SSA_NAMEs. */
4313 if (TREE_CODE (op) != SSA_NAME)
4314 return false;
4315
4316 /* We know that OP will have a zero or nonzero value. If OP is used
4317 more than once go ahead and register an assert for OP.
4318
4319 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4320 it will always be set for OP (because OP is used in a COND_EXPR in
4321 the subgraph). */
4322 if (!has_single_use (op))
4323 {
4324 val = build_int_cst (TREE_TYPE (op), 0);
4325 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4326 retval = true;
4327 }
4328
4329 /* Now look at how OP is set. If it's set from a comparison,
4330 a truth operation or some bit operations, then we may be able
4331 to register information about the operands of that assignment. */
4332 op_def = SSA_NAME_DEF_STMT (op);
4333 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4334 return retval;
4335
4336 rhs_code = gimple_assign_rhs_code (op_def);
4337
4338 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4339 {
4340 bool invert = (code == EQ_EXPR);
4341 tree op0 = gimple_assign_rhs1 (op_def);
4342 tree op1 = gimple_assign_rhs2 (op_def);
4343
4344 if (TREE_CODE (op0) == SSA_NAME)
4345 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4346 invert);
4347 if (TREE_CODE (op1) == SSA_NAME)
4348 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4349 invert);
4350 }
4351 else if ((code == NE_EXPR
4352 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4353 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4354 || (code == EQ_EXPR
4355 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4356 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4357 {
4358 /* Recurse on each operand. */
4359 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4360 code, e, bsi);
4361 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4362 code, e, bsi);
4363 }
4364 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4365 {
4366 /* Recurse, flipping CODE. */
4367 code = invert_tree_comparison (code, false);
4368 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4369 code, e, bsi);
4370 }
4371 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4372 {
4373 /* Recurse through the copy. */
4374 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4375 code, e, bsi);
4376 }
4377 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4378 {
4379 /* Recurse through the type conversion. */
4380 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4381 code, e, bsi);
4382 }
4383
4384 return retval;
4385 }
4386
4387 /* Try to register an edge assertion for SSA name NAME on edge E for
4388 the condition COND contributing to the conditional jump pointed to by SI.
4389 Return true if an assertion for NAME could be registered. */
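/* For instance, if the condition is 'x_3 != 0' and x_3 is defined as
   'x_3 = a_1 & b_2', then on the edge where the condition is true we
   can assert that both a_1 and b_2 are nonzero.  */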
4390
4391 static bool
4392 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4393 enum tree_code cond_code, tree cond_op0,
4394 tree cond_op1)
4395 {
4396 tree val;
4397 enum tree_code comp_code;
4398 bool retval = false;
4399 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4400
4401 /* Do not attempt to infer anything in names that flow through
4402 abnormal edges. */
4403 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4404 return false;
4405
4406 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4407 cond_op0, cond_op1,
4408 is_else_edge,
4409 &comp_code, &val))
4410 return false;
4411
4412 /* Register ASSERT_EXPRs for name. */
4413 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4414 cond_op1, is_else_edge);
4415
4416
4417 /* If COND is effectively an equality test of an SSA_NAME against
4418 the value zero or one, then we may be able to assert values
4419 for SSA_NAMEs which flow into COND. */
4420
4421 /* In the case of NAME == 1 or NAME != 0, if the defining statement
4422 of NAME is a TRUTH_AND_EXPR or a BIT_AND_EXPR, we can assert that
4423 both of its operands have nonzero value. */
4424 if (((comp_code == EQ_EXPR && integer_onep (val))
4425 || (comp_code == NE_EXPR && integer_zerop (val))))
4426 {
4427 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4428
4429 if (is_gimple_assign (def_stmt)
4430 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4431 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4432 {
4433 tree op0 = gimple_assign_rhs1 (def_stmt);
4434 tree op1 = gimple_assign_rhs2 (def_stmt);
4435 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4436 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4437 }
4438 }
4439
4440 /* In the case of NAME == 0 or NAME != 1, if the defining statement
4441 of NAME is a TRUTH_OR_EXPR, we can assert that both of its
4442 operands have zero value. */
4443 if (((comp_code == EQ_EXPR && integer_zerop (val))
4444 || (comp_code == NE_EXPR && integer_onep (val))))
4445 {
4446 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4447
4448 if (is_gimple_assign (def_stmt)
4449 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4450 /* For BIT_IOR_EXPR, only if NAME == 0 do both operands
4451 necessarily have zero value. */
4452 || (comp_code == EQ_EXPR
4453 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4454 {
4455 tree op0 = gimple_assign_rhs1 (def_stmt);
4456 tree op1 = gimple_assign_rhs2 (def_stmt);
4457 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4458 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4459 }
4460 }
4461
4462 return retval;
4463 }
4464
4465
4466 /* Determine whether the outgoing edges of BB should receive an
4467 ASSERT_EXPR for each of the operands of BB's LAST statement.
4468 The last statement of BB must be a COND_EXPR.
4469
4470 If any of the sub-graphs rooted at BB have an interesting use of
4471 the predicate operands, an assert location node is added to the
4472 list of assertions for the corresponding operands. */
4473
4474 static bool
4475 find_conditional_asserts (basic_block bb, gimple last)
4476 {
4477 bool need_assert;
4478 gimple_stmt_iterator bsi;
4479 tree op;
4480 edge_iterator ei;
4481 edge e;
4482 ssa_op_iter iter;
4483
4484 need_assert = false;
4485 bsi = gsi_for_stmt (last);
4486
4487 /* Look for uses of the operands in each of the sub-graphs
4488 rooted at BB. We need to check each of the outgoing edges
4489 separately, so that we know what kind of ASSERT_EXPR to
4490 insert. */
4491 FOR_EACH_EDGE (e, ei, bb->succs)
4492 {
4493 if (e->dest == bb)
4494 continue;
4495
4496 /* Register the necessary assertions for each operand in the
4497 conditional predicate. */
4498 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4499 {
4500 need_assert |= register_edge_assert_for (op, e, bsi,
4501 gimple_cond_code (last),
4502 gimple_cond_lhs (last),
4503 gimple_cond_rhs (last));
4504 }
4505 }
4506
4507 return need_assert;
4508 }
4509
4510 /* Compare two case labels sorting first by the destination label uid
4511 and then by the case value. */
4512
4513 static int
4514 compare_case_labels (const void *p1, const void *p2)
4515 {
4516 const_tree const case1 = *(const_tree const*)p1;
4517 const_tree const case2 = *(const_tree const*)p2;
4518 unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
4519 unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
4520
4521 if (uid1 < uid2)
4522 return -1;
4523 else if (uid1 == uid2)
4524 {
4525 /* Make sure the default label is first in a group. */
4526 if (!CASE_LOW (case1))
4527 return -1;
4528 else if (!CASE_LOW (case2))
4529 return 1;
4530 else
4531 return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
4532 }
4533 else
4534 return 1;
4535 }
4536
4537 /* Determine whether the outgoing edges of BB should receive an
4538 ASSERT_EXPR for each of the operands of BB's LAST statement.
4539 The last statement of BB must be a SWITCH_EXPR.
4540
4541 If any of the sub-graphs rooted at BB have an interesting use of
4542 the predicate operands, an assert location node is added to the
4543 list of assertions for the corresponding operands. */
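/* For instance, for the case label 'case 3 ... 7:' we register the
   assertions 'op >= 3' and 'op <= 7' on the edge leading to that
   label, while a plain 'case 3:' yields the single assertion
   'op == 3'.  */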
4544
4545 static bool
4546 find_switch_asserts (basic_block bb, gimple last)
4547 {
4548 bool need_assert;
4549 gimple_stmt_iterator bsi;
4550 tree op;
4551 edge e;
4552 tree vec2;
4553 size_t n = gimple_switch_num_labels(last);
4554 #if GCC_VERSION >= 4000
4555 unsigned int idx;
4556 #else
4557 /* Work around GCC 3.4 bug (PR 37086). */
4558 volatile unsigned int idx;
4559 #endif
4560
4561 need_assert = false;
4562 bsi = gsi_for_stmt (last);
4563 op = gimple_switch_index (last);
4564 if (TREE_CODE (op) != SSA_NAME)
4565 return false;
4566
4567 /* Build a vector of case labels sorted by destination label. */
4568 vec2 = make_tree_vec (n);
4569 for (idx = 0; idx < n; ++idx)
4570 TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx);
4571 qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
4572
4573 for (idx = 0; idx < n; ++idx)
4574 {
4575 tree min, max;
4576 tree cl = TREE_VEC_ELT (vec2, idx);
4577
4578 min = CASE_LOW (cl);
4579 max = CASE_HIGH (cl);
4580
4581 /* If there are multiple case labels with the same destination
4582 we need to combine them to a single value range for the edge. */
4583 if (idx + 1 < n
4584 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
4585 {
4586 /* Skip labels until the last of the group. */
4587 do {
4588 ++idx;
4589 } while (idx < n
4590 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
4591 --idx;
4592
4593 /* Pick up the maximum of the case label range. */
4594 if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
4595 max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
4596 else
4597 max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
4598 }
4599
4600 /* Nothing to do if the range includes the default label until we
4601 can register anti-ranges. */
4602 if (min == NULL_TREE)
4603 continue;
4604
4605 /* Find the edge to register the assert expr on. */
4606 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
4607
4608 /* Register the necessary assertions for the operand in the
4609 SWITCH_EXPR. */
4610 need_assert |= register_edge_assert_for (op, e, bsi,
4611 max ? GE_EXPR : EQ_EXPR,
4612 op,
4613 fold_convert (TREE_TYPE (op),
4614 min));
4615 if (max)
4616 {
4617 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4618 op,
4619 fold_convert (TREE_TYPE (op),
4620 max));
4621 }
4622 }
4623
4624 return need_assert;
4625 }
4626
4627
4628 /* Traverse all the statements in block BB looking for statements that
4629 may generate useful assertions for the SSA names in their operands.
4630 If a statement produces a useful assertion A for name N_i, then the
4631 list of assertions already generated for N_i is scanned to
4632 determine if A is actually needed.
4633
4634 If N_i already had the assertion A at a location dominating the
4635 current location, then nothing needs to be done. Otherwise, the
4636 new location for A is recorded instead.
4637
4638 1- For every statement S in BB, all the variables used by S are
4639 added to bitmap FOUND_IN_SUBGRAPH.
4640
4641 2- If statement S uses an operand N in a way that exposes a known
4642 value range for N, then if N was not already generated by an
4643 ASSERT_EXPR, create a new assert location for N. For instance,
4644 if N is a pointer and the statement dereferences it, we can
4645 assume that N is not NULL.
4646
4647 3- COND_EXPRs are a special case of #2. We can derive range
4648 information from the predicate but need to insert different
4649 ASSERT_EXPRs for each of the sub-graphs rooted at the
4650 conditional block. If the last statement of BB is a conditional
4651 expression of the form 'X op Y', then
4652
4653 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4654
4655 b) If the conditional is the only entry point to the sub-graph
4656 corresponding to the THEN_CLAUSE, recurse into it. On
4657 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4658 an ASSERT_EXPR is added for the corresponding variable.
4659
4660 c) Repeat step (b) on the ELSE_CLAUSE.
4661
4662 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4663
4664 For instance,
4665
4666 if (a == 9)
4667 b = a;
4668 else
4669 b = c + 1;
4670
4671 In this case, an assertion on the THEN clause is useful to
4672 determine that 'a' is always 9 on that edge. However, an assertion
4673 on the ELSE clause would be unnecessary.
4674
4675 4- If BB does not end in a conditional expression, then we recurse
4676 into BB's dominator children.
4677
4678 At the end of the recursive traversal, every SSA name will have a
4679 list of locations where ASSERT_EXPRs should be added. When a new
4680 location for name N is found, it is registered by calling
4681 register_new_assert_for. That function keeps track of all the
4682 registered assertions to prevent adding unnecessary assertions.
4683 For instance, if a pointer P_4 is dereferenced more than once in a
4684 dominator tree, only the location dominating all the dereferences of
4685 P_4 will receive an ASSERT_EXPR.
4686
4687 If this function returns true, then it means that there are names
4688 for which we need to generate ASSERT_EXPRs. Those assertions are
4689 inserted by process_assert_insertions. */
4690
4691 static bool
4692 find_assert_locations_1 (basic_block bb, sbitmap live)
4693 {
4694 gimple_stmt_iterator si;
4695 gimple last;
4696 gimple phi;
4697 bool need_assert;
4698
4699 need_assert = false;
4700 last = last_stmt (bb);
4701
4702 /* If BB's last statement is a conditional statement involving integer
4703 operands, determine if we need to add ASSERT_EXPRs. */
4704 if (last
4705 && gimple_code (last) == GIMPLE_COND
4706 && !fp_predicate (last)
4707 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4708 need_assert |= find_conditional_asserts (bb, last);
4709
4710 /* If BB's last statement is a switch statement involving integer
4711 operands, determine if we need to add ASSERT_EXPRs. */
4712 if (last
4713 && gimple_code (last) == GIMPLE_SWITCH
4714 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4715 need_assert |= find_switch_asserts (bb, last);
4716
4717 /* Traverse all the statements in BB marking used names and looking
4718 for statements that may infer assertions for their used operands. */
4719 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4720 {
4721 gimple stmt;
4722 tree op;
4723 ssa_op_iter i;
4724
4725 stmt = gsi_stmt (si);
4726
4727 if (is_gimple_debug (stmt))
4728 continue;
4729
4730 /* See if we can derive an assertion for any of STMT's operands. */
4731 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4732 {
4733 tree value;
4734 enum tree_code comp_code;
4735
4736 /* Mark OP in our live bitmap. */
4737 SET_BIT (live, SSA_NAME_VERSION (op));
4738
4739 /* If OP is used in such a way that we can infer a value
4740 range for it, and we don't find a previous assertion for
4741 it, create a new assertion location node for OP. */
4742 if (infer_value_range (stmt, op, &comp_code, &value))
4743 {
4744 /* If we are able to infer a nonzero value range for OP,
4745 then walk backwards through the use-def chain to see if OP
4746 was set via a typecast.
4747
4748 If so, then we can also infer a nonzero value range
4749 for the operand of the NOP_EXPR. */
4750 if (comp_code == NE_EXPR && integer_zerop (value))
4751 {
4752 tree t = op;
4753 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4754
4755 while (is_gimple_assign (def_stmt)
4756 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4757 && TREE_CODE
4758 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4759 && POINTER_TYPE_P
4760 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4761 {
4762 t = gimple_assign_rhs1 (def_stmt);
4763 def_stmt = SSA_NAME_DEF_STMT (t);
4764
4765 /* Note we want to register the assert for the
4766 operand of the NOP_EXPR after SI, not after the
4767 conversion. */
4768 if (! has_single_use (t))
4769 {
4770 register_new_assert_for (t, t, comp_code, value,
4771 bb, NULL, si);
4772 need_assert = true;
4773 }
4774 }
4775 }
4776
4777 /* If OP is used only once, namely in this STMT, don't
4778 bother creating an ASSERT_EXPR for it. Such an
4779 ASSERT_EXPR would do nothing but increase compile time. */
4780 if (!has_single_use (op))
4781 {
4782 register_new_assert_for (op, op, comp_code, value,
4783 bb, NULL, si);
4784 need_assert = true;
4785 }
4786 }
4787 }
4788 }
4789
4790 /* Traverse all PHI nodes in BB marking used operands. */
4791 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4792 {
4793 use_operand_p arg_p;
4794 ssa_op_iter i;
4795 phi = gsi_stmt (si);
4796
4797 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4798 {
4799 tree arg = USE_FROM_PTR (arg_p);
4800 if (TREE_CODE (arg) == SSA_NAME)
4801 SET_BIT (live, SSA_NAME_VERSION (arg));
4802 }
4803 }
4804
4805 return need_assert;
4806 }
4807
4808 /* Do an RPO walk over the function computing SSA name liveness
4809 on-the-fly and deciding on assert expressions to insert.
4810 Returns true if there are assert expressions to be inserted. */
4811
4812 static bool
4813 find_assert_locations (void)
4814 {
4815 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4816 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4817 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4818 int rpo_cnt, i;
4819 bool need_asserts;
4820
4821 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4822 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4823 for (i = 0; i < rpo_cnt; ++i)
4824 bb_rpo[rpo[i]] = i;
4825
4826 need_asserts = false;
4827 for (i = rpo_cnt-1; i >= 0; --i)
4828 {
4829 basic_block bb = BASIC_BLOCK (rpo[i]);
4830 edge e;
4831 edge_iterator ei;
4832
4833 if (!live[rpo[i]])
4834 {
4835 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4836 sbitmap_zero (live[rpo[i]]);
4837 }
4838
4839 /* Process BB and update the live information with uses in
4840 this block. */
4841 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
4842
4843 /* Merge liveness into the predecessor blocks and free it. */
4844 if (!sbitmap_empty_p (live[rpo[i]]))
4845 {
4846 int pred_rpo = i;
4847 FOR_EACH_EDGE (e, ei, bb->preds)
4848 {
4849 int pred = e->src->index;
4850 if (e->flags & EDGE_DFS_BACK)
4851 continue;
4852
4853 if (!live[pred])
4854 {
4855 live[pred] = sbitmap_alloc (num_ssa_names);
4856 sbitmap_zero (live[pred]);
4857 }
4858 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
4859
4860 if (bb_rpo[pred] < pred_rpo)
4861 pred_rpo = bb_rpo[pred];
4862 }
4863
4864 /* Record the RPO number of the last visited block that needs
4865 live information from this block. */
4866 last_rpo[rpo[i]] = pred_rpo;
4867 }
4868 else
4869 {
4870 sbitmap_free (live[rpo[i]]);
4871 live[rpo[i]] = NULL;
4872 }
4873
4874 /* We can free all successors live bitmaps if all their
4875 predecessors have been visited already. */
4876 FOR_EACH_EDGE (e, ei, bb->succs)
4877 if (last_rpo[e->dest->index] == i
4878 && live[e->dest->index])
4879 {
4880 sbitmap_free (live[e->dest->index]);
4881 live[e->dest->index] = NULL;
4882 }
4883 }
4884
4885 XDELETEVEC (rpo);
4886 XDELETEVEC (bb_rpo);
4887 XDELETEVEC (last_rpo);
4888 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
4889 if (live[i])
4890 sbitmap_free (live[i]);
4891 XDELETEVEC (live);
4892
4893 return need_asserts;
4894 }
4895
4896 /* Create an ASSERT_EXPR for NAME and insert it in the location
4897 indicated by LOC. Return true if we made any edge insertions. */
4898
4899 static bool
4900 process_assert_insertions_for (tree name, assert_locus_t loc)
4901 {
4902 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4903 gimple stmt;
4904 tree cond;
4905 gimple assert_stmt;
4906 edge_iterator ei;
4907 edge e;
4908
4909 /* If we have X <=> X do not insert an assert expr for that. */
4910 if (loc->expr == loc->val)
4911 return false;
4912
4913 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4914 assert_stmt = build_assert_expr_for (cond, name);
4915 if (loc->e)
4916 {
4917 /* We have been asked to insert the assertion on an edge. This
4918 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4919 #if defined ENABLE_CHECKING
4920 gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4921 || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH);
4922 #endif
4923
4924 gsi_insert_on_edge (loc->e, assert_stmt);
4925 return true;
4926 }
4927
4928 /* Otherwise, we can insert right after LOC->SI, as long as the
4929 statement is not one that must end the basic block. */
4930 stmt = gsi_stmt (loc->si);
4931 if (!stmt_ends_bb_p (stmt))
4932 {
4933 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4934 return false;
4935 }
4936
4937 /* If STMT must be the last statement in BB, we can only insert new
4938 assertions on the non-abnormal edge out of BB. Note that since
4939 STMT is not control flow, there may only be one non-abnormal edge
4940 out of BB. */
4941 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4942 if (!(e->flags & EDGE_ABNORMAL))
4943 {
4944 gsi_insert_on_edge (e, assert_stmt);
4945 return true;
4946 }
4947
4948 gcc_unreachable ();
4949 }
4950
4951
4952 /* Process all the insertions registered for every name N_i registered
4953 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4954 found in ASSERTS_FOR[i]. */
4955
4956 static void
4957 process_assert_insertions (void)
4958 {
4959 unsigned i;
4960 bitmap_iterator bi;
4961 bool update_edges_p = false;
4962 int num_asserts = 0;
4963
4964 if (dump_file && (dump_flags & TDF_DETAILS))
4965 dump_all_asserts (dump_file);
4966
4967 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4968 {
4969 assert_locus_t loc = asserts_for[i];
4970 gcc_assert (loc);
4971
4972 while (loc)
4973 {
4974 assert_locus_t next = loc->next;
4975 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4976 free (loc);
4977 loc = next;
4978 num_asserts++;
4979 }
4980 }
4981
4982 if (update_edges_p)
4983 gsi_commit_edge_inserts ();
4984
4985 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4986 num_asserts);
4987 }
4988
4989
4990 /* Traverse the flowgraph looking for conditional jumps to insert range
4991 expressions. These range expressions are meant to provide information
4992 to optimizations that need to reason in terms of value ranges. They
4993 will not be expanded into RTL. For instance, given:
4994
4995 x = ...
4996 y = ...
4997 if (x < y)
4998 y = x - 2;
4999 else
5000 x = y + 3;
5001
5002 this pass will transform the code into:
5003
5004 x = ...
5005 y = ...
5006 if (x < y)
5007 {
5008 x = ASSERT_EXPR <x, x < y>
5009 y = x - 2
5010 }
5011 else
5012 {
5013 y = ASSERT_EXPR <y, x <= y>
5014 x = y + 3
5015 }
5016
5017 The idea is that once copy and constant propagation have run, other
5018 optimizations will be able to determine what ranges of values can 'x'
5019 take in different paths of the code, simply by checking the reaching
5020 definition of 'x'. */
5021
5022 static void
5023 insert_range_assertions (void)
5024 {
5025 need_assert_for = BITMAP_ALLOC (NULL);
5026 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5027
5028 calculate_dominance_info (CDI_DOMINATORS);
5029
5030 if (find_assert_locations ())
5031 {
5032 process_assert_insertions ();
5033 update_ssa (TODO_update_ssa_no_phi);
5034 }
5035
5036 if (dump_file && (dump_flags & TDF_DETAILS))
5037 {
5038 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5039 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5040 }
5041
5042 free (asserts_for);
5043 BITMAP_FREE (need_assert_for);
5044 }
5045
5046 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible arrays
5047 and "struct" hacks. If VRP can determine that the array
5048 subscript is a constant, check if it is outside the valid range.
5049 If the array subscript is a RANGE, warn if it does not overlap
5050 the valid range.
5051 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
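/* For instance, for 'int a[10]' and a subscript whose value range is
   [10, 12], the subscript is reported as above the array bounds,
   while a subscript with range [0, 9] produces no warning.  */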
5052
5053 static void
5054 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5055 {
5056 value_range_t* vr = NULL;
5057 tree low_sub, up_sub;
5058 tree low_bound, up_bound, up_bound_p1;
5059 tree base;
5060
5061 if (TREE_NO_WARNING (ref))
5062 return;
5063
5064 low_sub = up_sub = TREE_OPERAND (ref, 1);
5065 up_bound = array_ref_up_bound (ref);
5066
5067 /* Cannot check flexible arrays. */
5068 if (!up_bound
5069 || TREE_CODE (up_bound) != INTEGER_CST)
5070 return;
5071
5072 /* Accesses to trailing arrays via pointers may access storage
5073 beyond the type's array bounds. */
5074 base = get_base_address (ref);
5075 if (base
5076 && INDIRECT_REF_P (base))
5077 {
5078 tree cref, next = NULL_TREE;
5079
5080 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5081 return;
5082
5083 cref = TREE_OPERAND (ref, 0);
5084 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5085 for (next = TREE_CHAIN (TREE_OPERAND (cref, 1));
5086 next && TREE_CODE (next) != FIELD_DECL;
5087 next = TREE_CHAIN (next))
5088 ;
5089
5090 /* If this is the last field in a struct type or a field in a
5091 union type do not warn. */
5092 if (!next)
5093 return;
5094 }
5095
5096 low_bound = array_ref_low_bound (ref);
5097 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node, 0);
5098
5099 if (TREE_CODE (low_sub) == SSA_NAME)
5100 {
5101 vr = get_value_range (low_sub);
5102 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5103 {
5104 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5105 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5106 }
5107 }
5108
5109 if (vr && vr->type == VR_ANTI_RANGE)
5110 {
5111 if (TREE_CODE (up_sub) == INTEGER_CST
5112 && tree_int_cst_lt (up_bound, up_sub)
5113 && TREE_CODE (low_sub) == INTEGER_CST
5114 && tree_int_cst_lt (low_sub, low_bound))
5115 {
5116 warning_at (location, OPT_Warray_bounds,
5117 "array subscript is outside array bounds");
5118 TREE_NO_WARNING (ref) = 1;
5119 }
5120 }
5121 else if (TREE_CODE (up_sub) == INTEGER_CST
5122 && (ignore_off_by_one
5123 ? (tree_int_cst_lt (up_bound, up_sub)
5124 && !tree_int_cst_equal (up_bound_p1, up_sub))
5125 : (tree_int_cst_lt (up_bound, up_sub)
5126 || tree_int_cst_equal (up_bound_p1, up_sub))))
5127 {
5128 warning_at (location, OPT_Warray_bounds,
5129 "array subscript is above array bounds");
5130 TREE_NO_WARNING (ref) = 1;
5131 }
5132 else if (TREE_CODE (low_sub) == INTEGER_CST
5133 && tree_int_cst_lt (low_sub, low_bound))
5134 {
5135 warning_at (location, OPT_Warray_bounds,
5136 "array subscript is below array bounds");
5137 TREE_NO_WARNING (ref) = 1;
5138 }
5139 }
5140
5141 /* Check whether the expression T, located at LOCATION, computes the
5142 address of an ARRAY_REF, and call check_array_ref on it if so. */
5143
5144 static void
5145 search_for_addr_array (tree t, location_t location)
5146 {
5147 while (TREE_CODE (t) == SSA_NAME)
5148 {
5149 gimple g = SSA_NAME_DEF_STMT (t);
5150
5151 if (gimple_code (g) != GIMPLE_ASSIGN)
5152 return;
5153
5154 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5155 != GIMPLE_SINGLE_RHS)
5156 return;
5157
5158 t = gimple_assign_rhs1 (g);
5159 }
5160
5161
5162 /* We are only interested in addresses of ARRAY_REF's. */
5163 if (TREE_CODE (t) != ADDR_EXPR)
5164 return;
5165
5166 /* Check each ARRAY_REFs in the reference chain. */
5167 do
5168 {
5169 if (TREE_CODE (t) == ARRAY_REF)
5170 check_array_ref (location, t, true /*ignore_off_by_one*/);
5171
5172 t = TREE_OPERAND (t, 0);
5173 }
5174 while (handled_component_p (t));
5175 }
5176
5177 /* walk_tree() callback that checks if *TP is
5178 an ARRAY_REF inside an ADDR_EXPR (in which an array
5179 subscript one outside the valid range is allowed). Call
5180 check_array_ref for each ARRAY_REF found. The location is
5181 passed in DATA. */
5182
5183 static tree
5184 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5185 {
5186 tree t = *tp;
5187 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5188 location_t location;
5189
5190 if (EXPR_HAS_LOCATION (t))
5191 location = EXPR_LOCATION (t);
5192 else
5193 {
5194 location_t *locp = (location_t *) wi->info;
5195 location = *locp;
5196 }
5197
5198 *walk_subtree = TRUE;
5199
5200 if (TREE_CODE (t) == ARRAY_REF)
5201 check_array_ref (location, t, false /*ignore_off_by_one*/);
5202
5203 if (TREE_CODE (t) == INDIRECT_REF
5204 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5205 search_for_addr_array (TREE_OPERAND (t, 0), location);
5206
5207 if (TREE_CODE (t) == ADDR_EXPR)
5208 *walk_subtree = FALSE;
5209
5210 return NULL_TREE;
5211 }
5212
5213 /* Walk over all statements of all reachable BBs and call check_array_bounds
5214 on them. */
5215
5216 static void
5217 check_all_array_refs (void)
5218 {
5219 basic_block bb;
5220 gimple_stmt_iterator si;
5221
5222 FOR_EACH_BB (bb)
5223 {
5224 edge_iterator ei;
5225 edge e;
5226 bool executable = false;
5227
5228 /* Skip blocks that were found to be unreachable. */
5229 FOR_EACH_EDGE (e, ei, bb->preds)
5230 executable |= !!(e->flags & EDGE_EXECUTABLE);
5231 if (!executable)
5232 continue;
5233
5234 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5235 {
5236 gimple stmt = gsi_stmt (si);
5237 struct walk_stmt_info wi;
5238 if (!gimple_has_location (stmt))
5239 continue;
5240
5241 if (is_gimple_call (stmt))
5242 {
5243 size_t i;
5244 size_t n = gimple_call_num_args (stmt);
5245 for (i = 0; i < n; i++)
5246 {
5247 tree arg = gimple_call_arg (stmt, i);
5248 search_for_addr_array (arg, gimple_location (stmt));
5249 }
5250 }
5251 else
5252 {
5253 memset (&wi, 0, sizeof (wi));
5254 wi.info = CONST_CAST (void *, (const void *)
5255 gimple_location_ptr (stmt));
5256
5257 walk_gimple_op (gsi_stmt (si),
5258 check_array_bounds,
5259 &wi);
5260 }
5261 }
5262 }
5263 }
5264
5265 /* Convert range assertion expressions into the implied copies and
5266 copy propagate away the copies. Doing the trivial copy propagation
5267 here avoids the need to run the full copy propagation pass after
5268 VRP.
5269
5270 FIXME, this will eventually lead to copy propagation removing the
5271 names that had useful range information attached to them. For
5272 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5273 then N_i will have the range [3, +INF].
5274
5275 However, by converting the assertion into the implied copy
5276 operation N_i = N_j, we will then copy-propagate N_j into the uses
5277 of N_i and lose the range information. We may want to hold on to
5278 ASSERT_EXPRs a little while longer as the ranges could be used in
5279 things like jump threading.
5280
5281 The problem with keeping ASSERT_EXPRs around is that passes after
5282 VRP need to handle them appropriately.
5283
5284 Another approach would be to make the range information a first
5285 class property of the SSA_NAME so that it can be queried from
5286 any pass. This is made somewhat more complex by the need for
5287 multiple ranges to be associated with one SSA_NAME. */
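/* For instance, for x_4 = ASSERT_EXPR <x_3, x_3 > 3> every use of x_4
   is rewritten to use x_3 directly and the assertion statement is
   then removed.  */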
5288
5289 static void
5290 remove_range_assertions (void)
5291 {
5292 basic_block bb;
5293 gimple_stmt_iterator si;
5294
5295 /* Note that the BSI iterator bump happens at the bottom of the
5296 loop and no bump is necessary if we're removing the statement
5297 referenced by the current BSI. */
5298 FOR_EACH_BB (bb)
5299 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5300 {
5301 gimple stmt = gsi_stmt (si);
5302 gimple use_stmt;
5303
5304 if (is_gimple_assign (stmt)
5305 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5306 {
5307 tree rhs = gimple_assign_rhs1 (stmt);
5308 tree var;
5309 tree cond = fold (ASSERT_EXPR_COND (rhs));
5310 use_operand_p use_p;
5311 imm_use_iterator iter;
5312
5313 gcc_assert (cond != boolean_false_node);
5314
5315 /* Propagate the RHS into every use of the LHS. */
5316 var = ASSERT_EXPR_VAR (rhs);
5317 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5318 gimple_assign_lhs (stmt))
5319 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5320 {
5321 SET_USE (use_p, var);
5322 gcc_assert (TREE_CODE (var) == SSA_NAME);
5323 }
5324
5325 /* And finally, remove the copy, it is not needed. */
5326 gsi_remove (&si, true);
5327 release_defs (stmt);
5328 }
5329 else
5330 gsi_next (&si);
5331 }
5332 }
5333
5334
5335 /* Return true if STMT is interesting for VRP. */
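/* For instance, 'x_3 = y_4 + 5' with integral x_3 is interesting,
   while a statement without an SSA name result of integral or pointer
   type (e.g. a store to memory) is not.  */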
5336
5337 static bool
5338 stmt_interesting_for_vrp (gimple stmt)
5339 {
5340 if (gimple_code (stmt) == GIMPLE_PHI
5341 && is_gimple_reg (gimple_phi_result (stmt))
5342 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5343 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5344 return true;
5345 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5346 {
5347 tree lhs = gimple_get_lhs (stmt);
5348
5349 /* In general, assignments with virtual operands are not useful
5350 for deriving ranges, with the obvious exception of calls to
5351 builtin functions. */
5352 if (lhs && TREE_CODE (lhs) == SSA_NAME
5353 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5354 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5355 && ((is_gimple_call (stmt)
5356 && gimple_call_fndecl (stmt) != NULL_TREE
5357 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5358 || !gimple_vuse (stmt)))
5359 return true;
5360 }
5361 else if (gimple_code (stmt) == GIMPLE_COND
5362 || gimple_code (stmt) == GIMPLE_SWITCH)
5363 return true;
5364
5365 return false;
5366 }
5367
5368
5369 /* Initialize local data structures for VRP. */
5370
5371 static void
5372 vrp_initialize (void)
5373 {
5374 basic_block bb;
5375
5376 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5377 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5378
5379 FOR_EACH_BB (bb)
5380 {
5381 gimple_stmt_iterator si;
5382
5383 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5384 {
5385 gimple phi = gsi_stmt (si);
5386 if (!stmt_interesting_for_vrp (phi))
5387 {
5388 tree lhs = PHI_RESULT (phi);
5389 set_value_range_to_varying (get_value_range (lhs));
5390 prop_set_simulate_again (phi, false);
5391 }
5392 else
5393 prop_set_simulate_again (phi, true);
5394 }
5395
5396 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5397 {
5398 gimple stmt = gsi_stmt (si);
5399
5400 /* If the statement is a control statement, then we must make
5401 sure it is simulated at least once. Failure to do so means
5402 that its outgoing edges will never get added. */
5403 if (stmt_ends_bb_p (stmt))
5404 prop_set_simulate_again (stmt, true);
5405 else if (!stmt_interesting_for_vrp (stmt))
5406 {
5407 ssa_op_iter i;
5408 tree def;
5409 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5410 set_value_range_to_varying (get_value_range (def));
5411 prop_set_simulate_again (stmt, false);
5412 }
5413 else
5414 prop_set_simulate_again (stmt, true);
5415 }
5416 }
5417 }
5418
5419
5420 /* Visit the assignment or call STMT. If it produces an interesting
5421 range, record the SSA name in *OUTPUT_P. */
5422
5423 static enum ssa_prop_result
5424 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5425 {
5426 tree def, lhs;
5427 ssa_op_iter iter;
5428 enum gimple_code code = gimple_code (stmt);
5429 lhs = gimple_get_lhs (stmt);
5430
5431 /* We only keep track of ranges in integral and pointer types. */
5432 if (TREE_CODE (lhs) == SSA_NAME
5433 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5434 /* It is valid to have NULL MIN/MAX values on a type. See
5435 build_range_type. */
5436 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5437 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5438 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5439 {
5440 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5441
5442 if (code == GIMPLE_CALL)
5443 extract_range_basic (&new_vr, stmt);
5444 else
5445 extract_range_from_assignment (&new_vr, stmt);
5446
5447 if (update_value_range (lhs, &new_vr))
5448 {
5449 *output_p = lhs;
5450
5451 if (dump_file && (dump_flags & TDF_DETAILS))
5452 {
5453 fprintf (dump_file, "Found new range for ");
5454 print_generic_expr (dump_file, lhs, 0);
5455 fprintf (dump_file, ": ");
5456 dump_value_range (dump_file, &new_vr);
5457 fprintf (dump_file, "\n\n");
5458 }
5459
5460 if (new_vr.type == VR_VARYING)
5461 return SSA_PROP_VARYING;
5462
5463 return SSA_PROP_INTERESTING;
5464 }
5465
5466 return SSA_PROP_NOT_INTERESTING;
5467 }
5468
5469 /* Every other statement produces no useful ranges. */
5470 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5471 set_value_range_to_varying (get_value_range (def));
5472
5473 return SSA_PROP_VARYING;
5474 }
5475
5476 /* Helper that returns the value range of the SSA_NAME with version I,
5477    or, if that range is varying or undefined, a symbolic range containing
5478    just the SSA_NAME itself.  */
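/* For instance (hypothetical SSA name): if x_3 currently has a VARYING
   range, this helper returns the symbolic range [x_3, x_3], which lets
   callers compare against other ranges that mention x_3 symbolically.  */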
5479
5480 static inline value_range_t
5481 get_vr_for_comparison (int i)
5482 {
5483 value_range_t vr = *(vr_value[i]);
5484
5485 /* If name N_i does not have a valid range, use N_i as its own
5486 range. This allows us to compare against names that may
5487 have N_i in their ranges. */
5488 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5489 {
5490 vr.type = VR_RANGE;
5491 vr.min = ssa_name (i);
5492 vr.max = ssa_name (i);
5493 }
5494
5495 return vr;
5496 }
5497
5498 /* Compare all the value ranges for names equivalent to VAR with VAL
5499 using comparison code COMP. Return the same value returned by
5500 compare_range_with_value, including the setting of
5501 *STRICT_OVERFLOW_P. */
5502
5503 static tree
5504 compare_name_with_value (enum tree_code comp, tree var, tree val,
5505 bool *strict_overflow_p)
5506 {
5507 bitmap_iterator bi;
5508 unsigned i;
5509 bitmap e;
5510 tree retval, t;
5511 int used_strict_overflow;
5512 bool sop;
5513 value_range_t equiv_vr;
5514
5515 /* Get the set of equivalences for VAR. */
5516 e = get_value_range (var)->equiv;
5517
5518 /* Start at -1. Set it to 0 if we do a comparison without relying
5519 on overflow, or 1 if all comparisons rely on overflow. */
5520 used_strict_overflow = -1;
5521
5522 /* Compare vars' value range with val. */
5523 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5524 sop = false;
5525 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5526 if (retval)
5527 used_strict_overflow = sop ? 1 : 0;
5528
5529 /* If the equiv set is empty we have done all work we need to do. */
5530 if (e == NULL)
5531 {
5532 if (retval
5533 && used_strict_overflow > 0)
5534 *strict_overflow_p = true;
5535 return retval;
5536 }
5537
5538 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5539 {
5540 equiv_vr = get_vr_for_comparison (i);
5541 sop = false;
5542 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5543 if (t)
5544 {
5545 /* If we get different answers from different members
5546 of the equivalence set this check must be in a dead
5547 code region. Folding it to a trap representation
5548 would be correct here. For now just return don't-know. */
5549 if (retval != NULL
5550 && t != retval)
5551 {
5552 retval = NULL_TREE;
5553 break;
5554 }
5555 retval = t;
5556
5557 if (!sop)
5558 used_strict_overflow = 0;
5559 else if (used_strict_overflow < 0)
5560 used_strict_overflow = 1;
5561 }
5562 }
5563
5564 if (retval
5565 && used_strict_overflow > 0)
5566 *strict_overflow_p = true;
5567
5568 return retval;
5569 }
5570
5571
5572 /* Given a comparison code COMP and names N1 and N2, compare all the
5573 ranges equivalent to N1 against all the ranges equivalent to N2
5574 to determine the value of N1 COMP N2. Return the same value
5575 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5576 whether we relied on an overflow infinity in the comparison. */
5577
5578
5579 static tree
5580 compare_names (enum tree_code comp, tree n1, tree n2,
5581 bool *strict_overflow_p)
5582 {
5583 tree t, retval;
5584 bitmap e1, e2;
5585 bitmap_iterator bi1, bi2;
5586 unsigned i1, i2;
5587 int used_strict_overflow;
5588 static bitmap_obstack *s_obstack = NULL;
5589 static bitmap s_e1 = NULL, s_e2 = NULL;
5590
5591 /* Compare the ranges of every name equivalent to N1 against the
5592 ranges of every name equivalent to N2. */
5593 e1 = get_value_range (n1)->equiv;
5594 e2 = get_value_range (n2)->equiv;
5595
5596 /* Use the fake bitmaps if e1 or e2 are not available. */
5597 if (s_obstack == NULL)
5598 {
5599 s_obstack = XNEW (bitmap_obstack);
5600 bitmap_obstack_initialize (s_obstack);
5601 s_e1 = BITMAP_ALLOC (s_obstack);
5602 s_e2 = BITMAP_ALLOC (s_obstack);
5603 }
5604 if (e1 == NULL)
5605 e1 = s_e1;
5606 if (e2 == NULL)
5607 e2 = s_e2;
5608
5609 /* Add N1 and N2 to their own set of equivalences to avoid
5610 duplicating the body of the loop just to check N1 and N2
5611 ranges. */
5612 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5613 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5614
5615 /* If the equivalence sets have a common intersection, then the two
5616 names can be compared without checking their ranges. */
5617 if (bitmap_intersect_p (e1, e2))
5618 {
5619 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5620 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5621
5622 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5623 ? boolean_true_node
5624 : boolean_false_node;
5625 }
5626
5627 /* Start at -1. Set it to 0 if we do a comparison without relying
5628 on overflow, or 1 if all comparisons rely on overflow. */
5629 used_strict_overflow = -1;
5630
5631   /* Otherwise, compare all the equivalent ranges.  N1 and N2 were
5632      already added to their own equivalence sets above, so there is no
5633      need to duplicate the loop body just to check their ranges.  */
5634 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5635 {
5636 value_range_t vr1 = get_vr_for_comparison (i1);
5637
5638 t = retval = NULL_TREE;
5639 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5640 {
5641 bool sop = false;
5642
5643 value_range_t vr2 = get_vr_for_comparison (i2);
5644
5645 t = compare_ranges (comp, &vr1, &vr2, &sop);
5646 if (t)
5647 {
5648 /* If we get different answers from different members
5649 of the equivalence set this check must be in a dead
5650 code region. Folding it to a trap representation
5651 would be correct here. For now just return don't-know. */
5652 if (retval != NULL
5653 && t != retval)
5654 {
5655 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5656 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5657 return NULL_TREE;
5658 }
5659 retval = t;
5660
5661 if (!sop)
5662 used_strict_overflow = 0;
5663 else if (used_strict_overflow < 0)
5664 used_strict_overflow = 1;
5665 }
5666 }
5667
5668 if (retval)
5669 {
5670 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5671 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5672 if (used_strict_overflow > 0)
5673 *strict_overflow_p = true;
5674 return retval;
5675 }
5676 }
5677
5678 /* None of the equivalent ranges are useful in computing this
5679 comparison. */
5680 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5681 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5682 return NULL_TREE;
5683 }
5684
5685 /* Helper function for vrp_evaluate_conditional_warnv. */
5686
5687 static tree
5688 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5689 tree op0, tree op1,
5690 bool * strict_overflow_p)
5691 {
5692 value_range_t *vr0, *vr1;
5693
5694 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5695 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5696
5697 if (vr0 && vr1)
5698 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5699 else if (vr0 && vr1 == NULL)
5700 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5701 else if (vr0 == NULL && vr1)
5702 return (compare_range_with_value
5703 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5704 return NULL;
5705 }
5706
5707 /* Helper function for vrp_evaluate_conditional_warnv. */
5708
5709 static tree
5710 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5711 tree op1, bool use_equiv_p,
5712 bool *strict_overflow_p, bool *only_ranges)
5713 {
5714 tree ret;
5715 if (only_ranges)
5716 *only_ranges = true;
5717
5718 /* We only deal with integral and pointer types. */
5719 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5720 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5721 return NULL_TREE;
5722
5723 if (use_equiv_p)
5724 {
5725 if (only_ranges
5726 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5727 (code, op0, op1, strict_overflow_p)))
5728 return ret;
5729 *only_ranges = false;
5730 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5731 return compare_names (code, op0, op1, strict_overflow_p);
5732 else if (TREE_CODE (op0) == SSA_NAME)
5733 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5734 else if (TREE_CODE (op1) == SSA_NAME)
5735 return (compare_name_with_value
5736 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5737 }
5738 else
5739 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5740 strict_overflow_p);
5741 return NULL_TREE;
5742 }
5743
5744 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5745    information.  Return NULL if the conditional cannot be evaluated.
5746 The ranges of all the names equivalent with the operands in COND
5747 will be used when trying to compute the value. If the result is
5748 based on undefined signed overflow, issue a warning if
5749 appropriate. */
5750
5751 static tree
5752 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5753 {
5754 bool sop;
5755 tree ret;
5756 bool only_ranges;
5757
5758 /* Some passes and foldings leak constants with overflow flag set
5759 into the IL. Avoid doing wrong things with these and bail out. */
5760 if ((TREE_CODE (op0) == INTEGER_CST
5761 && TREE_OVERFLOW (op0))
5762 || (TREE_CODE (op1) == INTEGER_CST
5763 && TREE_OVERFLOW (op1)))
5764 return NULL_TREE;
5765
5766 sop = false;
5767 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
5768 &only_ranges);
5769
5770 if (ret && sop)
5771 {
5772 enum warn_strict_overflow_code wc;
5773 const char* warnmsg;
5774
5775 if (is_gimple_min_invariant (ret))
5776 {
5777 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
5778 warnmsg = G_("assuming signed overflow does not occur when "
5779 "simplifying conditional to constant");
5780 }
5781 else
5782 {
5783 wc = WARN_STRICT_OVERFLOW_COMPARISON;
5784 warnmsg = G_("assuming signed overflow does not occur when "
5785 "simplifying conditional");
5786 }
5787
5788 if (issue_strict_overflow_warning (wc))
5789 {
5790 location_t location;
5791
5792 if (!gimple_has_location (stmt))
5793 location = input_location;
5794 else
5795 location = gimple_location (stmt);
5796 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
5797 }
5798 }
5799
5800 if (warn_type_limits
5801 && ret && only_ranges
5802 && TREE_CODE_CLASS (code) == tcc_comparison
5803 && TREE_CODE (op0) == SSA_NAME)
5804 {
5805 /* If the comparison is being folded and the operand on the LHS
5806 is being compared against a constant value that is outside of
5807 the natural range of OP0's type, then the predicate will
5808 always fold regardless of the value of OP0. If -Wtype-limits
5809 was specified, emit a warning. */
5810 tree type = TREE_TYPE (op0);
5811 value_range_t *vr0 = get_value_range (op0);
5812
5813 if (vr0->type != VR_VARYING
5814 && INTEGRAL_TYPE_P (type)
5815 && vrp_val_is_min (vr0->min)
5816 && vrp_val_is_max (vr0->max)
5817 && is_gimple_min_invariant (op1))
5818 {
5819 location_t location;
5820
5821 if (!gimple_has_location (stmt))
5822 location = input_location;
5823 else
5824 location = gimple_location (stmt);
5825
5826 warning_at (location, OPT_Wtype_limits,
5827 integer_zerop (ret)
5828 ? G_("comparison always false "
5829 "due to limited range of data type")
5830 : G_("comparison always true "
5831 "due to limited range of data type"));
5832 }
5833 }
5834
5835 return ret;
5836 }
5837
5838
5839 /* Visit conditional statement STMT. If we can determine which edge
5840 will be taken out of STMT's basic block, record it in
5841 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
5842 SSA_PROP_VARYING. */
5843
5844 static enum ssa_prop_result
5845 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
5846 {
5847 tree val;
5848 bool sop;
5849
5850 *taken_edge_p = NULL;
5851
5852 if (dump_file && (dump_flags & TDF_DETAILS))
5853 {
5854 tree use;
5855 ssa_op_iter i;
5856
5857 fprintf (dump_file, "\nVisiting conditional with predicate: ");
5858 print_gimple_stmt (dump_file, stmt, 0, 0);
5859 fprintf (dump_file, "\nWith known ranges\n");
5860
5861 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
5862 {
5863 fprintf (dump_file, "\t");
5864 print_generic_expr (dump_file, use, 0);
5865 fprintf (dump_file, ": ");
5866 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
5867 }
5868
5869 fprintf (dump_file, "\n");
5870 }
5871
5872 /* Compute the value of the predicate COND by checking the known
5873 ranges of each of its operands.
5874
5875 Note that we cannot evaluate all the equivalent ranges here
5876 because those ranges may not yet be final and with the current
5877 propagation strategy, we cannot determine when the value ranges
5878 of the names in the equivalence set have changed.
5879
5880 For instance, given the following code fragment
5881
5882 i_5 = PHI <8, i_13>
5883 ...
5884 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
5885 if (i_14 == 1)
5886 ...
5887
5888 Assume that on the first visit to i_14, i_5 has the temporary
5889 range [8, 8] because the second argument to the PHI function is
5890 not yet executable. We derive the range ~[0, 0] for i_14 and the
5891 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
5892 the first time, since i_14 is equivalent to the range [8, 8], we
5893 determine that the predicate is always false.
5894
5895 On the next round of propagation, i_13 is determined to be
5896 VARYING, which causes i_5 to drop down to VARYING. So, another
5897 visit to i_14 is scheduled. In this second visit, we compute the
5898 exact same range and equivalence set for i_14, namely ~[0, 0] and
5899 { i_5 }. But we did not have the previous range for i_5
5900 registered, so vrp_visit_assignment thinks that the range for
5901 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
5902 is not visited again, which stops propagation from visiting
5903 statements in the THEN clause of that if().
5904
5905 To properly fix this we would need to keep the previous range
5906 value for the names in the equivalence set. This way we would've
5907 discovered that from one visit to the other i_5 changed from
5908 range [8, 8] to VR_VARYING.
5909
5910 However, fixing this apparent limitation may not be worth the
5911 additional checking. Testing on several code bases (GCC, DLV,
5912 MICO, TRAMP3D and SPEC2000) showed that doing this results in
5913 4 more predicates folded in SPEC. */
5914 sop = false;
5915
5916 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
5917 gimple_cond_lhs (stmt),
5918 gimple_cond_rhs (stmt),
5919 false, &sop, NULL);
5920 if (val)
5921 {
5922 if (!sop)
5923 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
5924 else
5925 {
5926 if (dump_file && (dump_flags & TDF_DETAILS))
5927 fprintf (dump_file,
5928 "\nIgnoring predicate evaluation because "
5929 "it assumes that signed overflow is undefined");
5930 val = NULL_TREE;
5931 }
5932 }
5933
5934 if (dump_file && (dump_flags & TDF_DETAILS))
5935 {
5936 fprintf (dump_file, "\nPredicate evaluates to: ");
5937 if (val == NULL_TREE)
5938 fprintf (dump_file, "DON'T KNOW\n");
5939 else
5940 print_generic_stmt (dump_file, val, 0);
5941 }
5942
5943 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5944 }
5945
5946 /* Searches the case label vector of switch statement STMT for the index
5947    *IDX of the CASE_LABEL that includes the value VAL.  The search is
5948    restricted to the range [START_IDX, n - 1], where n is the number of labels.
5949
5950 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5951 returned.
5952
5953 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5954 it is placed in IDX and false is returned.
5955
5956    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5957 returned. */
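/* For illustration only (hypothetical switch): with the default label at
   index 0 and case labels 1, 5 ... 7 and 10 at indices 1, 2 and 3
   (n == 4), searching with START_IDX == 1 for VAL == 6 stores 2 in *IDX
   and returns true; VAL == 8 stores 3 in *IDX and returns false; and
   VAL == 11 stores n == 4 in *IDX and returns false.  */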
5958
5959 static bool
5960 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
5961 {
5962 size_t n = gimple_switch_num_labels (stmt);
5963 size_t low, high;
5964
5965 /* Find case label for minimum of the value range or the next one.
5966 At each iteration we are searching in [low, high - 1]. */
5967
5968 for (low = start_idx, high = n; high != low; )
5969 {
5970 tree t;
5971 int cmp;
5972 /* Note that i != high, so we never ask for n. */
5973 size_t i = (high + low) / 2;
5974 t = gimple_switch_label (stmt, i);
5975
5976 /* Cache the result of comparing CASE_LOW and val. */
5977 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5978
5979 if (cmp == 0)
5980 {
5981 /* Ranges cannot be empty. */
5982 *idx = i;
5983 return true;
5984 }
5985 else if (cmp > 0)
5986 high = i;
5987 else
5988 {
5989 low = i + 1;
5990 if (CASE_HIGH (t) != NULL
5991 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5992 {
5993 *idx = i;
5994 return true;
5995 }
5996 }
5997 }
5998
5999 *idx = high;
6000 return false;
6001 }
6002
6003 /* Searches the case label vector of switch statement STMT for the range of
6004    CASE_LABELs that is used for values between MIN and MAX.  The first index
6005    is placed in MIN_IDX, the last in MAX_IDX.  If the range of CASE_LABELs is empty
6006 then MAX_IDX < MIN_IDX.
6007 Returns true if the default label is not needed. */
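/* Continuing the hypothetical switch above (default label plus cases 1,
   5 ... 7 and 10), a value range [6, 10] yields *MIN_IDX == 2 and
   *MAX_IDX == 3; because the gap 8 ... 9 falls through to the default
   label, the function returns false.  */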
6008
6009 static bool
6010 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6011 size_t *max_idx)
6012 {
6013 size_t i, j;
6014 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6015 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6016
6017 if (i == j
6018 && min_take_default
6019 && max_take_default)
6020 {
6021 /* Only the default case label reached.
6022 Return an empty range. */
6023 *min_idx = 1;
6024 *max_idx = 0;
6025 return false;
6026 }
6027 else
6028 {
6029 bool take_default = min_take_default || max_take_default;
6030 tree low, high;
6031 size_t k;
6032
6033 if (max_take_default)
6034 j--;
6035
6036       /* If the case label range is contiguous, we do not need
6037 the default case label. Verify that. */
6038 high = CASE_LOW (gimple_switch_label (stmt, i));
6039 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6040 high = CASE_HIGH (gimple_switch_label (stmt, i));
6041 for (k = i + 1; k <= j; ++k)
6042 {
6043 low = CASE_LOW (gimple_switch_label (stmt, k));
6044 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
6045 {
6046 take_default = true;
6047 break;
6048 }
6049 high = low;
6050 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6051 high = CASE_HIGH (gimple_switch_label (stmt, k));
6052 }
6053
6054 *min_idx = i;
6055 *max_idx = j;
6056 return !take_default;
6057 }
6058 }
6059
6060 /* Visit switch statement STMT. If we can determine which edge
6061 will be taken out of STMT's basic block, record it in
6062 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6063 SSA_PROP_VARYING. */
6064
6065 static enum ssa_prop_result
6066 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6067 {
6068 tree op, val;
6069 value_range_t *vr;
6070 size_t i = 0, j = 0;
6071 bool take_default;
6072
6073 *taken_edge_p = NULL;
6074 op = gimple_switch_index (stmt);
6075 if (TREE_CODE (op) != SSA_NAME)
6076 return SSA_PROP_VARYING;
6077
6078 vr = get_value_range (op);
6079 if (dump_file && (dump_flags & TDF_DETAILS))
6080 {
6081 fprintf (dump_file, "\nVisiting switch expression with operand ");
6082 print_generic_expr (dump_file, op, 0);
6083 fprintf (dump_file, " with known range ");
6084 dump_value_range (dump_file, vr);
6085 fprintf (dump_file, "\n");
6086 }
6087
6088 if (vr->type != VR_RANGE
6089 || symbolic_range_p (vr))
6090 return SSA_PROP_VARYING;
6091
6092 /* Find the single edge that is taken from the switch expression. */
6093 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6094
6095   /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
6096      label.  */
6097 if (j < i)
6098 {
6099 gcc_assert (take_default);
6100 val = gimple_switch_default_label (stmt);
6101 }
6102 else
6103 {
6104       /* Check if the labels with indices i to j, and possibly the default
6105 	 label, all reach the same destination label.  */
6106
6107 val = gimple_switch_label (stmt, i);
6108 if (take_default
6109 && CASE_LABEL (gimple_switch_default_label (stmt))
6110 != CASE_LABEL (val))
6111 {
6112 if (dump_file && (dump_flags & TDF_DETAILS))
6113 fprintf (dump_file, " not a single destination for this "
6114 "range\n");
6115 return SSA_PROP_VARYING;
6116 }
6117 for (++i; i <= j; ++i)
6118 {
6119 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6120 {
6121 if (dump_file && (dump_flags & TDF_DETAILS))
6122 fprintf (dump_file, " not a single destination for this "
6123 "range\n");
6124 return SSA_PROP_VARYING;
6125 }
6126 }
6127 }
6128
6129 *taken_edge_p = find_edge (gimple_bb (stmt),
6130 label_to_block (CASE_LABEL (val)));
6131
6132 if (dump_file && (dump_flags & TDF_DETAILS))
6133 {
6134 fprintf (dump_file, " will take edge to ");
6135 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6136 }
6137
6138 return SSA_PROP_INTERESTING;
6139 }
6140
6141
6142 /* Evaluate statement STMT. If the statement produces a useful range,
6143 return SSA_PROP_INTERESTING and record the SSA name with the
6144 interesting range into *OUTPUT_P.
6145
6146 If STMT is a conditional branch and we can determine its truth
6147 value, the taken edge is recorded in *TAKEN_EDGE_P.
6148
6149 If STMT produces a varying value, return SSA_PROP_VARYING. */
6150
6151 static enum ssa_prop_result
6152 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6153 {
6154 tree def;
6155 ssa_op_iter iter;
6156
6157 if (dump_file && (dump_flags & TDF_DETAILS))
6158 {
6159 fprintf (dump_file, "\nVisiting statement:\n");
6160 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6161 fprintf (dump_file, "\n");
6162 }
6163
6164 if (!stmt_interesting_for_vrp (stmt))
6165 gcc_assert (stmt_ends_bb_p (stmt));
6166 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6167 {
6168 /* In general, assignments with virtual operands are not useful
6169 for deriving ranges, with the obvious exception of calls to
6170 builtin functions. */
6171
6172 if ((is_gimple_call (stmt)
6173 && gimple_call_fndecl (stmt) != NULL_TREE
6174 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6175 || !gimple_vuse (stmt))
6176 return vrp_visit_assignment_or_call (stmt, output_p);
6177 }
6178 else if (gimple_code (stmt) == GIMPLE_COND)
6179 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6180 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6181 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6182
6183 /* All other statements produce nothing of interest for VRP, so mark
6184 their outputs varying and prevent further simulation. */
6185 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6186 set_value_range_to_varying (get_value_range (def));
6187
6188 return SSA_PROP_VARYING;
6189 }
6190
6191
6192 /* Meet operation for value ranges. Given two value ranges VR0 and
6193 VR1, store in VR0 a range that contains both VR0 and VR1. This
6194 may not be the smallest possible such range. */
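/* For illustration: the meet of [1, 5] and [3, 10] is [1, 10], and the
   meet of [0, 2] and [100, 200] is [0, 200].  The result is a convex
   hull, so it may contain values that belong to neither input range.  */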
6195
6196 static void
6197 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6198 {
6199 if (vr0->type == VR_UNDEFINED)
6200 {
6201 copy_value_range (vr0, vr1);
6202 return;
6203 }
6204
6205 if (vr1->type == VR_UNDEFINED)
6206 {
6207 /* Nothing to do. VR0 already has the resulting range. */
6208 return;
6209 }
6210
6211 if (vr0->type == VR_VARYING)
6212 {
6213 /* Nothing to do. VR0 already has the resulting range. */
6214 return;
6215 }
6216
6217 if (vr1->type == VR_VARYING)
6218 {
6219 set_value_range_to_varying (vr0);
6220 return;
6221 }
6222
6223 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6224 {
6225 int cmp;
6226 tree min, max;
6227
6228 /* Compute the convex hull of the ranges. The lower limit of
6229 the new range is the minimum of the two ranges. If they
6230 cannot be compared, then give up. */
6231 cmp = compare_values (vr0->min, vr1->min);
6232 if (cmp == 0 || cmp == 1)
6233 min = vr1->min;
6234 else if (cmp == -1)
6235 min = vr0->min;
6236 else
6237 goto give_up;
6238
6239 /* Similarly, the upper limit of the new range is the maximum
6240 of the two ranges. If they cannot be compared, then
6241 give up. */
6242 cmp = compare_values (vr0->max, vr1->max);
6243 if (cmp == 0 || cmp == -1)
6244 max = vr1->max;
6245 else if (cmp == 1)
6246 max = vr0->max;
6247 else
6248 goto give_up;
6249
6250 /* Check for useless ranges. */
6251 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6252 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6253 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6254 goto give_up;
6255
6256 /* The resulting set of equivalences is the intersection of
6257 the two sets. */
6258 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6259 bitmap_and_into (vr0->equiv, vr1->equiv);
6260 else if (vr0->equiv && !vr1->equiv)
6261 bitmap_clear (vr0->equiv);
6262
6263 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6264 }
6265 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6266 {
6267 /* Two anti-ranges meet only if their complements intersect.
6268 	 Only handle the case of identical, single-value anti-ranges.  */
6269 if (compare_values (vr0->min, vr1->min) == 0
6270 && compare_values (vr0->max, vr1->max) == 0
6271 && compare_values (vr0->min, vr0->max) == 0)
6272 {
6273 /* The resulting set of equivalences is the intersection of
6274 the two sets. */
6275 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6276 bitmap_and_into (vr0->equiv, vr1->equiv);
6277 else if (vr0->equiv && !vr1->equiv)
6278 bitmap_clear (vr0->equiv);
6279 }
6280 else
6281 goto give_up;
6282 }
6283 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6284 {
6285 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6286 only handle the case where the ranges have an empty intersection.
6287 The result of the meet operation is the anti-range. */
6288 if (!symbolic_range_p (vr0)
6289 && !symbolic_range_p (vr1)
6290 && !value_ranges_intersect_p (vr0, vr1))
6291 {
6292 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6293 set. We need to compute the intersection of the two
6294 equivalence sets. */
6295 if (vr1->type == VR_ANTI_RANGE)
6296 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6297
6298 /* The resulting set of equivalences is the intersection of
6299 the two sets. */
6300 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6301 bitmap_and_into (vr0->equiv, vr1->equiv);
6302 else if (vr0->equiv && !vr1->equiv)
6303 bitmap_clear (vr0->equiv);
6304 }
6305 else
6306 goto give_up;
6307 }
6308 else
6309 gcc_unreachable ();
6310
6311 return;
6312
6313 give_up:
6314 /* Failed to find an efficient meet. Before giving up and setting
6315 the result to VARYING, see if we can at least derive a useful
6316 anti-range. FIXME, all this nonsense about distinguishing
6317 anti-ranges from ranges is necessary because of the odd
6318 semantics of range_includes_zero_p and friends. */
6319 if (!symbolic_range_p (vr0)
6320 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6321 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6322 && !symbolic_range_p (vr1)
6323 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6324 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6325 {
6326 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6327
6328 /* Since this meet operation did not result from the meeting of
6329 two equivalent names, VR0 cannot have any equivalences. */
6330 if (vr0->equiv)
6331 bitmap_clear (vr0->equiv);
6332 }
6333 else
6334 set_value_range_to_varying (vr0);
6335 }
6336
6337
6338 /* Visit all arguments for PHI node PHI that flow through executable
6339 edges. If a valid value range can be derived from all the incoming
6340 value ranges, set a new range for the LHS of PHI. */
6341
6342 static enum ssa_prop_result
6343 vrp_visit_phi_node (gimple phi)
6344 {
6345 size_t i;
6346 tree lhs = PHI_RESULT (phi);
6347 value_range_t *lhs_vr = get_value_range (lhs);
6348 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6349 int edges, old_edges;
6350 struct loop *l;
6351
6352 copy_value_range (&vr_result, lhs_vr);
6353
6354 if (dump_file && (dump_flags & TDF_DETAILS))
6355 {
6356 fprintf (dump_file, "\nVisiting PHI node: ");
6357 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6358 }
6359
6360 edges = 0;
6361 for (i = 0; i < gimple_phi_num_args (phi); i++)
6362 {
6363 edge e = gimple_phi_arg_edge (phi, i);
6364
6365 if (dump_file && (dump_flags & TDF_DETAILS))
6366 {
6367 fprintf (dump_file,
6368 "\n Argument #%d (%d -> %d %sexecutable)\n",
6369 (int) i, e->src->index, e->dest->index,
6370 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6371 }
6372
6373 if (e->flags & EDGE_EXECUTABLE)
6374 {
6375 tree arg = PHI_ARG_DEF (phi, i);
6376 value_range_t vr_arg;
6377
6378 ++edges;
6379
6380 if (TREE_CODE (arg) == SSA_NAME)
6381 {
6382 vr_arg = *(get_value_range (arg));
6383 }
6384 else
6385 {
6386 if (is_overflow_infinity (arg))
6387 {
6388 arg = copy_node (arg);
6389 TREE_OVERFLOW (arg) = 0;
6390 }
6391
6392 vr_arg.type = VR_RANGE;
6393 vr_arg.min = arg;
6394 vr_arg.max = arg;
6395 vr_arg.equiv = NULL;
6396 }
6397
6398 if (dump_file && (dump_flags & TDF_DETAILS))
6399 {
6400 fprintf (dump_file, "\t");
6401 print_generic_expr (dump_file, arg, dump_flags);
6402 fprintf (dump_file, "\n\tValue: ");
6403 dump_value_range (dump_file, &vr_arg);
6404 fprintf (dump_file, "\n");
6405 }
6406
6407 vrp_meet (&vr_result, &vr_arg);
6408
6409 if (vr_result.type == VR_VARYING)
6410 break;
6411 }
6412 }
6413
6414   /* If this is a loop PHI node, SCEV may know more about its
6415      value range.  */
6416 if (current_loops
6417 && (l = loop_containing_stmt (phi))
6418 && l->header == gimple_bb (phi))
6419 adjust_range_with_scev (&vr_result, l, phi, lhs);
6420
6421 if (vr_result.type == VR_VARYING)
6422 goto varying;
6423
6424 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6425 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6426
6427 /* To prevent infinite iterations in the algorithm, derive ranges
6428 when the new value is slightly bigger or smaller than the
6429 previous one. We don't do this if we have seen a new executable
6430 edge; this helps us avoid an overflow infinity for conditionals
6431 which are not in a loop. */
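  /* Illustrative sketch (hypothetical SSA names): for a loop header PHI
     i_1 = PHI <0, i_7> with i_7 = i_1 + 1 in the loop body, successive
     visits would otherwise yield [0, 0], [0, 1], [0, 2], ... without
     converging; the widening below jumps directly to +INF (or an
     overflow infinity) instead.  */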
6432 if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
6433 && edges <= old_edges)
6434 {
6435 if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
6436 {
6437 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6438 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6439
6440 /* If the new minimum is smaller or larger than the previous
6441 one, go all the way to -INF. In the first case, to avoid
6442 iterating millions of times to reach -INF, and in the
6443 other case to avoid infinite bouncing between different
6444 minimums. */
6445 if (cmp_min > 0 || cmp_min < 0)
6446 {
6447 /* If we will end up with a (-INF, +INF) range, set it to
6448 VARYING. Same if the previous max value was invalid for
6449 the type and we'd end up with vr_result.min > vr_result.max. */
6450 if (vrp_val_is_max (vr_result.max)
6451 || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)),
6452 vr_result.max) > 0)
6453 goto varying;
6454
6455 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6456 || !vrp_var_may_overflow (lhs, phi))
6457 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6458 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6459 vr_result.min =
6460 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6461 else
6462 goto varying;
6463 }
6464
6465 /* Similarly, if the new maximum is smaller or larger than
6466 the previous one, go all the way to +INF. */
6467 if (cmp_max < 0 || cmp_max > 0)
6468 {
6469 /* If we will end up with a (-INF, +INF) range, set it to
6470 VARYING. Same if the previous min value was invalid for
6471 the type and we'd end up with vr_result.max < vr_result.min. */
6472 if (vrp_val_is_min (vr_result.min)
6473 || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)),
6474 vr_result.min) < 0)
6475 goto varying;
6476
6477 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6478 || !vrp_var_may_overflow (lhs, phi))
6479 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6480 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6481 vr_result.max =
6482 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6483 else
6484 goto varying;
6485 }
6486 }
6487 }
6488
6489 /* If the new range is different than the previous value, keep
6490 iterating. */
6491 if (update_value_range (lhs, &vr_result))
6492 {
6493 if (dump_file && (dump_flags & TDF_DETAILS))
6494 {
6495 fprintf (dump_file, "Found new range for ");
6496 print_generic_expr (dump_file, lhs, 0);
6497 fprintf (dump_file, ": ");
6498 dump_value_range (dump_file, &vr_result);
6499 fprintf (dump_file, "\n\n");
6500 }
6501
6502 return SSA_PROP_INTERESTING;
6503 }
6504
6505 /* Nothing changed, don't add outgoing edges. */
6506 return SSA_PROP_NOT_INTERESTING;
6507
6508 /* No match found. Set the LHS to VARYING. */
6509 varying:
6510 set_value_range_to_varying (lhs_vr);
6511 return SSA_PROP_VARYING;
6512 }
6513
6514 /* Simplify boolean operations if the source is known
6515    to already be a boolean value.  */
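/* For example, if b_3 and c_5 are both known to lie in [0, 1], then
   b_3 && c_5 can be rewritten as b_3 & c_5, and b_3 != 0 as a plain
   copy of b_3.  (The SSA names here are hypothetical.)  */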
6516 static bool
6517 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6518 {
6519 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6520 tree val = NULL;
6521 tree op0, op1;
6522 value_range_t *vr;
6523 bool sop = false;
6524 bool need_conversion;
6525
6526 op0 = gimple_assign_rhs1 (stmt);
6527 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6528 {
6529 if (TREE_CODE (op0) != SSA_NAME)
6530 return false;
6531 vr = get_value_range (op0);
6532
6533 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6534 if (!val || !integer_onep (val))
6535 return false;
6536
6537 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6538 if (!val || !integer_onep (val))
6539 return false;
6540 }
6541
6542 if (rhs_code == TRUTH_NOT_EXPR)
6543 {
6544 rhs_code = NE_EXPR;
6545 op1 = build_int_cst (TREE_TYPE (op0), 1);
6546 }
6547 else
6548 {
6549 op1 = gimple_assign_rhs2 (stmt);
6550
6551 /* Reduce number of cases to handle. */
6552 if (is_gimple_min_invariant (op1))
6553 {
6554 /* Exclude anything that should have been already folded. */
6555 if (rhs_code != EQ_EXPR
6556 && rhs_code != NE_EXPR
6557 && rhs_code != TRUTH_XOR_EXPR)
6558 return false;
6559
6560 if (!integer_zerop (op1)
6561 && !integer_onep (op1)
6562 && !integer_all_onesp (op1))
6563 return false;
6564
6565 /* Limit the number of cases we have to consider. */
6566 if (rhs_code == EQ_EXPR)
6567 {
6568 rhs_code = NE_EXPR;
6569 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6570 }
6571 }
6572 else
6573 {
6574 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6575 if (rhs_code == EQ_EXPR)
6576 return false;
6577
6578 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6579 {
6580 vr = get_value_range (op1);
6581 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6582 if (!val || !integer_onep (val))
6583 return false;
6584
6585 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6586 if (!val || !integer_onep (val))
6587 return false;
6588 }
6589 }
6590 }
6591
6592 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6593 {
6594 location_t location;
6595
6596 if (!gimple_has_location (stmt))
6597 location = input_location;
6598 else
6599 location = gimple_location (stmt);
6600
6601 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6602 warning_at (location, OPT_Wstrict_overflow,
6603 _("assuming signed overflow does not occur when "
6604 "simplifying && or || to & or |"));
6605 else
6606 warning_at (location, OPT_Wstrict_overflow,
6607 _("assuming signed overflow does not occur when "
6608 "simplifying ==, != or ! to identity or ^"));
6609 }
6610
6611 need_conversion =
6612 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6613 TREE_TYPE (op0));
6614
6615 /* Make sure to not sign-extend -1 as a boolean value. */
6616 if (need_conversion
6617 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6618 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6619 return false;
6620
6621 switch (rhs_code)
6622 {
6623 case TRUTH_AND_EXPR:
6624 rhs_code = BIT_AND_EXPR;
6625 break;
6626 case TRUTH_OR_EXPR:
6627 rhs_code = BIT_IOR_EXPR;
6628 break;
6629 case TRUTH_XOR_EXPR:
6630 case NE_EXPR:
6631 if (integer_zerop (op1))
6632 {
6633 gimple_assign_set_rhs_with_ops (gsi,
6634 need_conversion ? NOP_EXPR : SSA_NAME,
6635 op0, NULL);
6636 update_stmt (gsi_stmt (*gsi));
6637 return true;
6638 }
6639
6640 rhs_code = BIT_XOR_EXPR;
6641 break;
6642 default:
6643 gcc_unreachable ();
6644 }
6645
6646 if (need_conversion)
6647 return false;
6648
6649 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6650 update_stmt (gsi_stmt (*gsi));
6651 return true;
6652 }
6653
6654 /* Simplify a division or modulo operator to a right shift or
6655 bitwise and if the first operand is unsigned or is greater
6656 than zero and the second operand is an exact power of two. */
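/* For instance, for an unsigned (or known non-negative) x_2, a
   hypothetical statement y_3 = x_2 / 16 is rewritten as y_3 = x_2 >> 4,
   and y_3 = x_2 % 16 as y_3 = x_2 & 15.  */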
6657
6658 static bool
6659 simplify_div_or_mod_using_ranges (gimple stmt)
6660 {
6661 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6662 tree val = NULL;
6663 tree op0 = gimple_assign_rhs1 (stmt);
6664 tree op1 = gimple_assign_rhs2 (stmt);
6665 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6666
6667 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6668 {
6669 val = integer_one_node;
6670 }
6671 else
6672 {
6673 bool sop = false;
6674
6675 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6676
6677 if (val
6678 && sop
6679 && integer_onep (val)
6680 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6681 {
6682 location_t location;
6683
6684 if (!gimple_has_location (stmt))
6685 location = input_location;
6686 else
6687 location = gimple_location (stmt);
6688 warning_at (location, OPT_Wstrict_overflow,
6689 "assuming signed overflow does not occur when "
6690 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6691 }
6692 }
6693
6694 if (val && integer_onep (val))
6695 {
6696 tree t;
6697
6698 if (rhs_code == TRUNC_DIV_EXPR)
6699 {
6700 t = build_int_cst (NULL_TREE, tree_log2 (op1));
6701 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6702 gimple_assign_set_rhs1 (stmt, op0);
6703 gimple_assign_set_rhs2 (stmt, t);
6704 }
6705 else
6706 {
6707 t = build_int_cst (TREE_TYPE (op1), 1);
6708 t = int_const_binop (MINUS_EXPR, op1, t, 0);
6709 t = fold_convert (TREE_TYPE (op0), t);
6710
6711 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6712 gimple_assign_set_rhs1 (stmt, op0);
6713 gimple_assign_set_rhs2 (stmt, t);
6714 }
6715
6716 update_stmt (stmt);
6717 return true;
6718 }
6719
6720 return false;
6721 }
6722
6723 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6724 ABS_EXPR. If the operand is <= 0, then simplify the
6725 ABS_EXPR into a NEGATE_EXPR. */
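/* For instance, if x_4 is known to lie in [1, 100], y_6 = ABS_EXPR <x_4>
   becomes a plain copy y_6 = x_4; if x_4 lies in [-100, -1], it becomes
   y_6 = -x_4.  (The SSA names here are hypothetical.)  */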
6726
6727 static bool
6728 simplify_abs_using_ranges (gimple stmt)
6729 {
6730 tree val = NULL;
6731 tree op = gimple_assign_rhs1 (stmt);
6732 tree type = TREE_TYPE (op);
6733 value_range_t *vr = get_value_range (op);
6734
6735 if (TYPE_UNSIGNED (type))
6736 {
6737 val = integer_zero_node;
6738 }
6739 else if (vr)
6740 {
6741 bool sop = false;
6742
6743 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6744 if (!val)
6745 {
6746 sop = false;
6747 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6748 &sop);
6749
6750 if (val)
6751 {
6752 if (integer_zerop (val))
6753 val = integer_one_node;
6754 else if (integer_onep (val))
6755 val = integer_zero_node;
6756 }
6757 }
6758
6759 if (val
6760 && (integer_onep (val) || integer_zerop (val)))
6761 {
6762 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6763 {
6764 location_t location;
6765
6766 if (!gimple_has_location (stmt))
6767 location = input_location;
6768 else
6769 location = gimple_location (stmt);
6770 warning_at (location, OPT_Wstrict_overflow,
6771 "assuming signed overflow does not occur when "
6772 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
6773 }
6774
6775 gimple_assign_set_rhs1 (stmt, op);
6776 if (integer_onep (val))
6777 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6778 else
6779 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6780 update_stmt (stmt);
6781 return true;
6782 }
6783 }
6784
6785 return false;
6786 }
6787
6788 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
6789 a known value range VR.
6790
6791 If there is one and only one value which will satisfy the
6792 conditional, then return that value. Else return NULL. */
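/* For example, if x_2 is known to lie in [5, 10], then for the test
   x_2 <= 5 the only satisfying value is 5, so this function returns 5
   and simplify_cond_using_ranges can rewrite the condition as
   x_2 == 5.  (The SSA name here is hypothetical.)  */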
6793
6794 static tree
6795 test_for_singularity (enum tree_code cond_code, tree op0,
6796 tree op1, value_range_t *vr)
6797 {
6798 tree min = NULL;
6799 tree max = NULL;
6800
6801   /* Extract the minimum/maximum values which satisfy the
6802      conditional as it was written.  */
6803 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
6804 {
6805 /* This should not be negative infinity; there is no overflow
6806 here. */
6807 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
6808
6809 max = op1;
6810 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
6811 {
6812 tree one = build_int_cst (TREE_TYPE (op0), 1);
6813 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
6814 if (EXPR_P (max))
6815 TREE_NO_WARNING (max) = 1;
6816 }
6817 }
6818 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
6819 {
6820 /* This should not be positive infinity; there is no overflow
6821 here. */
6822 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
6823
6824 min = op1;
6825 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
6826 {
6827 tree one = build_int_cst (TREE_TYPE (op0), 1);
6828 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
6829 if (EXPR_P (min))
6830 TREE_NO_WARNING (min) = 1;
6831 }
6832 }
6833
6834 /* Now refine the minimum and maximum values using any
6835 value range information we have for op0. */
6836 if (min && max)
6837 {
6838 if (compare_values (vr->min, min) == 1)
6839 min = vr->min;
6840 if (compare_values (vr->max, max) == -1)
6841 max = vr->max;
6842
6843 /* If the new min/max values have converged to a single value,
6844 then there is only one value which can satisfy the condition,
6845 return that value. */
6846 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
6847 return min;
6848 }
6849 return NULL;
6850 }
6851
6852 /* Simplify a conditional using a relational operator to an equality
6853 test if the range information indicates only one value can satisfy
6854 the original conditional. */
6855
6856 static bool
6857 simplify_cond_using_ranges (gimple stmt)
6858 {
6859 tree op0 = gimple_cond_lhs (stmt);
6860 tree op1 = gimple_cond_rhs (stmt);
6861 enum tree_code cond_code = gimple_cond_code (stmt);
6862
6863 if (cond_code != NE_EXPR
6864 && cond_code != EQ_EXPR
6865 && TREE_CODE (op0) == SSA_NAME
6866 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
6867 && is_gimple_min_invariant (op1))
6868 {
6869 value_range_t *vr = get_value_range (op0);
6870
6871 /* If we have range information for OP0, then we might be
6872 able to simplify this conditional. */
6873 if (vr->type == VR_RANGE)
6874 {
6875 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
6876
6877 if (new_tree)
6878 {
6879 if (dump_file)
6880 {
6881 fprintf (dump_file, "Simplified relational ");
6882 print_gimple_stmt (dump_file, stmt, 0, 0);
6883 fprintf (dump_file, " into ");
6884 }
6885
6886 gimple_cond_set_code (stmt, EQ_EXPR);
6887 gimple_cond_set_lhs (stmt, op0);
6888 gimple_cond_set_rhs (stmt, new_tree);
6889
6890 update_stmt (stmt);
6891
6892 if (dump_file)
6893 {
6894 print_gimple_stmt (dump_file, stmt, 0, 0);
6895 fprintf (dump_file, "\n");
6896 }
6897
6898 return true;
6899 }
6900
6901 /* Try again after inverting the condition. We only deal
6902 with integral types here, so no need to worry about
6903 issues with inverting FP comparisons. */
6904 cond_code = invert_tree_comparison (cond_code, false);
6905 new_tree = test_for_singularity (cond_code, op0, op1, vr);
6906
6907 if (new_tree)
6908 {
6909 if (dump_file)
6910 {
6911 fprintf (dump_file, "Simplified relational ");
6912 print_gimple_stmt (dump_file, stmt, 0, 0);
6913 fprintf (dump_file, " into ");
6914 }
6915
6916 gimple_cond_set_code (stmt, NE_EXPR);
6917 gimple_cond_set_lhs (stmt, op0);
6918 gimple_cond_set_rhs (stmt, new_tree);
6919
6920 update_stmt (stmt);
6921
6922 if (dump_file)
6923 {
6924 print_gimple_stmt (dump_file, stmt, 0, 0);
6925 fprintf (dump_file, "\n");
6926 }
6927
6928 return true;
6929 }
6930 }
6931 }
6932
6933 return false;
6934 }
6935
6936 /* Simplify a switch statement using the value range of the switch
6937 argument. */
6938
6939 static bool
6940 simplify_switch_using_ranges (gimple stmt)
6941 {
6942 tree op = gimple_switch_index (stmt);
6943 value_range_t *vr;
6944 bool take_default;
6945 edge e;
6946 edge_iterator ei;
6947 size_t i = 0, j = 0, n, n2;
6948 tree vec2;
6949 switch_update su;
6950
6951 if (TREE_CODE (op) == SSA_NAME)
6952 {
6953 vr = get_value_range (op);
6954
6955 /* We can only handle integer ranges. */
6956 if (vr->type != VR_RANGE
6957 || symbolic_range_p (vr))
6958 return false;
6959
6960 /* Find case label for min/max of the value range. */
6961 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6962 }
6963 else if (TREE_CODE (op) == INTEGER_CST)
6964 {
6965 take_default = !find_case_label_index (stmt, 1, op, &i);
6966 if (take_default)
6967 {
6968 i = 1;
6969 j = 0;
6970 }
6971 else
6972 {
6973 j = i;
6974 }
6975 }
6976 else
6977 return false;
6978
6979 n = gimple_switch_num_labels (stmt);
6980
6981   /* Bail out if all edges would be taken anyway.  */
6982 if (i == 1
6983 && j == n - 1
6984 && take_default)
6985 return false;
6986
6987 /* Build a new vector of taken case labels. */
6988 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
6989 n2 = 0;
6990
6991 /* Add the default edge, if necessary. */
6992 if (take_default)
6993 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
6994
6995 for (; i <= j; ++i, ++n2)
6996 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
6997
6998 /* Mark needed edges. */
6999 for (i = 0; i < n2; ++i)
7000 {
7001 e = find_edge (gimple_bb (stmt),
7002 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7003 e->aux = (void *)-1;
7004 }
7005
7006   /* Queue unneeded edges for later removal.  */
7007 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7008 {
7009 if (e->aux == (void *)-1)
7010 {
7011 e->aux = NULL;
7012 continue;
7013 }
7014
7015 if (dump_file && (dump_flags & TDF_DETAILS))
7016 {
7017 fprintf (dump_file, "removing unreachable case label\n");
7018 }
7019 VEC_safe_push (edge, heap, to_remove_edges, e);
7020 e->flags &= ~EDGE_EXECUTABLE;
7021 }
7022
7023 /* And queue an update for the stmt. */
7024 su.stmt = stmt;
7025 su.vec = vec2;
7026 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7027 return false;
7028 }
7029
7030 /* Simplify STMT using ranges if possible. */
7031
7032 static bool
7033 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7034 {
7035 gimple stmt = gsi_stmt (*gsi);
7036 if (is_gimple_assign (stmt))
7037 {
7038 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7039
7040 switch (rhs_code)
7041 {
7042 case EQ_EXPR:
7043 case NE_EXPR:
7044 case TRUTH_NOT_EXPR:
7045 case TRUTH_AND_EXPR:
7046 case TRUTH_OR_EXPR:
7047 case TRUTH_XOR_EXPR:
7048 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7049 	     or identity if the RHS is zero or one, and the LHS is known
7050 	     to be a boolean value.  Transform all TRUTH_*_EXPR into
7051 BIT_*_EXPR if both arguments are known to be boolean values. */
7052 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7053 return simplify_truth_ops_using_ranges (gsi, stmt);
7054 break;
7055
7056 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7057 and BIT_AND_EXPR respectively if the first operand is greater
7058 than zero and the second operand is an exact power of two. */
7059 case TRUNC_DIV_EXPR:
7060 case TRUNC_MOD_EXPR:
7061 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7062 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7063 return simplify_div_or_mod_using_ranges (stmt);
7064 break;
7065
7066 /* Transform ABS (X) into X or -X as appropriate. */
7067 case ABS_EXPR:
7068 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
7069 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7070 return simplify_abs_using_ranges (stmt);
7071 break;
7072
7073 default:
7074 break;
7075 }
7076 }
7077 else if (gimple_code (stmt) == GIMPLE_COND)
7078 return simplify_cond_using_ranges (stmt);
7079 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7080 return simplify_switch_using_ranges (stmt);
7081
7082 return false;
7083 }
7084
7085 /* If the statement pointed to by SI has a predicate whose value can be
7086 computed using the value range information computed by VRP, compute
7087 its value and return true. Otherwise, return false. */
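/* For example, if VRP has computed the range ~[0, 0] for p_4, a
   predicate such as if (p_4 != 0) evaluates to true and the GIMPLE_COND
   is made unconditionally true.  (The SSA name here is hypothetical.)  */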
7088
7089 static bool
7090 fold_predicate_in (gimple_stmt_iterator *si)
7091 {
7092 bool assignment_p = false;
7093 tree val;
7094 gimple stmt = gsi_stmt (*si);
7095
7096 if (is_gimple_assign (stmt)
7097 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7098 {
7099 assignment_p = true;
7100 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7101 gimple_assign_rhs1 (stmt),
7102 gimple_assign_rhs2 (stmt),
7103 stmt);
7104 }
7105 else if (gimple_code (stmt) == GIMPLE_COND)
7106 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7107 gimple_cond_lhs (stmt),
7108 gimple_cond_rhs (stmt),
7109 stmt);
7110 else
7111 return false;
7112
7113 if (val)
7114 {
7115 if (assignment_p)
7116 val = fold_convert (gimple_expr_type (stmt), val);
7117
7118 if (dump_file)
7119 {
7120 fprintf (dump_file, "Folding predicate ");
7121 print_gimple_expr (dump_file, stmt, 0, 0);
7122 fprintf (dump_file, " to ");
7123 print_generic_expr (dump_file, val, 0);
7124 fprintf (dump_file, "\n");
7125 }
7126
7127 if (is_gimple_assign (stmt))
7128 gimple_assign_set_rhs_from_tree (si, val);
7129 else
7130 {
7131 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7132 if (integer_zerop (val))
7133 gimple_cond_make_false (stmt);
7134 else if (integer_onep (val))
7135 gimple_cond_make_true (stmt);
7136 else
7137 gcc_unreachable ();
7138 }
7139
7140 return true;
7141 }
7142
7143 return false;
7144 }
7145
7146 /* Callback for substitute_and_fold folding the stmt at *SI. */
7147
7148 static bool
7149 vrp_fold_stmt (gimple_stmt_iterator *si)
7150 {
7151 if (fold_predicate_in (si))
7152 return true;
7153
7154 return simplify_stmt_using_ranges (si);
7155 }
7156
7157 /* Stack of dest,src equivalency pairs that need to be restored after
7158 each attempt to thread a block's incoming edge to an outgoing edge.
7159
7160 A NULL entry is used to mark the end of pairs which need to be
7161 restored. */
7162 static VEC(tree,heap) *stack;
7163
7164 /* A trivial wrapper so that we can present the generic jump threading
7165 code with a simple API for simplifying statements. STMT is the
7166    statement we want to simplify; WITHIN_STMT provides the location
7167 for any overflow warnings. */
7168
7169 static tree
7170 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7171 {
7172 /* We only use VRP information to simplify conditionals. This is
7173 overly conservative, but it's unclear if doing more would be
7174 worth the compile time cost. */
7175 if (gimple_code (stmt) != GIMPLE_COND)
7176 return NULL;
7177
7178 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7179 gimple_cond_lhs (stmt),
7180 gimple_cond_rhs (stmt), within_stmt);
7181 }
7182
7183 /* Blocks which have more than one predecessor and more than
7184 one successor present jump threading opportunities, i.e.,
7185 when the block is reached from a specific predecessor, we
7186 may be able to determine which of the outgoing edges will
7187 be traversed. When this optimization applies, we are able
7188 to avoid conditionals at runtime and we may expose secondary
7189 optimization opportunities.
7190
7191 This routine is effectively a driver for the generic jump
7192 threading code. It basically just presents the generic code
7193 with edges that may be suitable for jump threading.
7194
7195 Unlike DOM, we do not iterate VRP if jump threading was successful.
7196 While iterating may expose new opportunities for VRP, it is expected
7197 those opportunities would be very limited and the compile time cost
7198 to expose those opportunities would be significant.
7199
7200 As jump threading opportunities are discovered, they are registered
7201 for later realization. */
7202
7203 static void
7204 identify_jump_threads (void)
7205 {
7206 basic_block bb;
7207 gimple dummy;
7208 int i;
7209 edge e;
7210
7211 /* Ugh. When substituting values earlier in this pass we can
7212 wipe the dominance information. So rebuild the dominator
7213 information as we need it within the jump threading code. */
7214 calculate_dominance_info (CDI_DOMINATORS);
7215
7216 /* We do not allow VRP information to be used for jump threading
7217 across a back edge in the CFG. Otherwise it becomes too
7218 difficult to avoid eliminating loop exit tests. Of course
7219 EDGE_DFS_BACK is not accurate at this time so we have to
7220 recompute it. */
7221 mark_dfs_back_edges ();
7222
7223 /* Do not thread across edges we are about to remove. Just marking
7224 them as EDGE_DFS_BACK will do. */
7225 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7226 e->flags |= EDGE_DFS_BACK;
7227
7228 /* Allocate our unwinder stack to unwind any temporary equivalences
7229 that might be recorded. */
7230 stack = VEC_alloc (tree, heap, 20);
7231
7232 /* To avoid lots of silly node creation, we create a single
7233 conditional and just modify it in-place when attempting to
7234 thread jumps. */
7235 dummy = gimple_build_cond (EQ_EXPR,
7236 integer_zero_node, integer_zero_node,
7237 NULL, NULL);
7238
7239 /* Walk through all the blocks finding those which present a
7240 potential jump threading opportunity. We could set this up
7241 as a dominator walker and record data during the walk, but
7242 I doubt it's worth the effort for the classes of jump
7243 threading opportunities we are trying to identify at this
7244 point in compilation. */
7245 FOR_EACH_BB (bb)
7246 {
7247 gimple last;
7248
7249 /* If the generic jump threading code does not find this block
7250 interesting, then there is nothing to do. */
7251 if (! potentially_threadable_block (bb))
7252 continue;
7253
7254 /* We only care about blocks ending in a COND_EXPR. While there
7255 may be some value in handling SWITCH_EXPR here, I doubt it's
7256 terribly important. */
7257 last = gsi_stmt (gsi_last_bb (bb));
7258 if (gimple_code (last) != GIMPLE_COND)
7259 continue;
7260
7261 /* We're basically looking for any kind of conditional with
7262 integral type arguments. */
7263 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7264 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7265 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7266 || is_gimple_min_invariant (gimple_cond_rhs (last)))
7267 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
7268 {
7269 edge_iterator ei;
7270
7271 /* We've got a block with multiple predecessors and multiple
7272 successors which also ends in a suitable conditional. For
7273 each predecessor, see if we can thread it to a specific
7274 successor. */
7275 FOR_EACH_EDGE (e, ei, bb->preds)
7276 {
7277 /* Do not thread across back edges or abnormal edges
7278 in the CFG. */
7279 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7280 continue;
7281
7282 thread_across_edge (dummy, e, true, &stack,
7283 simplify_stmt_for_jump_threading);
7284 }
7285 }
7286 }
7287
7288 /* We do not actually update the CFG or SSA graphs at this point as
7289 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7290 handle ASSERT_EXPRs gracefully. */
7291 }
7292
7293 /* We identified all the jump threading opportunities earlier, but could
7294 not transform the CFG at that time. This routine transforms the
7295 CFG and arranges for the dominator tree to be rebuilt if necessary.
7296
7297 Note the SSA graph update will occur during the normal TODO
7298 processing by the pass manager. */
7299 static void
7300 finalize_jump_threads (void)
7301 {
7302 thread_through_all_blocks (false);
7303 VEC_free (tree, heap, stack);
7304 }
7305
7306
7307 /* Traverse all the blocks folding conditionals with known ranges. */
7308
7309 static void
7310 vrp_finalize (void)
7311 {
7312 size_t i;
7313 prop_value_t *single_val_range;
7314 bool do_value_subst_p;
7315
7316 if (dump_file)
7317 {
7318 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7319 dump_all_value_ranges (dump_file);
7320 fprintf (dump_file, "\n");
7321 }
7322
7323 /* We may have ended up with ranges that contain exactly one value.
7324 Those values can be substituted like any other constant-propagated
7325 value using substitute_and_fold. */
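  /* For example (purely illustrative), if VRP computed

         x_7 : VR_RANGE [42, 42]

     then every use of x_7 can be replaced by the constant 42, exactly
     as constant propagation would do.  */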
7326 single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);
7327
7328 do_value_subst_p = false;
7329 for (i = 0; i < num_ssa_names; i++)
7330 if (vr_value[i]
7331 && vr_value[i]->type == VR_RANGE
7332 && vr_value[i]->min == vr_value[i]->max
7333 && is_gimple_min_invariant (vr_value[i]->min))
7334 {
7335 single_val_range[i].value = vr_value[i]->min;
7336 do_value_subst_p = true;
7337 }
7338
7339 if (!do_value_subst_p)
7340 {
7341 /* We found no single-valued ranges; don't waste time trying to
7342 do single-value substitution in substitute_and_fold. */
7343 free (single_val_range);
7344 single_val_range = NULL;
7345 }
7346
7347 substitute_and_fold (single_val_range, vrp_fold_stmt);
7348
7349 if (warn_array_bounds)
7350 check_all_array_refs ();
7351
7352 /* We must identify jump threading opportunities before we release
7353 the data structures built by VRP. */
7354 identify_jump_threads ();
7355
7356 /* Free allocated memory. */
7357 for (i = 0; i < num_ssa_names; i++)
7358 if (vr_value[i])
7359 {
7360 BITMAP_FREE (vr_value[i]->equiv);
7361 free (vr_value[i]);
7362 }
7363
7364 free (single_val_range);
7365 free (vr_value);
7366 free (vr_phi_edge_counts);
7367
7368 /* Clear these so that we can distinguish between VRP data being
7369 available and not being available. */
7370 vr_value = NULL;
7371 vr_phi_edge_counts = NULL;
7372 }
7373
7374
7375 /* Main entry point to VRP (Value Range Propagation). This pass is
7376 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7377 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7378 Programming Language Design and Implementation, pp. 67-78, 1995.
7379 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7380
7381 This is essentially an SSA-CCP pass modified to deal with ranges
7382 instead of constants.
7383
7384 While propagating ranges, we may find that two or more SSA names
7385 have equivalent, though distinct, ranges. For instance,
7386
7387 1 x_9 = p_3->a;
7388 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7389 3 if (p_4 == q_2)
7390 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7391 5 endif
7392 6 if (q_2)
7393
7394 In the code above, pointer p_5 has range [q_2, q_2], but from the
7395 code we can also determine that p_5 cannot be NULL and, if q_2 had
7396 a non-varying range, p_5's range should also be compatible with it.
7397
7398 These equivalences are created in two ways: by ASSERT_EXPRs and by
7399 copy operations. Since p_5 is an assertion on p_4, and p_4 was itself
7400 the result of another assertion, we can use the fact that p_5 and
7401 p_4 are equivalent when evaluating p_5's range.
7402
7403 Together with value ranges, we also propagate these equivalences
7404 between names so that we can take advantage of information from
7405 multiple ranges when doing final replacement. Note that this
7406 equivalency relation is transitive but not symmetric.
7407
7408 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7409 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7410 in contexts where that assertion does not hold (e.g., in line 6).
7411
7412 TODO, the main difference between this pass and Patterson's is that
7413 we do not propagate edge probabilities. We only compute whether
7414 edges can be taken or not. That is, instead of having a spectrum
7415 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7416 DON'T KNOW. In the future, it may be worthwhile to propagate
7417 probabilities to aid branch prediction. */
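/* As a purely illustrative example of the 0 / 1 / DON'T KNOW outcome
   mentioned above: if VRP determines that x_2 has the range [1, 10],
   then for

       if (x_2 > 0) goto <then>; else goto <else>;

   the <else> edge can never be taken (probability 0) and the <then>
   edge is always taken (probability 1), while a test such as
   "x_2 > 5" remains DON'T KNOW.  */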
7418
7419 static unsigned int
7420 execute_vrp (void)
7421 {
7422 int i;
7423 edge e;
7424 switch_update *su;
7425
7426 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7427 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7428 scev_initialize ();
7429
7430 insert_range_assertions ();
7431
7432 to_remove_edges = VEC_alloc (edge, heap, 10);
7433 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7434 threadedge_initialize_values ();
7435
7436 vrp_initialize ();
7437 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7438 vrp_finalize ();
7439
7440 /* ASSERT_EXPRs must be removed before finalizing jump threads,
7441 as finalizing jump threads calls the CFG cleanup code, which
7442 does not properly handle ASSERT_EXPRs. */
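  /* Conceptually (an illustration only), removal turns

         p_4 = ASSERT_EXPR <p_3, p_3 != 0>;
         ... uses of p_4 ...

     back into plain uses of p_3, leaving no trace of the assertion in
     the IL.  */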
7443 remove_range_assertions ();
7444
7445 /* If we exposed any new variables, go ahead and put them into
7446 SSA form now, before we handle jump threading. This simplifies
7447 interactions between rewriting of _DECL nodes into SSA form
7448 and rewriting SSA_NAME nodes into SSA form after block
7449 duplication and CFG manipulation. */
7450 update_ssa (TODO_update_ssa);
7451
7452 finalize_jump_threads ();
7453
7454 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7455 CFG in a broken state and requires a cfg_cleanup run. */
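  /* For illustration, given a hypothetical

         switch (x_2) { case 0: ...; case 5: ...; default: ... }

     where VRP proved that x_2 has the range [0, 3], the edge to the
     "case 5" label is dead and was queued in to_remove_edges, while
     to_update_switch_stmts records the shrunken case label vector.  */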
7456 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7457 remove_edge (e);
7458 /* Update SWITCH_EXPR case label vector. */
7459 for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i)
7460 {
7461 size_t j;
7462 size_t n = TREE_VEC_LENGTH (su->vec);
7463 tree label;
7464 gimple_switch_set_num_labels (su->stmt, n);
7465 for (j = 0; j < n; j++)
7466 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7467 /* As we may have replaced the default label with a regular one,
7468 make sure to make it a real default label again. This ensures
7469 optimal expansion. */
7470 label = gimple_switch_default_label (su->stmt);
7471 CASE_LOW (label) = NULL_TREE;
7472 CASE_HIGH (label) = NULL_TREE;
7473 }
7474
7475 if (VEC_length (edge, to_remove_edges) > 0)
7476 free_dominance_info (CDI_DOMINATORS);
7477
7478 VEC_free (edge, heap, to_remove_edges);
7479 VEC_free (switch_update, heap, to_update_switch_stmts);
7480 threadedge_finalize_values ();
7481
7482 scev_finalize ();
7483 loop_optimizer_finalize ();
7484 return 0;
7485 }
7486
7487 static bool
7488 gate_vrp (void)
7489 {
7490 return flag_tree_vrp != 0;
7491 }
7492
7493 struct gimple_opt_pass pass_vrp =
7494 {
7495 {
7496 GIMPLE_PASS,
7497 "vrp", /* name */
7498 gate_vrp, /* gate */
7499 execute_vrp, /* execute */
7500 NULL, /* sub */
7501 NULL, /* next */
7502 0, /* static_pass_number */
7503 TV_TREE_VRP, /* tv_id */
7504 PROP_ssa, /* properties_required */
7505 0, /* properties_provided */
7506 0, /* properties_destroyed */
7507 0, /* todo_flags_start */
7508 TODO_cleanup_cfg
7509 | TODO_ggc_collect
7510 | TODO_verify_ssa
7511 | TODO_dump_func
7512 | TODO_update_ssa /* todo_flags_finish */
7513 }
7514 };