1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "tree-pretty-print.h"
35 #include "gimple-pretty-print.h"
36 #include "toplev.h"
37 #include "intl.h"
38 #include "cfgloop.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-ssa-propagate.h"
41 #include "tree-chrec.h"
42
43
44 /* Set of SSA names found live during the RPO traversal of the function
45 for still active basic-blocks. */
46 static sbitmap *live;
47
48 /* Return true if the SSA name NAME is live on the edge E. */
49
50 static bool
51 live_on_edge (edge e, tree name)
52 {
53 return (live[e->dest->index]
54 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
55 }
56
57 /* Local functions. */
58 static int compare_values (tree val1, tree val2);
59 static int compare_values_warnv (tree val1, tree val2, bool *);
60 static void vrp_meet (value_range_t *, value_range_t *);
61 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
62 tree, tree, bool, bool *,
63 bool *);
64
65 /* Location information for ASSERT_EXPRs. Each instance of this
66 structure describes an ASSERT_EXPR for an SSA name. Since a single
67 SSA name may have more than one assertion associated with it, these
68 locations are kept in a linked list attached to the corresponding
69 SSA name. */
70 struct assert_locus_d
71 {
72 /* Basic block where the assertion would be inserted. */
73 basic_block bb;
74
75 /* Some assertions need to be inserted on an edge (e.g., assertions
76 generated by COND_EXPRs). In those cases, BB will be NULL. */
77 edge e;
78
79 /* Pointer to the statement that generated this assertion. */
80 gimple_stmt_iterator si;
81
82 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
83 enum tree_code comp_code;
84
85 /* Value being compared against. */
86 tree val;
87
88 /* Expression to compare. */
89 tree expr;
90
91 /* Next node in the linked list. */
92 struct assert_locus_d *next;
93 };
94
95 typedef struct assert_locus_d *assert_locus_t;
96
97 /* If bit I is present, it means that SSA name N_i has a list of
98 assertions that should be inserted in the IL. */
99 static bitmap need_assert_for;
100
101 /* Array of lists of assertion locations. ASSERTS_FOR[I]
102 holds a list of ASSERT_LOCUS_T nodes that describe where
103 ASSERT_EXPRs for SSA name N_I should be inserted. */
104 static assert_locus_t *asserts_for;
105
106 /* Value range array. After propagation, VR_VALUE[I] holds the range
107 of values that SSA name N_I may take. */
108 static value_range_t **vr_value;
109
110 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
111 number of executable edges we saw the last time we visited the
112 node. */
113 static int *vr_phi_edge_counts;
114
115 typedef struct {
116 gimple stmt;
117 tree vec;
118 } switch_update;
119
120 static VEC (edge, heap) *to_remove_edges;
121 DEF_VEC_O(switch_update);
122 DEF_VEC_ALLOC_O(switch_update, heap);
123 static VEC (switch_update, heap) *to_update_switch_stmts;
124
125
126 /* Return the maximum value for TYPE. */
127
128 static inline tree
129 vrp_val_max (const_tree type)
130 {
131 if (!INTEGRAL_TYPE_P (type))
132 return NULL_TREE;
133
134 return TYPE_MAX_VALUE (type);
135 }
136
137 /* Return the minimum value for TYPE. */
138
139 static inline tree
140 vrp_val_min (const_tree type)
141 {
142 if (!INTEGRAL_TYPE_P (type))
143 return NULL_TREE;
144
145 return TYPE_MIN_VALUE (type);
146 }
147
148 /* Return whether VAL is equal to the maximum value of its type. This
149 will be true for a positive overflow infinity. We can't do a
150 simple equality comparison with TYPE_MAX_VALUE because C typedefs
151 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
152 to the integer constant with the same value in the type. */
153
154 static inline bool
155 vrp_val_is_max (const_tree val)
156 {
157 tree type_max = vrp_val_max (TREE_TYPE (val));
158 return (val == type_max
159 || (type_max != NULL_TREE
160 && operand_equal_p (val, type_max, 0)));
161 }
162
163 /* Return whether VAL is equal to the minimum value of its type. This
164 will be true for a negative overflow infinity. */
165
166 static inline bool
167 vrp_val_is_min (const_tree val)
168 {
169 tree type_min = vrp_val_min (TREE_TYPE (val));
170 return (val == type_min
171 || (type_min != NULL_TREE
172 && operand_equal_p (val, type_min, 0)));
173 }
174
175
176 /* Return whether TYPE should use an overflow infinity distinct from
177 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
178 represent a signed overflow during VRP computations. An infinity
179 is distinct from a half-range, which will go from some number to
180 TYPE_{MIN,MAX}_VALUE. */
181
182 static inline bool
183 needs_overflow_infinity (const_tree type)
184 {
185 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
186 }
187
188 /* Return whether TYPE can support our overflow infinity
189 representation: we use the TREE_OVERFLOW flag, which only exists
190 for constants. If TYPE doesn't support this, we don't optimize
191 cases which would require signed overflow--we drop them to
192 VARYING. */
193
194 static inline bool
195 supports_overflow_infinity (const_tree type)
196 {
197 tree min = vrp_val_min (type), max = vrp_val_max (type);
198 #ifdef ENABLE_CHECKING
199 gcc_assert (needs_overflow_infinity (type));
200 #endif
201 return (min != NULL_TREE
202 && CONSTANT_CLASS_P (min)
203 && max != NULL_TREE
204 && CONSTANT_CLASS_P (max));
205 }
206
207 /* VAL is the maximum or minimum value of a type. Return a
208 corresponding overflow infinity. */
209
210 static inline tree
211 make_overflow_infinity (tree val)
212 {
213 #ifdef ENABLE_CHECKING
214 gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
215 #endif
216 val = copy_node (val);
217 TREE_OVERFLOW (val) = 1;
218 return val;
219 }
220
221 /* Return a negative overflow infinity for TYPE. */
222
223 static inline tree
224 negative_overflow_infinity (tree type)
225 {
226 #ifdef ENABLE_CHECKING
227 gcc_assert (supports_overflow_infinity (type));
228 #endif
229 return make_overflow_infinity (vrp_val_min (type));
230 }
231
232 /* Return a positive overflow infinity for TYPE. */
233
234 static inline tree
235 positive_overflow_infinity (tree type)
236 {
237 #ifdef ENABLE_CHECKING
238 gcc_assert (supports_overflow_infinity (type));
239 #endif
240 return make_overflow_infinity (vrp_val_max (type));
241 }
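/* For illustration (a hypothetical signed int case): if i_1 has the
   range [1, INT_MAX] and i_1 + 1 is evaluated assuming signed overflow
   is undefined, the new upper bound cannot be represented exactly, so
   VRP uses +INF(OVF), a copy of TYPE_MAX_VALUE with TREE_OVERFLOW set
   as built by the helpers above. */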
242
243 /* Return whether VAL is a negative overflow infinity. */
244
245 static inline bool
246 is_negative_overflow_infinity (const_tree val)
247 {
248 return (needs_overflow_infinity (TREE_TYPE (val))
249 && CONSTANT_CLASS_P (val)
250 && TREE_OVERFLOW (val)
251 && vrp_val_is_min (val));
252 }
253
254 /* Return whether VAL is a positive overflow infinity. */
255
256 static inline bool
257 is_positive_overflow_infinity (const_tree val)
258 {
259 return (needs_overflow_infinity (TREE_TYPE (val))
260 && CONSTANT_CLASS_P (val)
261 && TREE_OVERFLOW (val)
262 && vrp_val_is_max (val));
263 }
264
265 /* Return whether VAL is a positive or negative overflow infinity. */
266
267 static inline bool
268 is_overflow_infinity (const_tree val)
269 {
270 return (needs_overflow_infinity (TREE_TYPE (val))
271 && CONSTANT_CLASS_P (val)
272 && TREE_OVERFLOW (val)
273 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
274 }
275
276 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
277
278 static inline bool
279 stmt_overflow_infinity (gimple stmt)
280 {
281 if (is_gimple_assign (stmt)
282 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
283 GIMPLE_SINGLE_RHS)
284 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
285 return false;
286 }
287
288 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
289 the same value with TREE_OVERFLOW clear. This can be used to avoid
290 confusing a regular value with an overflow value. */
291
292 static inline tree
293 avoid_overflow_infinity (tree val)
294 {
295 if (!is_overflow_infinity (val))
296 return val;
297
298 if (vrp_val_is_max (val))
299 return vrp_val_max (TREE_TYPE (val));
300 else
301 {
302 #ifdef ENABLE_CHECKING
303 gcc_assert (vrp_val_is_min (val));
304 #endif
305 return vrp_val_min (TREE_TYPE (val));
306 }
307 }
308
309
310 /* Return true if ARG is marked with the nonnull attribute in the
311 current function signature. */
312
313 static bool
314 nonnull_arg_p (const_tree arg)
315 {
316 tree t, attrs, fntype;
317 unsigned HOST_WIDE_INT arg_num;
318
319 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
320
321 /* The static chain decl is always non-null. */
322 if (arg == cfun->static_chain_decl)
323 return true;
324
325 fntype = TREE_TYPE (current_function_decl);
326 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
327
328 /* If "nonnull" wasn't specified, we know nothing about the argument. */
329 if (attrs == NULL_TREE)
330 return false;
331
332 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
333 if (TREE_VALUE (attrs) == NULL_TREE)
334 return true;
335
336 /* Get the position number for ARG in the function signature. */
337 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
338 t;
339 t = TREE_CHAIN (t), arg_num++)
340 {
341 if (t == arg)
342 break;
343 }
344
345 gcc_assert (t == arg);
346
347 /* Now see if ARG_NUM is mentioned in the nonnull list. */
348 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
349 {
350 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
351 return true;
352 }
353
354 return false;
355 }
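/* A sketch of how the lookup above behaves (hypothetical signature):
   for void f (char *p, char *q) __attribute__ ((nonnull (2))), the
   attribute list carries the constant 2, ARG_NUM computed for q is 2,
   so nonnull_arg_p (q) returns true while nonnull_arg_p (p) returns
   false. A bare __attribute__ ((nonnull)) makes every pointer
   argument count as non-null. */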
356
357
358 /* Set value range VR to VR_VARYING. */
359
360 static inline void
361 set_value_range_to_varying (value_range_t *vr)
362 {
363 vr->type = VR_VARYING;
364 vr->min = vr->max = NULL_TREE;
365 if (vr->equiv)
366 bitmap_clear (vr->equiv);
367 }
368
369
370 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
371
372 static void
373 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
374 tree max, bitmap equiv)
375 {
376 #if defined ENABLE_CHECKING
377 /* Check the validity of the range. */
378 if (t == VR_RANGE || t == VR_ANTI_RANGE)
379 {
380 int cmp;
381
382 gcc_assert (min && max);
383
384 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
385 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
386
387 cmp = compare_values (min, max);
388 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
389
390 if (needs_overflow_infinity (TREE_TYPE (min)))
391 gcc_assert (!is_overflow_infinity (min)
392 || !is_overflow_infinity (max));
393 }
394
395 if (t == VR_UNDEFINED || t == VR_VARYING)
396 gcc_assert (min == NULL_TREE && max == NULL_TREE);
397
398 if (t == VR_UNDEFINED || t == VR_VARYING)
399 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
400 #endif
401
402 vr->type = t;
403 vr->min = min;
404 vr->max = max;
405
406 /* Since updating the equivalence set involves deep copying the
407 bitmaps, only do it if absolutely necessary. */
408 if (vr->equiv == NULL
409 && equiv != NULL)
410 vr->equiv = BITMAP_ALLOC (NULL);
411
412 if (equiv != vr->equiv)
413 {
414 if (equiv && !bitmap_empty_p (equiv))
415 bitmap_copy (vr->equiv, equiv);
416 else
417 bitmap_clear (vr->equiv);
418 }
419 }
420
421
422 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
423 This means adjusting T, MIN and MAX representing the case of a
424 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
425 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
426 In corner cases where MAX+1 or MIN-1 wraps this will fall back
427 to varying.
428 This routine exists to ease canonicalization in the case where we
429 extract ranges from var + CST op limit. */
430
431 static void
432 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
433 tree min, tree max, bitmap equiv)
434 {
435 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
436 if ((t != VR_RANGE
437 && t != VR_ANTI_RANGE)
438 || TREE_CODE (min) != INTEGER_CST
439 || TREE_CODE (max) != INTEGER_CST)
440 {
441 set_value_range (vr, t, min, max, equiv);
442 return;
443 }
444
445 /* If MIN and MAX are in the wrong order, we need to adjust them and
446 flip the VR type to compensate. */
447 if (tree_int_cst_lt (max, min))
448 {
449 tree one = build_int_cst (TREE_TYPE (min), 1);
450 tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
451 max = int_const_binop (MINUS_EXPR, min, one, 0);
452 min = tmp;
453
454 /* There's one corner case, if we had [C+1, C] before we now have
455 that again. But this represents an empty value range, so drop
456 to varying in this case. */
457 if (tree_int_cst_lt (max, min))
458 {
459 set_value_range_to_varying (vr);
460 return;
461 }
462
463 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
464 }
465
466 /* Anti-ranges that can be represented as ranges should be so. */
467 if (t == VR_ANTI_RANGE)
468 {
469 bool is_min = vrp_val_is_min (min);
470 bool is_max = vrp_val_is_max (max);
471
472 if (is_min && is_max)
473 {
474 /* We cannot deal with empty ranges, drop to varying. */
475 set_value_range_to_varying (vr);
476 return;
477 }
478 else if (is_min
479 /* As a special exception preserve non-null ranges. */
480 && !(TYPE_UNSIGNED (TREE_TYPE (min))
481 && integer_zerop (max)))
482 {
483 tree one = build_int_cst (TREE_TYPE (max), 1);
484 min = int_const_binop (PLUS_EXPR, max, one, 0);
485 max = vrp_val_max (TREE_TYPE (max));
486 t = VR_RANGE;
487 }
488 else if (is_max)
489 {
490 tree one = build_int_cst (TREE_TYPE (min), 1);
491 max = int_const_binop (MINUS_EXPR, min, one, 0);
492 min = vrp_val_min (TREE_TYPE (min));
493 t = VR_RANGE;
494 }
495 }
496
497 set_value_range (vr, t, min, max, equiv);
498 }
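/* Worked examples for the canonicalization above (hypothetical 8-bit
   types): the wrapping range [250, 10] of an unsigned type, i.e. MAX <
   MIN, stands for [250, 255] U [0, 10] and becomes the anti-range
   ~[11, 249]; the anti-range ~[-128, 5] of a signed type becomes the
   range [6, 127]; the non-null anti-range ~[0, 0] of an unsigned type
   is deliberately kept as-is. */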
499
500 /* Copy value range FROM into value range TO. */
501
502 static inline void
503 copy_value_range (value_range_t *to, value_range_t *from)
504 {
505 set_value_range (to, from->type, from->min, from->max, from->equiv);
506 }
507
508 /* Set value range VR to a single value. This function is only called
509 with values we get from statements, and exists to clear the
510 TREE_OVERFLOW flag so that we don't think we have an overflow
511 infinity when we shouldn't. */
512
513 static inline void
514 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
515 {
516 gcc_assert (is_gimple_min_invariant (val));
517 val = avoid_overflow_infinity (val);
518 set_value_range (vr, VR_RANGE, val, val, equiv);
519 }
520
521 /* Set value range VR to a non-negative range of type TYPE.
522 OVERFLOW_INFINITY indicates whether to use an overflow infinity
523 rather than TYPE_MAX_VALUE; this should be true if we determine
524 that the range is nonnegative based on the assumption that signed
525 overflow does not occur. */
526
527 static inline void
528 set_value_range_to_nonnegative (value_range_t *vr, tree type,
529 bool overflow_infinity)
530 {
531 tree zero;
532
533 if (overflow_infinity && !supports_overflow_infinity (type))
534 {
535 set_value_range_to_varying (vr);
536 return;
537 }
538
539 zero = build_int_cst (type, 0);
540 set_value_range (vr, VR_RANGE, zero,
541 (overflow_infinity
542 ? positive_overflow_infinity (type)
543 : TYPE_MAX_VALUE (type)),
544 vr->equiv);
545 }
546
547 /* Set value range VR to a non-NULL range of type TYPE. */
548
549 static inline void
550 set_value_range_to_nonnull (value_range_t *vr, tree type)
551 {
552 tree zero = build_int_cst (type, 0);
553 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
554 }
555
556
557 /* Set value range VR to a NULL range of type TYPE. */
558
559 static inline void
560 set_value_range_to_null (value_range_t *vr, tree type)
561 {
562 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
563 }
564
565
566 /* Set value range VR to a range of a truthvalue of type TYPE. */
567
568 static inline void
569 set_value_range_to_truthvalue (value_range_t *vr, tree type)
570 {
571 if (TYPE_PRECISION (type) == 1)
572 set_value_range_to_varying (vr);
573 else
574 set_value_range (vr, VR_RANGE,
575 build_int_cst (type, 0), build_int_cst (type, 1),
576 vr->equiv);
577 }
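/* Note on the precision test above: when TYPE has precision 1, the
   range [0, 1] already spans the whole type, so VR_VARYING carries the
   same information; for wider truth types the range is pinned to
   [0, 1]. */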
578
579
580 /* Set value range VR to VR_UNDEFINED. */
581
582 static inline void
583 set_value_range_to_undefined (value_range_t *vr)
584 {
585 vr->type = VR_UNDEFINED;
586 vr->min = vr->max = NULL_TREE;
587 if (vr->equiv)
588 bitmap_clear (vr->equiv);
589 }
590
591
592 /* If abs (min) < abs (max), set VR to [-max, max]; if
593 abs (min) >= abs (max), set VR to [-min, min]. */
594
595 static void
596 abs_extent_range (value_range_t *vr, tree min, tree max)
597 {
598 int cmp;
599
600 gcc_assert (TREE_CODE (min) == INTEGER_CST);
601 gcc_assert (TREE_CODE (max) == INTEGER_CST);
602 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
603 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
604 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
605 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
606 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
607 {
608 set_value_range_to_varying (vr);
609 return;
610 }
611 cmp = compare_values (min, max);
612 if (cmp == -1)
613 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
614 else if (cmp == 0 || cmp == 1)
615 {
616 max = min;
617 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
618 }
619 else
620 {
621 set_value_range_to_varying (vr);
622 return;
623 }
624 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
625 }
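/* For illustration: abs_extent_range on [-3, 7] yields [-7, 7] and on
   [-9, 4] yields [-9, 9]; the result is always the symmetric range
   [-M, M] where M is the larger absolute value of the two bounds. */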
626
627
628 /* Return value range information for VAR.
629
630 If we have no value ranges recorded (i.e., VRP is not running), then
631 return NULL. Otherwise create an empty range if none existed for VAR. */
632
633 static value_range_t *
634 get_value_range (const_tree var)
635 {
636 value_range_t *vr;
637 tree sym;
638 unsigned ver = SSA_NAME_VERSION (var);
639
640 /* If we have no recorded ranges, then return NULL. */
641 if (! vr_value)
642 return NULL;
643
644 vr = vr_value[ver];
645 if (vr)
646 return vr;
647
648 /* Create a default value range. */
649 vr_value[ver] = vr = XCNEW (value_range_t);
650
651 /* Defer allocating the equivalence set. */
652 vr->equiv = NULL;
653
654 /* If VAR is a default definition, the variable can take any value
655 in VAR's type. */
656 sym = SSA_NAME_VAR (var);
657 if (SSA_NAME_IS_DEFAULT_DEF (var))
658 {
659 /* Try to use the "nonnull" attribute to create ~[0, 0]
660 anti-ranges for pointers. Note that this is only valid with
661 default definitions of PARM_DECLs. */
662 if (TREE_CODE (sym) == PARM_DECL
663 && POINTER_TYPE_P (TREE_TYPE (sym))
664 && nonnull_arg_p (sym))
665 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
666 else
667 set_value_range_to_varying (vr);
668 }
669
670 return vr;
671 }
672
673 /* Return true if VAL1 and VAL2 are equal values for VRP purposes. */
674
675 static inline bool
676 vrp_operand_equal_p (const_tree val1, const_tree val2)
677 {
678 if (val1 == val2)
679 return true;
680 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
681 return false;
682 if (is_overflow_infinity (val1))
683 return is_overflow_infinity (val2);
684 return true;
685 }
686
687 /* Return true if the bitmaps B1 and B2 are equal. */
688
689 static inline bool
690 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
691 {
692 return (b1 == b2
693 || (b1 && b2
694 && bitmap_equal_p (b1, b2)));
695 }
696
697 /* Update the value range and equivalence set for variable VAR to
698 NEW_VR. Return true if NEW_VR is different from VAR's previous
699 value.
700
701 NOTE: This function assumes that NEW_VR is a temporary value range
702 object created for the sole purpose of updating VAR's range. The
703 storage used by the equivalence set from NEW_VR will be freed by
704 this function. Do not call update_value_range when NEW_VR
705 is the range object associated with another SSA name. */
706
707 static inline bool
708 update_value_range (const_tree var, value_range_t *new_vr)
709 {
710 value_range_t *old_vr;
711 bool is_new;
712
713 /* Update the value range, if necessary. */
714 old_vr = get_value_range (var);
715 is_new = old_vr->type != new_vr->type
716 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
717 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
718 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
719
720 if (is_new)
721 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
722 new_vr->equiv);
723
724 BITMAP_FREE (new_vr->equiv);
725
726 return is_new;
727 }
728
729
730 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
731 point where equivalence processing can be turned on/off. */
732
733 static void
734 add_equivalence (bitmap *equiv, const_tree var)
735 {
736 unsigned ver = SSA_NAME_VERSION (var);
737 value_range_t *vr = vr_value[ver];
738
739 if (*equiv == NULL)
740 *equiv = BITMAP_ALLOC (NULL);
741 bitmap_set_bit (*equiv, ver);
742 if (vr && vr->equiv)
743 bitmap_ior_into (*equiv, vr->equiv);
744 }
745
746
747 /* Return true if VR is ~[0, 0]. */
748
749 static inline bool
750 range_is_nonnull (value_range_t *vr)
751 {
752 return vr->type == VR_ANTI_RANGE
753 && integer_zerop (vr->min)
754 && integer_zerop (vr->max);
755 }
756
757
758 /* Return true if VR is [0, 0]. */
759
760 static inline bool
761 range_is_null (value_range_t *vr)
762 {
763 return vr->type == VR_RANGE
764 && integer_zerop (vr->min)
765 && integer_zerop (vr->max);
766 }
767
768 /* Return true if max and min of VR are INTEGER_CST. It's not necessarily
769 a singleton. */
770
771 static inline bool
772 range_int_cst_p (value_range_t *vr)
773 {
774 return (vr->type == VR_RANGE
775 && TREE_CODE (vr->max) == INTEGER_CST
776 && TREE_CODE (vr->min) == INTEGER_CST
777 && !TREE_OVERFLOW (vr->max)
778 && !TREE_OVERFLOW (vr->min));
779 }
780
781 /* Return true if VR is an INTEGER_CST singleton. */
782
783 static inline bool
784 range_int_cst_singleton_p (value_range_t *vr)
785 {
786 return (range_int_cst_p (vr)
787 && tree_int_cst_equal (vr->min, vr->max));
788 }
789
790 /* Return true if value range VR involves at least one symbol. */
791
792 static inline bool
793 symbolic_range_p (value_range_t *vr)
794 {
795 return (!is_gimple_min_invariant (vr->min)
796 || !is_gimple_min_invariant (vr->max));
797 }
798
799 /* Return true if value range VR uses an overflow infinity. */
800
801 static inline bool
802 overflow_infinity_range_p (value_range_t *vr)
803 {
804 return (vr->type == VR_RANGE
805 && (is_overflow_infinity (vr->min)
806 || is_overflow_infinity (vr->max)));
807 }
808
809 /* Return false if we cannot make a valid comparison based on VR;
810 this will be the case if it uses an overflow infinity and overflow
811 is not undefined (i.e., -fno-strict-overflow is in effect).
812 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
813 uses an overflow infinity. */
814
815 static bool
816 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
817 {
818 gcc_assert (vr->type == VR_RANGE);
819 if (is_overflow_infinity (vr->min))
820 {
821 *strict_overflow_p = true;
822 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
823 return false;
824 }
825 if (is_overflow_infinity (vr->max))
826 {
827 *strict_overflow_p = true;
828 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
829 return false;
830 }
831 return true;
832 }
833
834
835 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
836 ranges obtained so far. */
837
838 static bool
839 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
840 {
841 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
842 || (TREE_CODE (expr) == SSA_NAME
843 && ssa_name_nonnegative_p (expr)));
844 }
845
846 /* Return true if the result of assignment STMT is known to be non-negative.
847 If the return value is based on the assumption that signed overflow is
848 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
849 *STRICT_OVERFLOW_P.*/
850
851 static bool
852 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
853 {
854 enum tree_code code = gimple_assign_rhs_code (stmt);
855 switch (get_gimple_rhs_class (code))
856 {
857 case GIMPLE_UNARY_RHS:
858 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
859 gimple_expr_type (stmt),
860 gimple_assign_rhs1 (stmt),
861 strict_overflow_p);
862 case GIMPLE_BINARY_RHS:
863 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
864 gimple_expr_type (stmt),
865 gimple_assign_rhs1 (stmt),
866 gimple_assign_rhs2 (stmt),
867 strict_overflow_p);
868 case GIMPLE_TERNARY_RHS:
869 return false;
870 case GIMPLE_SINGLE_RHS:
871 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
872 strict_overflow_p);
873 case GIMPLE_INVALID_RHS:
874 gcc_unreachable ();
875 default:
876 gcc_unreachable ();
877 }
878 }
879
880 /* Return true if the return value of call STMT is known to be non-negative.
881 If the return value is based on the assumption that signed overflow is
882 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
883 *STRICT_OVERFLOW_P.*/
884
885 static bool
886 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
887 {
888 tree arg0 = gimple_call_num_args (stmt) > 0 ?
889 gimple_call_arg (stmt, 0) : NULL_TREE;
890 tree arg1 = gimple_call_num_args (stmt) > 1 ?
891 gimple_call_arg (stmt, 1) : NULL_TREE;
892
893 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
894 gimple_call_fndecl (stmt),
895 arg0,
896 arg1,
897 strict_overflow_p);
898 }
899
900 /* Return true if STMT is known to compute a non-negative value.
901 If the return value is based on the assumption that signed overflow is
902 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
903 *STRICT_OVERFLOW_P.*/
904
905 static bool
906 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
907 {
908 switch (gimple_code (stmt))
909 {
910 case GIMPLE_ASSIGN:
911 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
912 case GIMPLE_CALL:
913 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
914 default:
915 gcc_unreachable ();
916 }
917 }
918
919 /* Return true if the result of assignment STMT is known to be non-zero.
920 If the return value is based on the assumption that signed overflow is
921 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
922 *STRICT_OVERFLOW_P.*/
923
924 static bool
925 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
926 {
927 enum tree_code code = gimple_assign_rhs_code (stmt);
928 switch (get_gimple_rhs_class (code))
929 {
930 case GIMPLE_UNARY_RHS:
931 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
932 gimple_expr_type (stmt),
933 gimple_assign_rhs1 (stmt),
934 strict_overflow_p);
935 case GIMPLE_BINARY_RHS:
936 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
937 gimple_expr_type (stmt),
938 gimple_assign_rhs1 (stmt),
939 gimple_assign_rhs2 (stmt),
940 strict_overflow_p);
941 case GIMPLE_TERNARY_RHS:
942 return false;
943 case GIMPLE_SINGLE_RHS:
944 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
945 strict_overflow_p);
946 case GIMPLE_INVALID_RHS:
947 gcc_unreachable ();
948 default:
949 gcc_unreachable ();
950 }
951 }
952
953 /* Return true if STMT is known to compute a non-zero value.
954 If the return value is based on the assumption that signed overflow is
955 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
956 *STRICT_OVERFLOW_P.*/
957
958 static bool
959 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
960 {
961 switch (gimple_code (stmt))
962 {
963 case GIMPLE_ASSIGN:
964 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
965 case GIMPLE_CALL:
966 return gimple_alloca_call_p (stmt);
967 default:
968 gcc_unreachable ();
969 }
970 }
971
972 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
973 obtained so far. */
974
975 static bool
976 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
977 {
978 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
979 return true;
980
981 /* If we have an expression of the form &X->a, then the expression
982 is nonnull if X is nonnull. */
983 if (is_gimple_assign (stmt)
984 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
985 {
986 tree expr = gimple_assign_rhs1 (stmt);
987 tree base = get_base_address (TREE_OPERAND (expr, 0));
988
989 if (base != NULL_TREE
990 && TREE_CODE (base) == MEM_REF
991 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
992 {
993 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
994 if (range_is_nonnull (vr))
995 return true;
996 }
997 }
998
999 return false;
1000 }
1001
1002 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1003 a gimple invariant, or SSA_NAME +- CST. */
1004
1005 static bool
1006 valid_value_p (tree expr)
1007 {
1008 if (TREE_CODE (expr) == SSA_NAME)
1009 return true;
1010
1011 if (TREE_CODE (expr) == PLUS_EXPR
1012 || TREE_CODE (expr) == MINUS_EXPR)
1013 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1014 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1015
1016 return is_gimple_min_invariant (expr);
1017 }
1018
1019 /* Return
1020 1 if VAL < VAL2
1021 0 if !(VAL < VAL2)
1022 -2 if those are incomparable. */
1023 static inline int
1024 operand_less_p (tree val, tree val2)
1025 {
1026 /* LT is folded faster than GE and others. Inline the common case. */
1027 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1028 {
1029 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1030 return INT_CST_LT_UNSIGNED (val, val2);
1031 else
1032 {
1033 if (INT_CST_LT (val, val2))
1034 return 1;
1035 }
1036 }
1037 else
1038 {
1039 tree tcmp;
1040
1041 fold_defer_overflow_warnings ();
1042
1043 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1044
1045 fold_undefer_and_ignore_overflow_warnings ();
1046
1047 if (!tcmp
1048 || TREE_CODE (tcmp) != INTEGER_CST)
1049 return -2;
1050
1051 if (!integer_zerop (tcmp))
1052 return 1;
1053 }
1054
1055 /* val >= val2, not considering overflow infinity. */
1056 if (is_negative_overflow_infinity (val))
1057 return is_negative_overflow_infinity (val2) ? 0 : 1;
1058 else if (is_positive_overflow_infinity (val2))
1059 return is_positive_overflow_infinity (val) ? 0 : 1;
1060
1061 return 0;
1062 }
1063
1064 /* Compare two values VAL1 and VAL2. Return
1065
1066 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1067 -1 if VAL1 < VAL2,
1068 0 if VAL1 == VAL2,
1069 +1 if VAL1 > VAL2, and
1070 +2 if VAL1 != VAL2
1071
1072 This is similar to tree_int_cst_compare but supports pointer values
1073 and values that cannot be compared at compile time.
1074
1075 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1076 true if the return value is only valid if we assume that signed
1077 overflow is undefined. */
1078
1079 static int
1080 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1081 {
1082 if (val1 == val2)
1083 return 0;
1084
1085 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1086 both integers. */
1087 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1088 == POINTER_TYPE_P (TREE_TYPE (val2)));
1089 /* Convert the two values into the same type. This is needed because
1090 sizetype causes sign extension even for unsigned types. */
1091 val2 = fold_convert (TREE_TYPE (val1), val2);
1092 STRIP_USELESS_TYPE_CONVERSION (val2);
1093
1094 if ((TREE_CODE (val1) == SSA_NAME
1095 || TREE_CODE (val1) == PLUS_EXPR
1096 || TREE_CODE (val1) == MINUS_EXPR)
1097 && (TREE_CODE (val2) == SSA_NAME
1098 || TREE_CODE (val2) == PLUS_EXPR
1099 || TREE_CODE (val2) == MINUS_EXPR))
1100 {
1101 tree n1, c1, n2, c2;
1102 enum tree_code code1, code2;
1103
1104 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1105 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1106 same name, return -2. */
1107 if (TREE_CODE (val1) == SSA_NAME)
1108 {
1109 code1 = SSA_NAME;
1110 n1 = val1;
1111 c1 = NULL_TREE;
1112 }
1113 else
1114 {
1115 code1 = TREE_CODE (val1);
1116 n1 = TREE_OPERAND (val1, 0);
1117 c1 = TREE_OPERAND (val1, 1);
1118 if (tree_int_cst_sgn (c1) == -1)
1119 {
1120 if (is_negative_overflow_infinity (c1))
1121 return -2;
1122 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1123 if (!c1)
1124 return -2;
1125 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1126 }
1127 }
1128
1129 if (TREE_CODE (val2) == SSA_NAME)
1130 {
1131 code2 = SSA_NAME;
1132 n2 = val2;
1133 c2 = NULL_TREE;
1134 }
1135 else
1136 {
1137 code2 = TREE_CODE (val2);
1138 n2 = TREE_OPERAND (val2, 0);
1139 c2 = TREE_OPERAND (val2, 1);
1140 if (tree_int_cst_sgn (c2) == -1)
1141 {
1142 if (is_negative_overflow_infinity (c2))
1143 return -2;
1144 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1145 if (!c2)
1146 return -2;
1147 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1148 }
1149 }
1150
1151 /* Both values must use the same name. */
1152 if (n1 != n2)
1153 return -2;
1154
1155 if (code1 == SSA_NAME
1156 && code2 == SSA_NAME)
1157 /* NAME == NAME */
1158 return 0;
1159
1160 /* If overflow is defined we cannot simplify more. */
1161 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1162 return -2;
1163
1164 if (strict_overflow_p != NULL
1165 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1166 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1167 *strict_overflow_p = true;
1168
1169 if (code1 == SSA_NAME)
1170 {
1171 if (code2 == PLUS_EXPR)
1172 /* NAME < NAME + CST */
1173 return -1;
1174 else if (code2 == MINUS_EXPR)
1175 /* NAME > NAME - CST */
1176 return 1;
1177 }
1178 else if (code1 == PLUS_EXPR)
1179 {
1180 if (code2 == SSA_NAME)
1181 /* NAME + CST > NAME */
1182 return 1;
1183 else if (code2 == PLUS_EXPR)
1184 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1185 return compare_values_warnv (c1, c2, strict_overflow_p);
1186 else if (code2 == MINUS_EXPR)
1187 /* NAME + CST1 > NAME - CST2 */
1188 return 1;
1189 }
1190 else if (code1 == MINUS_EXPR)
1191 {
1192 if (code2 == SSA_NAME)
1193 /* NAME - CST < NAME */
1194 return -1;
1195 else if (code2 == PLUS_EXPR)
1196 /* NAME - CST1 < NAME + CST2 */
1197 return -1;
1198 else if (code2 == MINUS_EXPR)
1199 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1200 C1 and C2 are swapped in the call to compare_values. */
1201 return compare_values_warnv (c2, c1, strict_overflow_p);
1202 }
1203
1204 gcc_unreachable ();
1205 }
1206
1207 /* We cannot compare non-constants. */
1208 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1209 return -2;
1210
1211 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1212 {
1213 /* We cannot compare overflowed values, except for overflow
1214 infinities. */
1215 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1216 {
1217 if (strict_overflow_p != NULL)
1218 *strict_overflow_p = true;
1219 if (is_negative_overflow_infinity (val1))
1220 return is_negative_overflow_infinity (val2) ? 0 : -1;
1221 else if (is_negative_overflow_infinity (val2))
1222 return 1;
1223 else if (is_positive_overflow_infinity (val1))
1224 return is_positive_overflow_infinity (val2) ? 0 : 1;
1225 else if (is_positive_overflow_infinity (val2))
1226 return -1;
1227 return -2;
1228 }
1229
1230 return tree_int_cst_compare (val1, val2);
1231 }
1232 else
1233 {
1234 tree t;
1235
1236 /* First see if VAL1 and VAL2 are not the same. */
1237 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1238 return 0;
1239
1240 /* If VAL1 is a lower address than VAL2, return -1. */
1241 if (operand_less_p (val1, val2) == 1)
1242 return -1;
1243
1244 /* If VAL1 is a higher address than VAL2, return +1. */
1245 if (operand_less_p (val2, val1) == 1)
1246 return 1;
1247
1248 /* If VAL1 is different than VAL2, return +2.
1249 For integer constants we either have already returned -1 or 1
1250 or they are equivalent. We still might succeed in proving
1251 something about non-trivial operands. */
1252 if (TREE_CODE (val1) != INTEGER_CST
1253 || TREE_CODE (val2) != INTEGER_CST)
1254 {
1255 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1256 if (t && integer_onep (t))
1257 return 2;
1258 }
1259
1260 return -2;
1261 }
1262 }
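/* Illustrative cases for the symbolic comparison above (hypothetical
   SSA names of a signed type with undefined overflow): n_1 + 8 versus
   n_1 + 12 reduces to comparing the constants and returns -1; n_1 - 4
   versus n_1 returns -1; n_1 + 8 versus m_2 + 8 returns -2 because the
   two values use different names. */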
1263
1264 /* Compare values like compare_values_warnv, but treat comparisons of
1265 nonconstants which rely on undefined overflow as incomparable. */
1266
1267 static int
1268 compare_values (tree val1, tree val2)
1269 {
1270 bool sop;
1271 int ret;
1272
1273 sop = false;
1274 ret = compare_values_warnv (val1, val2, &sop);
1275 if (sop
1276 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1277 ret = -2;
1278 return ret;
1279 }
1280
1281
1282 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1283 0 if VAL is not inside VR,
1284 -2 if we cannot tell either way.
1285
1286 FIXME, the current semantics of this function are a bit quirky
1287 when taken in the context of VRP. In here we do not care
1288 about VR's type. If VR is the anti-range ~[3, 5] the call
1289 value_inside_range (4, VR) will return 1.
1290
1291 This is counter-intuitive in a strict sense, but the callers
1292 currently expect this. They are calling the function
1293 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1294 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1295 themselves.
1296
1297 This also applies to value_ranges_intersect_p and
1298 range_includes_zero_p. The semantics of VR_RANGE and
1299 VR_ANTI_RANGE should be encoded here, but that also means
1300 adapting the users of these functions to the new semantics.
1301
1302 Benchmark compile/20001226-1.c compilation time after changing this
1303 function. */
1304
1305 static inline int
1306 value_inside_range (tree val, value_range_t * vr)
1307 {
1308 int cmp1, cmp2;
1309
1310 cmp1 = operand_less_p (val, vr->min);
1311 if (cmp1 == -2)
1312 return -2;
1313 if (cmp1 == 1)
1314 return 0;
1315
1316 cmp2 = operand_less_p (vr->max, val);
1317 if (cmp2 == -2)
1318 return -2;
1319
1320 return !cmp2;
1321 }
1322
1323
1324 /* Return true if value ranges VR0 and VR1 have a non-empty
1325 intersection.
1326
1327 Benchmark compile/20001226-1.c compilation time after changing this
1328 function.
1329 */
1330
1331 static inline bool
1332 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1333 {
1334 /* The value ranges do not intersect if the maximum of the first range is
1335 less than the minimum of the second range or vice versa.
1336 When those relations are unknown, we can't do any better. */
1337 if (operand_less_p (vr0->max, vr1->min) != 0)
1338 return false;
1339 if (operand_less_p (vr1->max, vr0->min) != 0)
1340 return false;
1341 return true;
1342 }
1343
1344
1345 /* Return true if VR includes the value zero, false otherwise. FIXME,
1346 currently this will return false for an anti-range like ~[-4, 3].
1347 This will be wrong when the semantics of value_inside_range are
1348 modified (currently the users of this function expect these
1349 semantics). */
1350
1351 static inline bool
1352 range_includes_zero_p (value_range_t *vr)
1353 {
1354 tree zero;
1355
1356 gcc_assert (vr->type != VR_UNDEFINED
1357 && vr->type != VR_VARYING
1358 && !symbolic_range_p (vr));
1359
1360 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1361 return (value_inside_range (zero, vr) == 1);
1362 }
1363
1364 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1365 false otherwise or if no value range information is available. */
1366
1367 bool
1368 ssa_name_nonnegative_p (const_tree t)
1369 {
1370 value_range_t *vr = get_value_range (t);
1371
1372 if (INTEGRAL_TYPE_P (t)
1373 && TYPE_UNSIGNED (t))
1374 return true;
1375
1376 if (!vr)
1377 return false;
1378
1379 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1380 which would return a useful value should be encoded as a VR_RANGE. */
1381 if (vr->type == VR_RANGE)
1382 {
1383 int result = compare_values (vr->min, integer_zero_node);
1384
1385 return (result == 0 || result == 1);
1386 }
1387 return false;
1388 }
1389
1390 /* If OP has a value range with a single constant value return that,
1391 otherwise return NULL_TREE. This returns OP itself if OP is a
1392 constant. */
1393
1394 static tree
1395 op_with_constant_singleton_value_range (tree op)
1396 {
1397 value_range_t *vr;
1398
1399 if (is_gimple_min_invariant (op))
1400 return op;
1401
1402 if (TREE_CODE (op) != SSA_NAME)
1403 return NULL_TREE;
1404
1405 vr = get_value_range (op);
1406 if (vr->type == VR_RANGE
1407 && operand_equal_p (vr->min, vr->max, 0)
1408 && is_gimple_min_invariant (vr->min))
1409 return vr->min;
1410
1411 return NULL_TREE;
1412 }
1413
1414
1415 /* Extract value range information from an ASSERT_EXPR EXPR and store
1416 it in *VR_P. */
1417
1418 static void
1419 extract_range_from_assert (value_range_t *vr_p, tree expr)
1420 {
1421 tree var, cond, limit, min, max, type;
1422 value_range_t *var_vr, *limit_vr;
1423 enum tree_code cond_code;
1424
1425 var = ASSERT_EXPR_VAR (expr);
1426 cond = ASSERT_EXPR_COND (expr);
1427
1428 gcc_assert (COMPARISON_CLASS_P (cond));
1429
1430 /* Find VAR in the ASSERT_EXPR conditional. */
1431 if (var == TREE_OPERAND (cond, 0)
1432 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1433 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1434 {
1435 /* If the predicate is of the form VAR COMP LIMIT, then we just
1436 take LIMIT from the RHS and use the same comparison code. */
1437 cond_code = TREE_CODE (cond);
1438 limit = TREE_OPERAND (cond, 1);
1439 cond = TREE_OPERAND (cond, 0);
1440 }
1441 else
1442 {
1443 /* If the predicate is of the form LIMIT COMP VAR, then we need
1444 to flip around the comparison code to create the proper range
1445 for VAR. */
1446 cond_code = swap_tree_comparison (TREE_CODE (cond));
1447 limit = TREE_OPERAND (cond, 0);
1448 cond = TREE_OPERAND (cond, 1);
1449 }
1450
1451 limit = avoid_overflow_infinity (limit);
1452
1453 type = TREE_TYPE (limit);
1454 gcc_assert (limit != var);
1455
1456 /* For pointer arithmetic, we only keep track of pointer equality
1457 and inequality. */
1458 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1459 {
1460 set_value_range_to_varying (vr_p);
1461 return;
1462 }
1463
1464 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1465 try to use LIMIT's range to avoid creating symbolic ranges
1466 unnecessarily. */
1467 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1468
1469 /* LIMIT's range is only interesting if it has any useful information. */
1470 if (limit_vr
1471 && (limit_vr->type == VR_UNDEFINED
1472 || limit_vr->type == VR_VARYING
1473 || symbolic_range_p (limit_vr)))
1474 limit_vr = NULL;
1475
1476 /* Initially, the new range has the same set of equivalences of
1477 VAR's range. This will be revised before returning the final
1478 value. Since assertions may be chained via mutually exclusive
1479 predicates, we will need to trim the set of equivalences before
1480 we are done. */
1481 gcc_assert (vr_p->equiv == NULL);
1482 add_equivalence (&vr_p->equiv, var);
1483
1484 /* Extract a new range based on the asserted comparison for VAR and
1485 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1486 will only use it for equality comparisons (EQ_EXPR). For any
1487 other kind of assertion, we cannot derive a range from LIMIT's
1488 anti-range that can be used to describe the new range. For
1489 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1490 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1491 no single range for x_2 that could describe LE_EXPR, so we might
1492 as well build the range [b_4, +INF] for it.
1493 One special case we handle is extracting a range from a
1494 range test encoded as (unsigned)var + CST <= limit. */
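/* For illustration (hypothetical): the test (unsigned) i_2 + 10 <= 30
   is such an encoded range check; the code below recovers MIN = -10
   and MAX = 20 and hands the range [-10, 20] for i_2 to
   set_and_canonicalize_value_range. */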
1495 if (TREE_CODE (cond) == NOP_EXPR
1496 || TREE_CODE (cond) == PLUS_EXPR)
1497 {
1498 if (TREE_CODE (cond) == PLUS_EXPR)
1499 {
1500 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1501 TREE_OPERAND (cond, 1));
1502 max = int_const_binop (PLUS_EXPR, limit, min, 0);
1503 cond = TREE_OPERAND (cond, 0);
1504 }
1505 else
1506 {
1507 min = build_int_cst (TREE_TYPE (var), 0);
1508 max = limit;
1509 }
1510
1511 /* Make sure to not set TREE_OVERFLOW on the final type
1512 conversion. We are willingly interpreting large positive
1513 unsigned values as negative signed values here. */
1514 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1515 0, false);
1516 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1517 0, false);
1518
1519 /* We can transform a max, min range to an anti-range or
1520 vice-versa. Use set_and_canonicalize_value_range which does
1521 this for us. */
1522 if (cond_code == LE_EXPR)
1523 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1524 min, max, vr_p->equiv);
1525 else if (cond_code == GT_EXPR)
1526 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1527 min, max, vr_p->equiv);
1528 else
1529 gcc_unreachable ();
1530 }
1531 else if (cond_code == EQ_EXPR)
1532 {
1533 enum value_range_type range_type;
1534
1535 if (limit_vr)
1536 {
1537 range_type = limit_vr->type;
1538 min = limit_vr->min;
1539 max = limit_vr->max;
1540 }
1541 else
1542 {
1543 range_type = VR_RANGE;
1544 min = limit;
1545 max = limit;
1546 }
1547
1548 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1549
1550 /* When asserting the equality VAR == LIMIT and LIMIT is another
1551 SSA name, the new range will also inherit the equivalence set
1552 from LIMIT. */
1553 if (TREE_CODE (limit) == SSA_NAME)
1554 add_equivalence (&vr_p->equiv, limit);
1555 }
1556 else if (cond_code == NE_EXPR)
1557 {
1558 /* As described above, when LIMIT's range is an anti-range and
1559 this assertion is an inequality (NE_EXPR), then we cannot
1560 derive anything from the anti-range. For instance, if
1561 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1562 not imply that VAR's range is [0, 0]. So, in the case of
1563 anti-ranges, we just assert the inequality using LIMIT and
1564 not its anti-range.
1565
1566 If LIMIT_VR is a range, we can only use it to build a new
1567 anti-range if LIMIT_VR is a single-valued range. For
1568 instance, if LIMIT_VR is [0, 1], the predicate
1569 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1570 Rather, it means that for value 0 VAR should be ~[0, 0]
1571 and for value 1, VAR should be ~[1, 1]. We cannot
1572 represent these ranges.
1573
1574 The only situation in which we can build a valid
1575 anti-range is when LIMIT_VR is a single-valued range
1576 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1577 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1578 if (limit_vr
1579 && limit_vr->type == VR_RANGE
1580 && compare_values (limit_vr->min, limit_vr->max) == 0)
1581 {
1582 min = limit_vr->min;
1583 max = limit_vr->max;
1584 }
1585 else
1586 {
1587 /* In any other case, we cannot use LIMIT's range to build a
1588 valid anti-range. */
1589 min = max = limit;
1590 }
1591
1592 /* If MIN and MAX cover the whole range for their type, then
1593 just use the original LIMIT. */
1594 if (INTEGRAL_TYPE_P (type)
1595 && vrp_val_is_min (min)
1596 && vrp_val_is_max (max))
1597 min = max = limit;
1598
1599 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1600 }
1601 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1602 {
1603 min = TYPE_MIN_VALUE (type);
1604
1605 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1606 max = limit;
1607 else
1608 {
1609 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1610 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1611 LT_EXPR. */
1612 max = limit_vr->max;
1613 }
1614
1615 /* If the maximum value forces us to be out of bounds, simply punt.
1616 It would be pointless to try and do anything more since this
1617 all should be optimized away above us. */
1618 if ((cond_code == LT_EXPR
1619 && compare_values (max, min) == 0)
1620 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1621 set_value_range_to_varying (vr_p);
1622 else
1623 {
1624 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1625 if (cond_code == LT_EXPR)
1626 {
1627 tree one = build_int_cst (type, 1);
1628 max = fold_build2 (MINUS_EXPR, type, max, one);
1629 if (EXPR_P (max))
1630 TREE_NO_WARNING (max) = 1;
1631 }
1632
1633 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1634 }
1635 }
1636 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1637 {
1638 max = TYPE_MAX_VALUE (type);
1639
1640 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1641 min = limit;
1642 else
1643 {
1644 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1645 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1646 GT_EXPR. */
1647 min = limit_vr->min;
1648 }
1649
1650 /* If the minimum value forces us to be out of bounds, simply punt.
1651 It would be pointless to try and do anything more since this
1652 all should be optimized away above us. */
1653 if ((cond_code == GT_EXPR
1654 && compare_values (min, max) == 0)
1655 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1656 set_value_range_to_varying (vr_p);
1657 else
1658 {
1659 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1660 if (cond_code == GT_EXPR)
1661 {
1662 tree one = build_int_cst (type, 1);
1663 min = fold_build2 (PLUS_EXPR, type, min, one);
1664 if (EXPR_P (min))
1665 TREE_NO_WARNING (min) = 1;
1666 }
1667
1668 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1669 }
1670 }
1671 else
1672 gcc_unreachable ();
1673
1674 /* If VAR already had a known range, it may happen that the new
1675 range we have computed and VAR's range are not compatible. For
1676 instance,
1677
1678 if (p_5 == NULL)
1679 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1680 x_7 = p_6->fld;
1681 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1682
1683 While the above comes from a faulty program, it will cause an ICE
1684 later because p_8 and p_6 will have incompatible ranges and at
1685 the same time will be considered equivalent. A similar situation
1686 would arise from
1687
1688 if (i_5 > 10)
1689 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1690 if (i_5 < 5)
1691 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1692
1693 Again i_6 and i_7 will have incompatible ranges. It would be
1694 pointless to try and do anything with i_7's range because
1695 anything dominated by 'if (i_5 < 5)' will be optimized away.
1696 Note, due to the way in which simulation proceeds, the statement
1697 i_7 = ASSERT_EXPR <...> would never be visited because the
1698 conditional 'if (i_5 < 5)' always evaluates to false. However,
1699 this extra check does not hurt and may protect against future
1700 changes to VRP that may get into a situation similar to the
1701 NULL pointer dereference example.
1702
1703 Note that these compatibility tests are only needed when dealing
1704 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1705 are both anti-ranges, they will always be compatible, because two
1706 anti-ranges will always have a non-empty intersection. */
1707
1708 var_vr = get_value_range (var);
1709
1710 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1711 ranges or anti-ranges. */
1712 if (vr_p->type == VR_VARYING
1713 || vr_p->type == VR_UNDEFINED
1714 || var_vr->type == VR_VARYING
1715 || var_vr->type == VR_UNDEFINED
1716 || symbolic_range_p (vr_p)
1717 || symbolic_range_p (var_vr))
1718 return;
1719
1720 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1721 {
1722 /* If the two ranges have a non-empty intersection, we can
1723 refine the resulting range. Since the assert expression
1724 creates an equivalency and at the same time it asserts a
1725 predicate, we can take the intersection of the two ranges to
1726 get better precision. */
1727 if (value_ranges_intersect_p (var_vr, vr_p))
1728 {
1729 /* Use the larger of the two minimums. */
1730 if (compare_values (vr_p->min, var_vr->min) == -1)
1731 min = var_vr->min;
1732 else
1733 min = vr_p->min;
1734
1735 /* Use the smaller of the two maximums. */
1736 if (compare_values (vr_p->max, var_vr->max) == 1)
1737 max = var_vr->max;
1738 else
1739 max = vr_p->max;
1740
1741 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1742 }
1743 else
1744 {
1745 /* The two ranges do not intersect, set the new range to
1746 VARYING, because we will not be able to do anything
1747 meaningful with it. */
1748 set_value_range_to_varying (vr_p);
1749 }
1750 }
1751 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1752 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1753 {
1754 /* A range and an anti-range will cancel each other only if
1755 their ends are the same. For instance, in the example above,
1756 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1757 so VR_P should be set to VR_VARYING. */
1758 if (compare_values (var_vr->min, vr_p->min) == 0
1759 && compare_values (var_vr->max, vr_p->max) == 0)
1760 set_value_range_to_varying (vr_p);
1761 else
1762 {
1763 tree min, max, anti_min, anti_max, real_min, real_max;
1764 int cmp;
1765
1766 /* We want to compute the logical AND of the two ranges;
1767 there are three cases to consider.
1768
1769
1770 1. The VR_ANTI_RANGE range is completely within the
1771 VR_RANGE and the endpoints of the ranges are
1772 different. In that case the resulting range
1773 should be whichever range is more precise.
1774 Typically that will be the VR_RANGE.
1775
1776 2. The VR_ANTI_RANGE is completely disjoint from
1777 the VR_RANGE. In this case the resulting range
1778 should be the VR_RANGE.
1779
1780 3. There is some overlap between the VR_ANTI_RANGE
1781 and the VR_RANGE.
1782
1783 3a. If the high limit of the VR_ANTI_RANGE resides
1784 within the VR_RANGE, then the result is a new
1785 VR_RANGE starting at the high limit of the
1786 VR_ANTI_RANGE + 1 and extending to the
1787 high limit of the original VR_RANGE.
1788
1789 3b. If the low limit of the VR_ANTI_RANGE resides
1790 within the VR_RANGE, then the result is a new
1791 VR_RANGE starting at the low limit of the original
1792 VR_RANGE and extending to the low limit of the
1793 VR_ANTI_RANGE - 1. */
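/* A small worked example of case 3a (hypothetical values): with the
   real range [0, 100] and the anti-range ~[0, 10], the anti-range
   clips the low end and the code below produces the range
   [11, 100]. */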
1794 if (vr_p->type == VR_ANTI_RANGE)
1795 {
1796 anti_min = vr_p->min;
1797 anti_max = vr_p->max;
1798 real_min = var_vr->min;
1799 real_max = var_vr->max;
1800 }
1801 else
1802 {
1803 anti_min = var_vr->min;
1804 anti_max = var_vr->max;
1805 real_min = vr_p->min;
1806 real_max = vr_p->max;
1807 }
1808
1809
1810 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1811 not including any endpoints. */
1812 if (compare_values (anti_max, real_max) == -1
1813 && compare_values (anti_min, real_min) == 1)
1814 {
1815 /* If the range is covering the whole valid range of
1816 the type keep the anti-range. */
1817 if (!vrp_val_is_min (real_min)
1818 || !vrp_val_is_max (real_max))
1819 set_value_range (vr_p, VR_RANGE, real_min,
1820 real_max, vr_p->equiv);
1821 }
1822 /* Case 2, VR_ANTI_RANGE completely disjoint from
1823 VR_RANGE. */
1824 else if (compare_values (anti_min, real_max) == 1
1825 || compare_values (anti_max, real_min) == -1)
1826 {
1827 set_value_range (vr_p, VR_RANGE, real_min,
1828 real_max, vr_p->equiv);
1829 }
1830 /* Case 3a, the anti-range extends into the low
1831 part of the real range. Thus creating a new
1832 low for the real range. */
1833 else if (((cmp = compare_values (anti_max, real_min)) == 1
1834 || cmp == 0)
1835 && compare_values (anti_max, real_max) == -1)
1836 {
1837 gcc_assert (!is_positive_overflow_infinity (anti_max));
1838 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1839 && vrp_val_is_max (anti_max))
1840 {
1841 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1842 {
1843 set_value_range_to_varying (vr_p);
1844 return;
1845 }
1846 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1847 }
1848 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1849 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1850 anti_max,
1851 build_int_cst (TREE_TYPE (var_vr->min), 1));
1852 else
1853 min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1854 anti_max, size_int (1));
1855 max = real_max;
1856 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1857 }
1858 /* Case 3b, the anti-range extends into the high
1859 part of the real range, creating a new upper
1860 bound for the real range. */
1861 else if (compare_values (anti_min, real_min) == 1
1862 && ((cmp = compare_values (anti_min, real_max)) == -1
1863 || cmp == 0))
1864 {
1865 gcc_assert (!is_negative_overflow_infinity (anti_min));
1866 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1867 && vrp_val_is_min (anti_min))
1868 {
1869 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1870 {
1871 set_value_range_to_varying (vr_p);
1872 return;
1873 }
1874 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1875 }
1876 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1877 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1878 anti_min,
1879 build_int_cst (TREE_TYPE (var_vr->min), 1));
1880 else
1881 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1882 anti_min,
1883 size_int (-1));
1884 min = real_min;
1885 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1886 }
1887 }
1888 }
1889 }
1890
1891
1892 /* Extract range information from SSA name VAR and store it in VR. If
1893 VAR has an interesting range, use it. Otherwise, create the
1894 range [VAR, VAR] and return it. This is useful in situations where
1895 we may have conditionals testing values of VARYING names. For
1896 instance,
1897
1898 x_3 = y_5;
1899 if (x_3 > y_5)
1900 ...
1901
1902 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1903 always false. */
1904
1905 static void
1906 extract_range_from_ssa_name (value_range_t *vr, tree var)
1907 {
1908 value_range_t *var_vr = get_value_range (var);
1909
1910 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1911 copy_value_range (vr, var_vr);
1912 else
1913 set_value_range (vr, VR_RANGE, var, var, NULL);
1914
1915 add_equivalence (&vr->equiv, var);
1916 }
1917
1918
1919 /* Wrapper around int_const_binop. If the operation overflows and we
1920 are not using wrapping arithmetic, then adjust the result to be
1921 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1922 NULL_TREE if we need to use an overflow infinity representation but
1923 the type does not support it. */
1924
1925 static tree
1926 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1927 {
1928 tree res;
1929
1930 res = int_const_binop (code, val1, val2, 0);
1931
1932 /* If we are using unsigned arithmetic, operate symbolically
1933 on -INF and +INF as int_const_binop only handles signed overflow. */
1934 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1935 {
1936 int checkz = compare_values (res, val1);
1937 bool overflow = false;
1938
1939 /* Ensure that res = val1 [+*] val2 >= val1
1940 or that res = val1 - val2 <= val1. */
1941 if ((code == PLUS_EXPR
1942 && !(checkz == 1 || checkz == 0))
1943 || (code == MINUS_EXPR
1944 && !(checkz == 0 || checkz == -1)))
1945 {
1946 overflow = true;
1947 }
1948 /* Checking for multiplication overflow is done by dividing the
1949 output of the multiplication by the first input of the
1950 multiplication. If the result of that division operation is
1951 not equal to the second input of the multiplication, then the
1952 multiplication overflowed. */
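/* For example, in a 16-bit unsigned type 300 * 300 wraps to 24464;
   dividing that back by 300 gives 81 rather than 300, so the
   overflow is detected. */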
1953 else if (code == MULT_EXPR && !integer_zerop (val1))
1954 {
1955 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1956 res,
1957 val1, 0);
1958 int check = compare_values (tmp, val2);
1959
1960 if (check != 0)
1961 overflow = true;
1962 }
1963
1964 if (overflow)
1965 {
1966 res = copy_node (res);
1967 TREE_OVERFLOW (res) = 1;
1968 }
1969
1970 }
1971 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1972 /* If the signed operation wraps, then int_const_binop has done
1973 everything we want. */
1974 ;
1975 else if ((TREE_OVERFLOW (res)
1976 && !TREE_OVERFLOW (val1)
1977 && !TREE_OVERFLOW (val2))
1978 || is_overflow_infinity (val1)
1979 || is_overflow_infinity (val2))
1980 {
1981 /* If the operation overflowed but neither VAL1 nor VAL2 is an
1982 overflowed value, return -INF or +INF depending on the operation
1983 and the combination of signs of the operands. */
1984 int sgn1 = tree_int_cst_sgn (val1);
1985 int sgn2 = tree_int_cst_sgn (val2);
1986
1987 if (needs_overflow_infinity (TREE_TYPE (res))
1988 && !supports_overflow_infinity (TREE_TYPE (res)))
1989 return NULL_TREE;
1990
1991 /* We have to punt on adding infinities of different signs,
1992 since we can't tell what the sign of the result should be.
1993 Likewise for subtracting infinities of the same sign. */
1994 if (((code == PLUS_EXPR && sgn1 != sgn2)
1995 || (code == MINUS_EXPR && sgn1 == sgn2))
1996 && is_overflow_infinity (val1)
1997 && is_overflow_infinity (val2))
1998 return NULL_TREE;
1999
2000 /* Don't try to handle division or shifting of infinities. */
2001 if ((code == TRUNC_DIV_EXPR
2002 || code == FLOOR_DIV_EXPR
2003 || code == CEIL_DIV_EXPR
2004 || code == EXACT_DIV_EXPR
2005 || code == ROUND_DIV_EXPR
2006 || code == RSHIFT_EXPR)
2007 && (is_overflow_infinity (val1)
2008 || is_overflow_infinity (val2)))
2009 return NULL_TREE;
2010
2011 /* Notice that we only need to handle the restricted set of
2012 operations handled by extract_range_from_binary_expr.
2013 Among them, only multiplication, addition and subtraction
2014 can yield overflow without overflown operands because we
2015 are working with integral types only... except in the
2016 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2017 for division too. */
2018
2019 /* For multiplication, the sign of the overflow is given
2020 by the comparison of the signs of the operands. */
2021 if ((code == MULT_EXPR && sgn1 == sgn2)
2022 /* For addition, the operands must be of the same sign
2023 to yield an overflow. Its sign is therefore that
2024 of one of the operands, for example the first. For
2025 infinite operands X + -INF is negative, not positive. */
2026 || (code == PLUS_EXPR
2027 && (sgn1 >= 0
2028 ? !is_negative_overflow_infinity (val2)
2029 : is_positive_overflow_infinity (val2)))
2030 /* For subtraction, non-infinite operands must be of
2031 different signs to yield an overflow. Its sign is
2032 therefore that of the first operand or the opposite of
2033 that of the second operand. A first operand of 0 counts
2034 as positive here, for the corner case 0 - (-INF), which
2035 overflows, but must yield +INF. For infinite operands 0
2036 - INF is negative, not positive. */
2037 || (code == MINUS_EXPR
2038 && (sgn1 >= 0
2039 ? !is_positive_overflow_infinity (val2)
2040 : is_negative_overflow_infinity (val2)))
2041 /* We only get in here with positive shift count, so the
2042 overflow direction is the same as the sign of val1.
2043 Actually rshift does not overflow at all, but we only
2044 handle the case of shifting overflowed -INF and +INF. */
2045 || (code == RSHIFT_EXPR
2046 && sgn1 >= 0)
2047 /* For division, the only case is -INF / -1 = +INF. */
2048 || code == TRUNC_DIV_EXPR
2049 || code == FLOOR_DIV_EXPR
2050 || code == CEIL_DIV_EXPR
2051 || code == EXACT_DIV_EXPR
2052 || code == ROUND_DIV_EXPR)
2053 return (needs_overflow_infinity (TREE_TYPE (res))
2054 ? positive_overflow_infinity (TREE_TYPE (res))
2055 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2056 else
2057 return (needs_overflow_infinity (TREE_TYPE (res))
2058 ? negative_overflow_infinity (TREE_TYPE (res))
2059 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2060 }
2061
2062 return res;
2063 }
2064
2065
2066 /* Extract range information from a binary expression EXPR based on
2067 the ranges of each of its operands and the expression code. */
2068
2069 static void
2070 extract_range_from_binary_expr (value_range_t *vr,
2071 enum tree_code code,
2072 tree expr_type, tree op0, tree op1)
2073 {
2074 enum value_range_type type;
2075 tree min, max;
2076 int cmp;
2077 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2078 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2079
2080 /* Not all binary expressions can be applied to ranges in a
2081 meaningful way. Handle only arithmetic operations. */
2082 if (code != PLUS_EXPR
2083 && code != MINUS_EXPR
2084 && code != POINTER_PLUS_EXPR
2085 && code != MULT_EXPR
2086 && code != TRUNC_DIV_EXPR
2087 && code != FLOOR_DIV_EXPR
2088 && code != CEIL_DIV_EXPR
2089 && code != EXACT_DIV_EXPR
2090 && code != ROUND_DIV_EXPR
2091 && code != TRUNC_MOD_EXPR
2092 && code != RSHIFT_EXPR
2093 && code != MIN_EXPR
2094 && code != MAX_EXPR
2095 && code != BIT_AND_EXPR
2096 && code != BIT_IOR_EXPR
2097 && code != TRUTH_AND_EXPR
2098 && code != TRUTH_OR_EXPR)
2099 {
2100 /* We can still do constant propagation here. */
2101 tree const_op0 = op_with_constant_singleton_value_range (op0);
2102 tree const_op1 = op_with_constant_singleton_value_range (op1);
2103 if (const_op0 || const_op1)
2104 {
2105 tree tem = fold_binary (code, expr_type,
2106 const_op0 ? const_op0 : op0,
2107 const_op1 ? const_op1 : op1);
2108 if (tem
2109 && is_gimple_min_invariant (tem)
2110 && !is_overflow_infinity (tem))
2111 {
2112 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2113 return;
2114 }
2115 }
2116 set_value_range_to_varying (vr);
2117 return;
2118 }
2119
2120 /* Get value ranges for each operand. For constant operands, create
2121 a new value range with the operand to simplify processing. */
2122 if (TREE_CODE (op0) == SSA_NAME)
2123 vr0 = *(get_value_range (op0));
2124 else if (is_gimple_min_invariant (op0))
2125 set_value_range_to_value (&vr0, op0, NULL);
2126 else
2127 set_value_range_to_varying (&vr0);
2128
2129 if (TREE_CODE (op1) == SSA_NAME)
2130 vr1 = *(get_value_range (op1));
2131 else if (is_gimple_min_invariant (op1))
2132 set_value_range_to_value (&vr1, op1, NULL);
2133 else
2134 set_value_range_to_varying (&vr1);
2135
2136 /* If either range is UNDEFINED, so is the result. */
2137 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2138 {
2139 set_value_range_to_undefined (vr);
2140 return;
2141 }
2142
2143 /* The type of the resulting value range defaults to VR0.TYPE. */
2144 type = vr0.type;
2145
2146 /* Refuse to operate on VARYING ranges, ranges of different kinds
2147 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2148 because we may be able to derive a useful range even if one of
2149 the operands is VR_VARYING or a symbolic range. Similarly for
2150 divisions. TODO: we may be able to derive anti-ranges in
2151 some cases. */
2152 if (code != BIT_AND_EXPR
2153 && code != TRUTH_AND_EXPR
2154 && code != TRUTH_OR_EXPR
2155 && code != TRUNC_DIV_EXPR
2156 && code != FLOOR_DIV_EXPR
2157 && code != CEIL_DIV_EXPR
2158 && code != EXACT_DIV_EXPR
2159 && code != ROUND_DIV_EXPR
2160 && code != TRUNC_MOD_EXPR
2161 && (vr0.type == VR_VARYING
2162 || vr1.type == VR_VARYING
2163 || vr0.type != vr1.type
2164 || symbolic_range_p (&vr0)
2165 || symbolic_range_p (&vr1)))
2166 {
2167 set_value_range_to_varying (vr);
2168 return;
2169 }
2170
2171 /* Now evaluate the expression to determine the new range. */
2172 if (POINTER_TYPE_P (expr_type)
2173 || POINTER_TYPE_P (TREE_TYPE (op0))
2174 || POINTER_TYPE_P (TREE_TYPE (op1)))
2175 {
2176 if (code == MIN_EXPR || code == MAX_EXPR)
2177 {
2178 /* For MIN/MAX expressions with pointers, we only care about
2179 nullness. If both are non-NULL, then the result is non-NULL.
2180 If both are NULL, then the result is NULL. Otherwise the
2181 result is varying. */
2182 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2183 set_value_range_to_nonnull (vr, expr_type);
2184 else if (range_is_null (&vr0) && range_is_null (&vr1))
2185 set_value_range_to_null (vr, expr_type);
2186 else
2187 set_value_range_to_varying (vr);
2188
2189 return;
2190 }
2191 if (code == POINTER_PLUS_EXPR)
2192 {
2193 /* For pointer types, we are really only interested in asserting
2194 whether the expression evaluates to non-NULL. */
2195 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2196 set_value_range_to_nonnull (vr, expr_type);
2197 else if (range_is_null (&vr0) && range_is_null (&vr1))
2198 set_value_range_to_null (vr, expr_type);
2199 else
2200 set_value_range_to_varying (vr);
2201 }
2202 else if (code == BIT_AND_EXPR)
2203 {
2204 /* For pointer types, we are really only interested in asserting
2205 whether the expression evaluates to non-NULL. */
2206 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2207 set_value_range_to_nonnull (vr, expr_type);
2208 else if (range_is_null (&vr0) || range_is_null (&vr1))
2209 set_value_range_to_null (vr, expr_type);
2210 else
2211 set_value_range_to_varying (vr);
2212 }
2213 else
2214 gcc_unreachable ();
2215
2216 return;
2217 }
2218
2219 /* For integer ranges, apply the operation to each end of the
2220 range and see what we end up with. */
2221 if (code == TRUTH_AND_EXPR
2222 || code == TRUTH_OR_EXPR)
2223 {
2224 /* If one of the operands is zero, we know that the whole
2225 expression evaluates to zero. */
2226 if (code == TRUTH_AND_EXPR
2227 && ((vr0.type == VR_RANGE
2228 && integer_zerop (vr0.min)
2229 && integer_zerop (vr0.max))
2230 || (vr1.type == VR_RANGE
2231 && integer_zerop (vr1.min)
2232 && integer_zerop (vr1.max))))
2233 {
2234 type = VR_RANGE;
2235 min = max = build_int_cst (expr_type, 0);
2236 }
2237 /* If one of the operands is one, we know that the whole
2238 expression evaluates to one. */
2239 else if (code == TRUTH_OR_EXPR
2240 && ((vr0.type == VR_RANGE
2241 && integer_onep (vr0.min)
2242 && integer_onep (vr0.max))
2243 || (vr1.type == VR_RANGE
2244 && integer_onep (vr1.min)
2245 && integer_onep (vr1.max))))
2246 {
2247 type = VR_RANGE;
2248 min = max = build_int_cst (expr_type, 1);
2249 }
2250 else if (vr0.type != VR_VARYING
2251 && vr1.type != VR_VARYING
2252 && vr0.type == vr1.type
2253 && !symbolic_range_p (&vr0)
2254 && !overflow_infinity_range_p (&vr0)
2255 && !symbolic_range_p (&vr1)
2256 && !overflow_infinity_range_p (&vr1))
2257 {
2258 /* Boolean expressions cannot be folded with int_const_binop. */
2259 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2260 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2261 }
2262 else
2263 {
2264 /* The result of a TRUTH_*_EXPR is always true or false. */
2265 set_value_range_to_truthvalue (vr, expr_type);
2266 return;
2267 }
2268 }
2269 else if (code == PLUS_EXPR
2270 || code == MIN_EXPR
2271 || code == MAX_EXPR)
2272 {
2273 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2274 VR_VARYING. It would take more effort to compute a precise
2275 range for such a case. For example, if we have op0 == 1 and
2276 op1 == -1 with their ranges both being ~[0,0], we would have
2277 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2278 Note that we are guaranteed to have vr0.type == vr1.type at
2279 this point. */
2280 if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2281 {
2282 set_value_range_to_varying (vr);
2283 return;
2284 }
2285
2286 /* For operations that make the resulting range directly
2287 proportional to the original ranges, apply the operation to
2288 the same end of each range. */
2289 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2290 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2291
2292 /* If both additions overflowed, the range kind is still correct.
2293 This happens regularly with subtracting something in unsigned
2294 arithmetic.
2295 ??? See PR30318 for all the cases we do not handle. */
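/* For instance, in an 8-bit unsigned type, adding [246, 246] to
   [10, 20] (i.e. subtracting 10) wraps both bounds and gives
   [0, 10], which is still the correct resulting range. */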
2296 if (code == PLUS_EXPR
2297 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2298 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2299 {
2300 min = build_int_cst_wide (TREE_TYPE (min),
2301 TREE_INT_CST_LOW (min),
2302 TREE_INT_CST_HIGH (min));
2303 max = build_int_cst_wide (TREE_TYPE (max),
2304 TREE_INT_CST_LOW (max),
2305 TREE_INT_CST_HIGH (max));
2306 }
2307 }
2308 else if (code == MULT_EXPR
2309 || code == TRUNC_DIV_EXPR
2310 || code == FLOOR_DIV_EXPR
2311 || code == CEIL_DIV_EXPR
2312 || code == EXACT_DIV_EXPR
2313 || code == ROUND_DIV_EXPR
2314 || code == RSHIFT_EXPR)
2315 {
2316 tree val[4];
2317 size_t i;
2318 bool sop;
2319
2320 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2321 drop to VR_VARYING. It would take more effort to compute a
2322 precise range for such a case. For example, if we have
2323 op0 == 65536 and op1 == 65536 with their ranges both being
2324 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2325 we cannot claim that the product is in ~[0,0]. Note that we
2326 are guaranteed to have vr0.type == vr1.type at this
2327 point. */
2328 if (code == MULT_EXPR
2329 && vr0.type == VR_ANTI_RANGE
2330 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2331 {
2332 set_value_range_to_varying (vr);
2333 return;
2334 }
2335
2336 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2337 then drop to VR_VARYING. Outside of this range we get undefined
2338 behavior from the shift operation. We cannot even trust
2339 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2340 shifts, and the operation at the tree level may be widened. */
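/* For example, with a 32-bit operand a shift count range of [0, 31]
   can be used, while a range such as [0, 35] forces the result to
   VR_VARYING. */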
2341 if (code == RSHIFT_EXPR)
2342 {
2343 if (vr1.type == VR_ANTI_RANGE
2344 || !vrp_expr_computes_nonnegative (op1, &sop)
2345 || (operand_less_p
2346 (build_int_cst (TREE_TYPE (vr1.max),
2347 TYPE_PRECISION (expr_type) - 1),
2348 vr1.max) != 0))
2349 {
2350 set_value_range_to_varying (vr);
2351 return;
2352 }
2353 }
2354
2355 else if ((code == TRUNC_DIV_EXPR
2356 || code == FLOOR_DIV_EXPR
2357 || code == CEIL_DIV_EXPR
2358 || code == EXACT_DIV_EXPR
2359 || code == ROUND_DIV_EXPR)
2360 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2361 {
2362 /* For division, if op1 has VR_RANGE but op0 does not, something
2363 can be deduced just from that range. Say [min, max] / [4, max]
2364 gives the range [min / 4, max / 4]. */
2365 if (vr1.type == VR_RANGE
2366 && !symbolic_range_p (&vr1)
2367 && !range_includes_zero_p (&vr1))
2368 {
2369 vr0.type = type = VR_RANGE;
2370 vr0.min = vrp_val_min (TREE_TYPE (op0));
2371 vr0.max = vrp_val_max (TREE_TYPE (op1));
2372 }
2373 else
2374 {
2375 set_value_range_to_varying (vr);
2376 return;
2377 }
2378 }
2379
2380 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2381 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2382 include 0. */
2383 if ((code == TRUNC_DIV_EXPR
2384 || code == FLOOR_DIV_EXPR
2385 || code == CEIL_DIV_EXPR
2386 || code == EXACT_DIV_EXPR
2387 || code == ROUND_DIV_EXPR)
2388 && vr0.type == VR_RANGE
2389 && (vr1.type != VR_RANGE
2390 || symbolic_range_p (&vr1)
2391 || range_includes_zero_p (&vr1)))
2392 {
2393 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2394 int cmp;
2395
2396 sop = false;
2397 min = NULL_TREE;
2398 max = NULL_TREE;
2399 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2400 {
2401 /* For unsigned division or when divisor is known
2402 to be non-negative, the range has to cover
2403 all numbers from 0 to max for positive max
2404 and all numbers from min to 0 for negative min. */
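/* For example, if the dividend has the range [4, 20] the quotient
   range becomes [0, 20]; for a dividend range of [-20, -4] it
   becomes [-20, 0]. */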
2405 cmp = compare_values (vr0.max, zero);
2406 if (cmp == -1)
2407 max = zero;
2408 else if (cmp == 0 || cmp == 1)
2409 max = vr0.max;
2410 else
2411 type = VR_VARYING;
2412 cmp = compare_values (vr0.min, zero);
2413 if (cmp == 1)
2414 min = zero;
2415 else if (cmp == 0 || cmp == -1)
2416 min = vr0.min;
2417 else
2418 type = VR_VARYING;
2419 }
2420 else
2421 {
2422 /* Otherwise the range is -max .. max or min .. -min
2423 depending on which bound is bigger in absolute value,
2424 as the division can change the sign. */
2425 abs_extent_range (vr, vr0.min, vr0.max);
2426 return;
2427 }
2428 if (type == VR_VARYING)
2429 {
2430 set_value_range_to_varying (vr);
2431 return;
2432 }
2433 }
2434
2435 /* Multiplications and divisions are a bit tricky to handle,
2436 depending on the mix of signs we have in the two ranges, we
2437 need to operate on different values to get the minimum and
2438 maximum values for the new range. One approach is to figure
2439 out all the variations of range combinations and do the
2440 operations.
2441
2442 However, this involves several calls to compare_values and it
2443 is pretty convoluted. It's simpler to do the 4 operations
2444 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2445 and then figure the smallest and largest values to form
2446 the new range. */
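/* For instance, [-3, 5] * [2, 4] produces the cross products
   -6, -12, 10 and 20, so the resulting range is [-12, 20]. */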
2447 else
2448 {
2449 gcc_assert ((vr0.type == VR_RANGE
2450 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2451 && vr0.type == vr1.type);
2452
2453 /* Compute the 4 cross operations. */
2454 sop = false;
2455 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2456 if (val[0] == NULL_TREE)
2457 sop = true;
2458
2459 if (vr1.max == vr1.min)
2460 val[1] = NULL_TREE;
2461 else
2462 {
2463 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2464 if (val[1] == NULL_TREE)
2465 sop = true;
2466 }
2467
2468 if (vr0.max == vr0.min)
2469 val[2] = NULL_TREE;
2470 else
2471 {
2472 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2473 if (val[2] == NULL_TREE)
2474 sop = true;
2475 }
2476
2477 if (vr0.min == vr0.max || vr1.min == vr1.max)
2478 val[3] = NULL_TREE;
2479 else
2480 {
2481 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2482 if (val[3] == NULL_TREE)
2483 sop = true;
2484 }
2485
2486 if (sop)
2487 {
2488 set_value_range_to_varying (vr);
2489 return;
2490 }
2491
2492 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2493 of VAL[i]. */
2494 min = val[0];
2495 max = val[0];
2496 for (i = 1; i < 4; i++)
2497 {
2498 if (!is_gimple_min_invariant (min)
2499 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2500 || !is_gimple_min_invariant (max)
2501 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2502 break;
2503
2504 if (val[i])
2505 {
2506 if (!is_gimple_min_invariant (val[i])
2507 || (TREE_OVERFLOW (val[i])
2508 && !is_overflow_infinity (val[i])))
2509 {
2510 /* If we found an overflowed value, set MIN and MAX
2511 to it so that we set the resulting range to
2512 VARYING. */
2513 min = max = val[i];
2514 break;
2515 }
2516
2517 if (compare_values (val[i], min) == -1)
2518 min = val[i];
2519
2520 if (compare_values (val[i], max) == 1)
2521 max = val[i];
2522 }
2523 }
2524 }
2525 }
2526 else if (code == TRUNC_MOD_EXPR)
2527 {
2528 bool sop = false;
2529 if (vr1.type != VR_RANGE
2530 || symbolic_range_p (&vr1)
2531 || range_includes_zero_p (&vr1)
2532 || vrp_val_is_min (vr1.min))
2533 {
2534 set_value_range_to_varying (vr);
2535 return;
2536 }
2537 type = VR_RANGE;
2538 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
2539 max = fold_unary_to_constant (ABS_EXPR, TREE_TYPE (vr1.min), vr1.min);
2540 if (tree_int_cst_lt (max, vr1.max))
2541 max = vr1.max;
2542 max = int_const_binop (MINUS_EXPR, max, integer_one_node, 0);
2543 /* If the dividend is non-negative, the modulus will be
2544 non-negative as well. */
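/* For instance, with a divisor range of [3, 10] the bound computed
   above is 9, so the result is [0, 9] when the dividend is known to
   be non-negative and [-9, 9] otherwise. */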
2545 if (TYPE_UNSIGNED (TREE_TYPE (max))
2546 || (vrp_expr_computes_nonnegative (op0, &sop) && !sop))
2547 min = build_int_cst (TREE_TYPE (max), 0);
2548 else
2549 min = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (max), max);
2550 }
2551 else if (code == MINUS_EXPR)
2552 {
2553 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2554 VR_VARYING. It would take more effort to compute a precise
2555 range for such a case. For example, if we have op0 == 1 and
2556 op1 == 1 with their ranges both being ~[0,0], we would have
2557 op0 - op1 == 0, so we cannot claim that the difference is in
2558 ~[0,0]. Note that we are guaranteed to have
2559 vr0.type == vr1.type at this point. */
2560 if (vr0.type == VR_ANTI_RANGE)
2561 {
2562 set_value_range_to_varying (vr);
2563 return;
2564 }
2565
2566 /* For MINUS_EXPR, apply the operation to the opposite ends of
2567 each range. */
2568 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2569 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2570 }
2571 else if (code == BIT_AND_EXPR)
2572 {
2573 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2574
2575 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2576 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2577
2578 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2579 min = max = int_const_binop (code, vr0.max, vr1.max, 0);
2580 else if (vr0_int_cst_singleton_p
2581 && tree_int_cst_sgn (vr0.max) >= 0)
2582 {
2583 min = build_int_cst (expr_type, 0);
2584 max = vr0.max;
2585 }
2586 else if (vr1_int_cst_singleton_p
2587 && tree_int_cst_sgn (vr1.max) >= 0)
2588 {
2589 type = VR_RANGE;
2590 min = build_int_cst (expr_type, 0);
2591 max = vr1.max;
2592 }
2593 else
2594 {
2595 set_value_range_to_varying (vr);
2596 return;
2597 }
2598 }
2599 else if (code == BIT_IOR_EXPR)
2600 {
2601 if (range_int_cst_p (&vr0)
2602 && range_int_cst_p (&vr1)
2603 && tree_int_cst_sgn (vr0.min) >= 0
2604 && tree_int_cst_sgn (vr1.min) >= 0)
2605 {
2606 double_int vr0_max = tree_to_double_int (vr0.max);
2607 double_int vr1_max = tree_to_double_int (vr1.max);
2608 double_int ior_max;
2609
2610 /* Set all bits to the right of the most significant one to 1.
2611 For example, [0, 4] | [4, 4] = [4, 7]. */
2612 ior_max.low = vr0_max.low | vr1_max.low;
2613 ior_max.high = vr0_max.high | vr1_max.high;
2614 if (ior_max.high != 0)
2615 {
2616 ior_max.low = ~(unsigned HOST_WIDE_INT)0u;
2617 ior_max.high |= ((HOST_WIDE_INT) 1
2618 << floor_log2 (ior_max.high)) - 1;
2619 }
2620 else if (ior_max.low != 0)
2621 ior_max.low |= ((unsigned HOST_WIDE_INT) 1u
2622 << floor_log2 (ior_max.low)) - 1;
2623
2624 /* Both of these endpoints are conservative. */
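/* For instance, [1, 4] | [2, 8] yields min = MAX (1, 2) = 2, since
   OR-ing non-negative values never clears bits, and max = 15
   (4 | 8 = 12 with all bits below its most significant bit set),
   even though the exact maximum is 12. */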
2625 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2626 max = double_int_to_tree (expr_type, ior_max);
2627 }
2628 else
2629 {
2630 set_value_range_to_varying (vr);
2631 return;
2632 }
2633 }
2634 else
2635 gcc_unreachable ();
2636
2637 /* If either MIN or MAX overflowed, then set the resulting range to
2638 VARYING. But we do accept an overflow infinity
2639 representation. */
2640 if (min == NULL_TREE
2641 || !is_gimple_min_invariant (min)
2642 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2643 || max == NULL_TREE
2644 || !is_gimple_min_invariant (max)
2645 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2646 {
2647 set_value_range_to_varying (vr);
2648 return;
2649 }
2650
2651 /* We punt if:
2652 1) [-INF, +INF]
2653 2) [-INF, +-INF(OVF)]
2654 3) [+-INF(OVF), +INF]
2655 4) [+-INF(OVF), +-INF(OVF)]
2656 We learn nothing when we have INF and INF(OVF) on both sides.
2657 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2658 overflow. */
2659 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2660 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2661 {
2662 set_value_range_to_varying (vr);
2663 return;
2664 }
2665
2666 cmp = compare_values (min, max);
2667 if (cmp == -2 || cmp == 1)
2668 {
2669 /* If the new range has its limits swapped around (MIN > MAX),
2670 then the operation caused one of them to wrap around, mark
2671 the new range VARYING. */
2672 set_value_range_to_varying (vr);
2673 }
2674 else
2675 set_value_range (vr, type, min, max, NULL);
2676 }
2677
2678
2679 /* Extract range information from a unary expression EXPR based on
2680 the range of its operand and the expression code. */
2681
2682 static void
2683 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2684 tree type, tree op0)
2685 {
2686 tree min, max;
2687 int cmp;
2688 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2689
2690 /* Refuse to operate on certain unary expressions for which we
2691 cannot easily determine a resulting range. */
2692 if (code == FIX_TRUNC_EXPR
2693 || code == FLOAT_EXPR
2694 || code == BIT_NOT_EXPR
2695 || code == CONJ_EXPR)
2696 {
2697 /* We can still do constant propagation here. */
2698 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2699 {
2700 tree tem = fold_unary (code, type, op0);
2701 if (tem
2702 && is_gimple_min_invariant (tem)
2703 && !is_overflow_infinity (tem))
2704 {
2705 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2706 return;
2707 }
2708 }
2709 set_value_range_to_varying (vr);
2710 return;
2711 }
2712
2713 /* Get value ranges for the operand. For constant operands, create
2714 a new value range with the operand to simplify processing. */
2715 if (TREE_CODE (op0) == SSA_NAME)
2716 vr0 = *(get_value_range (op0));
2717 else if (is_gimple_min_invariant (op0))
2718 set_value_range_to_value (&vr0, op0, NULL);
2719 else
2720 set_value_range_to_varying (&vr0);
2721
2722 /* If VR0 is UNDEFINED, so is the result. */
2723 if (vr0.type == VR_UNDEFINED)
2724 {
2725 set_value_range_to_undefined (vr);
2726 return;
2727 }
2728
2729 /* Refuse to operate on symbolic ranges, or if the operand is not
2730 of a pointer or integral type. */
2731 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2732 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2733 || (vr0.type != VR_VARYING
2734 && symbolic_range_p (&vr0)))
2735 {
2736 set_value_range_to_varying (vr);
2737 return;
2738 }
2739
2740 /* If the expression involves pointers, we are only interested in
2741 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2742 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2743 {
2744 bool sop;
2745
2746 sop = false;
2747 if (range_is_nonnull (&vr0)
2748 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2749 && !sop))
2750 set_value_range_to_nonnull (vr, type);
2751 else if (range_is_null (&vr0))
2752 set_value_range_to_null (vr, type);
2753 else
2754 set_value_range_to_varying (vr);
2755
2756 return;
2757 }
2758
2759 /* Handle unary expressions on integer ranges. */
2760 if (CONVERT_EXPR_CODE_P (code)
2761 && INTEGRAL_TYPE_P (type)
2762 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2763 {
2764 tree inner_type = TREE_TYPE (op0);
2765 tree outer_type = type;
2766
2767 /* If VR0 is varying and we increase the type precision, assume
2768 a full range for the following transformation. */
2769 if (vr0.type == VR_VARYING
2770 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2771 {
2772 vr0.type = VR_RANGE;
2773 vr0.min = TYPE_MIN_VALUE (inner_type);
2774 vr0.max = TYPE_MAX_VALUE (inner_type);
2775 }
2776
2777 /* If VR0 is a constant range or anti-range and the conversion is
2778 not truncating we can convert the min and max values and
2779 canonicalize the resulting range. Otherwise we can do the
2780 conversion if the size of the range is less than what the
2781 precision of the target type can represent and the range is
2782 not an anti-range. */
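/* For instance, converting the char range [10, 20] to int simply
   converts both bounds; converting the int range [0, 200] to
   unsigned char is also accepted because the span of the range
   fits in the 8 bits of the target type. */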
2783 if ((vr0.type == VR_RANGE
2784 || vr0.type == VR_ANTI_RANGE)
2785 && TREE_CODE (vr0.min) == INTEGER_CST
2786 && TREE_CODE (vr0.max) == INTEGER_CST
2787 && (!is_overflow_infinity (vr0.min)
2788 || (vr0.type == VR_RANGE
2789 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2790 && needs_overflow_infinity (outer_type)
2791 && supports_overflow_infinity (outer_type)))
2792 && (!is_overflow_infinity (vr0.max)
2793 || (vr0.type == VR_RANGE
2794 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2795 && needs_overflow_infinity (outer_type)
2796 && supports_overflow_infinity (outer_type)))
2797 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2798 || (vr0.type == VR_RANGE
2799 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2800 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0),
2801 size_int (TYPE_PRECISION (outer_type)), 0)))))
2802 {
2803 tree new_min, new_max;
2804 new_min = force_fit_type_double (outer_type,
2805 tree_to_double_int (vr0.min),
2806 0, false);
2807 new_max = force_fit_type_double (outer_type,
2808 tree_to_double_int (vr0.max),
2809 0, false);
2810 if (is_overflow_infinity (vr0.min))
2811 new_min = negative_overflow_infinity (outer_type);
2812 if (is_overflow_infinity (vr0.max))
2813 new_max = positive_overflow_infinity (outer_type);
2814 set_and_canonicalize_value_range (vr, vr0.type,
2815 new_min, new_max, NULL);
2816 return;
2817 }
2818
2819 set_value_range_to_varying (vr);
2820 return;
2821 }
2822
2823 /* Conversion of a VR_VARYING value to a wider type can result
2824 in a usable range. So wait until after we've handled conversions
2825 before dropping the result to VR_VARYING if we had a source
2826 operand that is VR_VARYING. */
2827 if (vr0.type == VR_VARYING)
2828 {
2829 set_value_range_to_varying (vr);
2830 return;
2831 }
2832
2833 /* Apply the operation to each end of the range and see what we end
2834 up with. */
2835 if (code == NEGATE_EXPR
2836 && !TYPE_UNSIGNED (type))
2837 {
2838 /* NEGATE_EXPR flips the range around. We need to treat
2839 TYPE_MIN_VALUE specially. */
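/* For example, negating [3, 7] yields [-7, -3]. Negating
   [TYPE_MIN_VALUE, -1] yields a lower bound of 1, but -TYPE_MIN_VALUE
   does not fit in the type; it becomes a positive overflow infinity
   when the type supports one, and the range ends up VR_VARYING
   otherwise. */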
2840 if (is_positive_overflow_infinity (vr0.max))
2841 min = negative_overflow_infinity (type);
2842 else if (is_negative_overflow_infinity (vr0.max))
2843 min = positive_overflow_infinity (type);
2844 else if (!vrp_val_is_min (vr0.max))
2845 min = fold_unary_to_constant (code, type, vr0.max);
2846 else if (needs_overflow_infinity (type))
2847 {
2848 if (supports_overflow_infinity (type)
2849 && !is_overflow_infinity (vr0.min)
2850 && !vrp_val_is_min (vr0.min))
2851 min = positive_overflow_infinity (type);
2852 else
2853 {
2854 set_value_range_to_varying (vr);
2855 return;
2856 }
2857 }
2858 else
2859 min = TYPE_MIN_VALUE (type);
2860
2861 if (is_positive_overflow_infinity (vr0.min))
2862 max = negative_overflow_infinity (type);
2863 else if (is_negative_overflow_infinity (vr0.min))
2864 max = positive_overflow_infinity (type);
2865 else if (!vrp_val_is_min (vr0.min))
2866 max = fold_unary_to_constant (code, type, vr0.min);
2867 else if (needs_overflow_infinity (type))
2868 {
2869 if (supports_overflow_infinity (type))
2870 max = positive_overflow_infinity (type);
2871 else
2872 {
2873 set_value_range_to_varying (vr);
2874 return;
2875 }
2876 }
2877 else
2878 max = TYPE_MIN_VALUE (type);
2879 }
2880 else if (code == NEGATE_EXPR
2881 && TYPE_UNSIGNED (type))
2882 {
2883 if (!range_includes_zero_p (&vr0))
2884 {
2885 max = fold_unary_to_constant (code, type, vr0.min);
2886 min = fold_unary_to_constant (code, type, vr0.max);
2887 }
2888 else
2889 {
2890 if (range_is_null (&vr0))
2891 set_value_range_to_null (vr, type);
2892 else
2893 set_value_range_to_varying (vr);
2894 return;
2895 }
2896 }
2897 else if (code == ABS_EXPR
2898 && !TYPE_UNSIGNED (type))
2899 {
2900 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2901 useful range. */
2902 if (!TYPE_OVERFLOW_UNDEFINED (type)
2903 && ((vr0.type == VR_RANGE
2904 && vrp_val_is_min (vr0.min))
2905 || (vr0.type == VR_ANTI_RANGE
2906 && !vrp_val_is_min (vr0.min)
2907 && !range_includes_zero_p (&vr0))))
2908 {
2909 set_value_range_to_varying (vr);
2910 return;
2911 }
2912
2913 /* ABS_EXPR may flip the range around, if the original range
2914 included negative values. */
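/* For instance, ABS of [-7, 3] first maps the bounds to 7 and 3;
   since the original range contains zero, the result becomes
   [0, 7]. */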
2915 if (is_overflow_infinity (vr0.min))
2916 min = positive_overflow_infinity (type);
2917 else if (!vrp_val_is_min (vr0.min))
2918 min = fold_unary_to_constant (code, type, vr0.min);
2919 else if (!needs_overflow_infinity (type))
2920 min = TYPE_MAX_VALUE (type);
2921 else if (supports_overflow_infinity (type))
2922 min = positive_overflow_infinity (type);
2923 else
2924 {
2925 set_value_range_to_varying (vr);
2926 return;
2927 }
2928
2929 if (is_overflow_infinity (vr0.max))
2930 max = positive_overflow_infinity (type);
2931 else if (!vrp_val_is_min (vr0.max))
2932 max = fold_unary_to_constant (code, type, vr0.max);
2933 else if (!needs_overflow_infinity (type))
2934 max = TYPE_MAX_VALUE (type);
2935 else if (supports_overflow_infinity (type)
2936 /* We shouldn't generate [+INF, +INF] as set_value_range
2937 doesn't like this and ICEs. */
2938 && !is_positive_overflow_infinity (min))
2939 max = positive_overflow_infinity (type);
2940 else
2941 {
2942 set_value_range_to_varying (vr);
2943 return;
2944 }
2945
2946 cmp = compare_values (min, max);
2947
2948 /* If a VR_ANTI_RANGE contains zero, then we have
2949 ~[-INF, min (abs (MIN), abs (MAX))]. */
2950 if (vr0.type == VR_ANTI_RANGE)
2951 {
2952 if (range_includes_zero_p (&vr0))
2953 {
2954 /* Take the lower of the two values. */
2955 if (cmp != 1)
2956 max = min;
2957
2958 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2959 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2960 flag_wrapv is set and the original anti-range doesn't include
2961 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2962 if (TYPE_OVERFLOW_WRAPS (type))
2963 {
2964 tree type_min_value = TYPE_MIN_VALUE (type);
2965
2966 min = (vr0.min != type_min_value
2967 ? int_const_binop (PLUS_EXPR, type_min_value,
2968 integer_one_node, 0)
2969 : type_min_value);
2970 }
2971 else
2972 {
2973 if (overflow_infinity_range_p (&vr0))
2974 min = negative_overflow_infinity (type);
2975 else
2976 min = TYPE_MIN_VALUE (type);
2977 }
2978 }
2979 else
2980 {
2981 /* All else has failed, so create the range [0, INF], even for
2982 flag_wrapv since TYPE_MIN_VALUE is in the original
2983 anti-range. */
2984 vr0.type = VR_RANGE;
2985 min = build_int_cst (type, 0);
2986 if (needs_overflow_infinity (type))
2987 {
2988 if (supports_overflow_infinity (type))
2989 max = positive_overflow_infinity (type);
2990 else
2991 {
2992 set_value_range_to_varying (vr);
2993 return;
2994 }
2995 }
2996 else
2997 max = TYPE_MAX_VALUE (type);
2998 }
2999 }
3000
3001 /* If the range contains zero then we know that the minimum value in the
3002 range will be zero. */
3003 else if (range_includes_zero_p (&vr0))
3004 {
3005 if (cmp == 1)
3006 max = min;
3007 min = build_int_cst (type, 0);
3008 }
3009 else
3010 {
3011 /* If the range was reversed, swap MIN and MAX. */
3012 if (cmp == 1)
3013 {
3014 tree t = min;
3015 min = max;
3016 max = t;
3017 }
3018 }
3019 }
3020 else
3021 {
3022 /* Otherwise, operate on each end of the range. */
3023 min = fold_unary_to_constant (code, type, vr0.min);
3024 max = fold_unary_to_constant (code, type, vr0.max);
3025
3026 if (needs_overflow_infinity (type))
3027 {
3028 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
3029
3030 /* If both sides have overflowed, we don't know
3031 anything. */
3032 if ((is_overflow_infinity (vr0.min)
3033 || TREE_OVERFLOW (min))
3034 && (is_overflow_infinity (vr0.max)
3035 || TREE_OVERFLOW (max)))
3036 {
3037 set_value_range_to_varying (vr);
3038 return;
3039 }
3040
3041 if (is_overflow_infinity (vr0.min))
3042 min = vr0.min;
3043 else if (TREE_OVERFLOW (min))
3044 {
3045 if (supports_overflow_infinity (type))
3046 min = (tree_int_cst_sgn (min) >= 0
3047 ? positive_overflow_infinity (TREE_TYPE (min))
3048 : negative_overflow_infinity (TREE_TYPE (min)));
3049 else
3050 {
3051 set_value_range_to_varying (vr);
3052 return;
3053 }
3054 }
3055
3056 if (is_overflow_infinity (vr0.max))
3057 max = vr0.max;
3058 else if (TREE_OVERFLOW (max))
3059 {
3060 if (supports_overflow_infinity (type))
3061 max = (tree_int_cst_sgn (max) >= 0
3062 ? positive_overflow_infinity (TREE_TYPE (max))
3063 : negative_overflow_infinity (TREE_TYPE (max)));
3064 else
3065 {
3066 set_value_range_to_varying (vr);
3067 return;
3068 }
3069 }
3070 }
3071 }
3072
3073 cmp = compare_values (min, max);
3074 if (cmp == -2 || cmp == 1)
3075 {
3076 /* If the new range has its limits swapped around (MIN > MAX),
3077 then the operation caused one of them to wrap around, mark
3078 the new range VARYING. */
3079 set_value_range_to_varying (vr);
3080 }
3081 else
3082 set_value_range (vr, vr0.type, min, max, NULL);
3083 }
3084
3085
3086 /* Extract range information from a conditional expression EXPR based on
3087 the ranges of each of its operands and the expression code. */
3088
3089 static void
3090 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3091 {
3092 tree op0, op1;
3093 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3094 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3095
3096 /* Get value ranges for each operand. For constant operands, create
3097 a new value range with the operand to simplify processing. */
3098 op0 = COND_EXPR_THEN (expr);
3099 if (TREE_CODE (op0) == SSA_NAME)
3100 vr0 = *(get_value_range (op0));
3101 else if (is_gimple_min_invariant (op0))
3102 set_value_range_to_value (&vr0, op0, NULL);
3103 else
3104 set_value_range_to_varying (&vr0);
3105
3106 op1 = COND_EXPR_ELSE (expr);
3107 if (TREE_CODE (op1) == SSA_NAME)
3108 vr1 = *(get_value_range (op1));
3109 else if (is_gimple_min_invariant (op1))
3110 set_value_range_to_value (&vr1, op1, NULL);
3111 else
3112 set_value_range_to_varying (&vr1);
3113
3114 /* The resulting value range is the union of the operand ranges. */
3115 vrp_meet (&vr0, &vr1);
3116 copy_value_range (vr, &vr0);
3117 }
3118
3119
3120 /* Extract range information from a comparison expression EXPR based
3121 on the range of its operand and the expression code. */
3122
3123 static void
3124 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3125 tree type, tree op0, tree op1)
3126 {
3127 bool sop = false;
3128 tree val;
3129
3130 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3131 NULL);
3132
3133 /* A disadvantage of using a special infinity as an overflow
3134 representation is that we lose the ability to record overflow
3135 when we don't have an infinity. So we have to ignore a result
3136 which relies on overflow. */
3137
3138 if (val && !is_overflow_infinity (val) && !sop)
3139 {
3140 /* Since this expression was found on the RHS of an assignment,
3141 its type may be different from _Bool. Convert VAL to EXPR's
3142 type. */
3143 val = fold_convert (type, val);
3144 if (is_gimple_min_invariant (val))
3145 set_value_range_to_value (vr, val, vr->equiv);
3146 else
3147 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3148 }
3149 else
3150 /* The result of a comparison is always true or false. */
3151 set_value_range_to_truthvalue (vr, type);
3152 }
3153
3154 /* Try to derive a nonnegative or nonzero range out of STMT relying
3155 primarily on generic routines in fold in conjunction with range data.
3156 Store the result in *VR. */
3157
3158 static void
3159 extract_range_basic (value_range_t *vr, gimple stmt)
3160 {
3161 bool sop = false;
3162 tree type = gimple_expr_type (stmt);
3163
3164 if (INTEGRAL_TYPE_P (type)
3165 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3166 set_value_range_to_nonnegative (vr, type,
3167 sop || stmt_overflow_infinity (stmt));
3168 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3169 && !sop)
3170 set_value_range_to_nonnull (vr, type);
3171 else
3172 set_value_range_to_varying (vr);
3173 }
3174
3175
3176 /* Try to compute a useful range out of assignment STMT and store it
3177 in *VR. */
3178
3179 static void
3180 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3181 {
3182 enum tree_code code = gimple_assign_rhs_code (stmt);
3183
3184 if (code == ASSERT_EXPR)
3185 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3186 else if (code == SSA_NAME)
3187 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3188 else if (TREE_CODE_CLASS (code) == tcc_binary
3189 || code == TRUTH_AND_EXPR
3190 || code == TRUTH_OR_EXPR
3191 || code == TRUTH_XOR_EXPR)
3192 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3193 gimple_expr_type (stmt),
3194 gimple_assign_rhs1 (stmt),
3195 gimple_assign_rhs2 (stmt));
3196 else if (TREE_CODE_CLASS (code) == tcc_unary)
3197 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3198 gimple_expr_type (stmt),
3199 gimple_assign_rhs1 (stmt));
3200 else if (code == COND_EXPR)
3201 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3202 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3203 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3204 gimple_expr_type (stmt),
3205 gimple_assign_rhs1 (stmt),
3206 gimple_assign_rhs2 (stmt));
3207 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3208 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3209 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3210 else
3211 set_value_range_to_varying (vr);
3212
3213 if (vr->type == VR_VARYING)
3214 extract_range_basic (vr, stmt);
3215 }
3216
3217 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3218 would be profitable to adjust VR using scalar evolution information
3219 for VAR. If so, update VR with the new limits. */
3220
3221 static void
3222 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3223 gimple stmt, tree var)
3224 {
3225 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3226 enum ev_direction dir;
3227
3228 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3229 better opportunities than a regular range, but I'm not sure. */
3230 if (vr->type == VR_ANTI_RANGE)
3231 return;
3232
3233 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3234
3235 /* Like in PR19590, scev can return a constant function. */
3236 if (is_gimple_min_invariant (chrec))
3237 {
3238 set_value_range_to_value (vr, chrec, vr->equiv);
3239 return;
3240 }
3241
3242 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3243 return;
3244
3245 init = initial_condition_in_loop_num (chrec, loop->num);
3246 tem = op_with_constant_singleton_value_range (init);
3247 if (tem)
3248 init = tem;
3249 step = evolution_part_in_loop_num (chrec, loop->num);
3250 tem = op_with_constant_singleton_value_range (step);
3251 if (tem)
3252 step = tem;
3253
3254 /* If STEP is symbolic, we can't know whether INIT will be the
3255 minimum or maximum value in the range. Also, unless INIT is
3256 a simple expression, compare_values and possibly other functions
3257 in tree-vrp won't be able to handle it. */
3258 if (step == NULL_TREE
3259 || !is_gimple_min_invariant (step)
3260 || !valid_value_p (init))
3261 return;
3262
3263 dir = scev_direction (chrec);
3264 if (/* Do not adjust ranges if we do not know whether the iv increases
3265 or decreases, ... */
3266 dir == EV_DIR_UNKNOWN
3267 /* ... or if it may wrap. */
3268 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3269 true))
3270 return;
3271
3272 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3273 negative_overflow_infinity and positive_overflow_infinity,
3274 because we have concluded that the loop probably does not
3275 wrap. */
3276
3277 type = TREE_TYPE (var);
3278 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3279 tmin = lower_bound_in_type (type, type);
3280 else
3281 tmin = TYPE_MIN_VALUE (type);
3282 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3283 tmax = upper_bound_in_type (type, type);
3284 else
3285 tmax = TYPE_MAX_VALUE (type);
3286
3287 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3288 {
3289 min = tmin;
3290 max = tmax;
3291
3292 /* For VARYING or UNDEFINED ranges, just about anything we get
3293 from scalar evolutions should be better. */
3294
3295 if (dir == EV_DIR_DECREASES)
3296 max = init;
3297 else
3298 min = init;
3299
3300 /* If we would create an invalid range, then just assume we
3301 know absolutely nothing. This may be over-conservative,
3302 but it's clearly safe, and should happen only in unreachable
3303 parts of code, or for invalid programs. */
3304 if (compare_values (min, max) == 1)
3305 return;
3306
3307 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3308 }
3309 else if (vr->type == VR_RANGE)
3310 {
3311 min = vr->min;
3312 max = vr->max;
3313
3314 if (dir == EV_DIR_DECREASES)
3315 {
3316 /* INIT is the maximum value. If INIT is lower than VR->MAX
3317 but no smaller than VR->MIN, set VR->MAX to INIT. */
3318 if (compare_values (init, max) == -1)
3319 {
3320 max = init;
3321
3322 /* If we just created an invalid range with the minimum
3323 greater than the maximum, we fail conservatively.
3324 This should happen only in unreachable
3325 parts of code, or for invalid programs. */
3326 if (compare_values (min, max) == 1)
3327 return;
3328 }
3329
3330 /* According to the loop information, the variable does not
3331 overflow. If we think it does, it is probably because of an
3332 overflow due to arithmetic on a different INF value, so
3333 reset it now. */
3334 if (is_negative_overflow_infinity (min))
3335 min = tmin;
3336 }
3337 else
3338 {
3339 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3340 if (compare_values (init, min) == 1)
3341 {
3342 min = init;
3343
3344 /* Again, avoid creating invalid range by failing. */
3345 if (compare_values (min, max) == 1)
3346 return;
3347 }
3348
3349 if (is_positive_overflow_infinity (max))
3350 max = tmax;
3351 }
3352
3353 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3354 }
3355 }
3356
3357 /* Return true if VAR may overflow at STMT. This checks any available
3358 loop information to see if we can determine that VAR does not
3359 overflow. */
3360
3361 static bool
3362 vrp_var_may_overflow (tree var, gimple stmt)
3363 {
3364 struct loop *l;
3365 tree chrec, init, step;
3366
3367 if (current_loops == NULL)
3368 return true;
3369
3370 l = loop_containing_stmt (stmt);
3371 if (l == NULL
3372 || !loop_outer (l))
3373 return true;
3374
3375 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3376 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3377 return true;
3378
3379 init = initial_condition_in_loop_num (chrec, l->num);
3380 step = evolution_part_in_loop_num (chrec, l->num);
3381
3382 if (step == NULL_TREE
3383 || !is_gimple_min_invariant (step)
3384 || !valid_value_p (init))
3385 return true;
3386
3387 /* If we get here, we know something useful about VAR based on the
3388 loop information. If it wraps, it may overflow. */
3389
3390 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3391 true))
3392 return true;
3393
3394 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3395 {
3396 print_generic_expr (dump_file, var, 0);
3397 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3398 }
3399
3400 return false;
3401 }
3402
3403
3404 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3405
3406 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3407 all the values in the ranges.
3408
3409 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3410
3411 - Return NULL_TREE if it is not always possible to determine the
3412 value of the comparison.
3413
3414 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3415 overflow infinity was used in the test. */
3416
3417
3418 static tree
3419 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3420 bool *strict_overflow_p)
3421 {
3422 /* VARYING or UNDEFINED ranges cannot be compared. */
3423 if (vr0->type == VR_VARYING
3424 || vr0->type == VR_UNDEFINED
3425 || vr1->type == VR_VARYING
3426 || vr1->type == VR_UNDEFINED)
3427 return NULL_TREE;
3428
3429 /* Anti-ranges need to be handled separately. */
3430 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3431 {
3432 /* If both are anti-ranges, then we cannot compute any
3433 comparison. */
3434 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3435 return NULL_TREE;
3436
3437 /* These comparisons are never statically computable. */
3438 if (comp == GT_EXPR
3439 || comp == GE_EXPR
3440 || comp == LT_EXPR
3441 || comp == LE_EXPR)
3442 return NULL_TREE;
3443
3444 /* Equality can be computed only between a range and an
3445 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3446 if (vr0->type == VR_RANGE)
3447 {
3448 /* To simplify processing, make VR0 the anti-range. */
3449 value_range_t *tmp = vr0;
3450 vr0 = vr1;
3451 vr1 = tmp;
3452 }
3453
3454 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3455
3456 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3457 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3458 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3459
3460 return NULL_TREE;
3461 }
3462
3463 if (!usable_range_p (vr0, strict_overflow_p)
3464 || !usable_range_p (vr1, strict_overflow_p))
3465 return NULL_TREE;
3466
3467 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3468 operands around and change the comparison code. */
3469 if (comp == GT_EXPR || comp == GE_EXPR)
3470 {
3471 value_range_t *tmp;
3472 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3473 tmp = vr0;
3474 vr0 = vr1;
3475 vr1 = tmp;
3476 }
3477
3478 if (comp == EQ_EXPR)
3479 {
3480 /* Equality may only be computed if both ranges represent
3481 exactly one value. */
3482 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3483 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3484 {
3485 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3486 strict_overflow_p);
3487 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3488 strict_overflow_p);
3489 if (cmp_min == 0 && cmp_max == 0)
3490 return boolean_true_node;
3491 else if (cmp_min != -2 && cmp_max != -2)
3492 return boolean_false_node;
3493 }
3494 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
3495 else if (compare_values_warnv (vr0->min, vr1->max,
3496 strict_overflow_p) == 1
3497 || compare_values_warnv (vr1->min, vr0->max,
3498 strict_overflow_p) == 1)
3499 return boolean_false_node;
3500
3501 return NULL_TREE;
3502 }
3503 else if (comp == NE_EXPR)
3504 {
3505 int cmp1, cmp2;
3506
3507 /* If VR0 is completely to the left or completely to the right
3508 of VR1, they are always different. Notice that we need to
3509 make sure that both comparisons yield similar results to
3510 avoid comparing values that cannot be compared at
3511 compile-time. */
3512 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3513 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3514 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3515 return boolean_true_node;
3516
3517 /* If VR0 and VR1 represent a single value and are identical,
3518 return false. */
3519 else if (compare_values_warnv (vr0->min, vr0->max,
3520 strict_overflow_p) == 0
3521 && compare_values_warnv (vr1->min, vr1->max,
3522 strict_overflow_p) == 0
3523 && compare_values_warnv (vr0->min, vr1->min,
3524 strict_overflow_p) == 0
3525 && compare_values_warnv (vr0->max, vr1->max,
3526 strict_overflow_p) == 0)
3527 return boolean_false_node;
3528
3529 /* Otherwise, they may or may not be different. */
3530 else
3531 return NULL_TREE;
3532 }
3533 else if (comp == LT_EXPR || comp == LE_EXPR)
3534 {
3535 int tst;
3536
3537 /* If VR0 is to the left of VR1, return true. */
3538 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3539 if ((comp == LT_EXPR && tst == -1)
3540 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3541 {
3542 if (overflow_infinity_range_p (vr0)
3543 || overflow_infinity_range_p (vr1))
3544 *strict_overflow_p = true;
3545 return boolean_true_node;
3546 }
3547
3548 /* If VR0 is to the right of VR1, return false. */
3549 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3550 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3551 || (comp == LE_EXPR && tst == 1))
3552 {
3553 if (overflow_infinity_range_p (vr0)
3554 || overflow_infinity_range_p (vr1))
3555 *strict_overflow_p = true;
3556 return boolean_false_node;
3557 }
3558
3559 /* Otherwise, we don't know. */
3560 return NULL_TREE;
3561 }
3562
3563 gcc_unreachable ();
3564 }
3565
3566
3567 /* Given a value range VR, a value VAL and a comparison code COMP, return
3568 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3569 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3570 always returns false. Return NULL_TREE if it is not always
3571 possible to determine the value of the comparison. Also set
3572 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3573 infinity was used in the test. */
3574
3575 static tree
3576 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3577 bool *strict_overflow_p)
3578 {
3579 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3580 return NULL_TREE;
3581
3582 /* Anti-ranges need to be handled separately. */
3583 if (vr->type == VR_ANTI_RANGE)
3584 {
3585 /* For anti-ranges, the only predicates that we can compute at
3586 compile time are equality and inequality. */
3587 if (comp == GT_EXPR
3588 || comp == GE_EXPR
3589 || comp == LT_EXPR
3590 || comp == LE_EXPR)
3591 return NULL_TREE;
3592
3593 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3594 if (value_inside_range (val, vr) == 1)
3595 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3596
3597 return NULL_TREE;
3598 }
3599
3600 if (!usable_range_p (vr, strict_overflow_p))
3601 return NULL_TREE;
3602
3603 if (comp == EQ_EXPR)
3604 {
3605 /* EQ_EXPR may only be computed if VR represents exactly
3606 one value. */
3607 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3608 {
3609 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3610 if (cmp == 0)
3611 return boolean_true_node;
3612 else if (cmp == -1 || cmp == 1 || cmp == 2)
3613 return boolean_false_node;
3614 }
3615 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3616 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3617 return boolean_false_node;
3618
3619 return NULL_TREE;
3620 }
3621 else if (comp == NE_EXPR)
3622 {
3623 /* If VAL is not inside VR, then they are always different. */
3624 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3625 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3626 return boolean_true_node;
3627
3628 /* If VR represents exactly one value equal to VAL, then return
3629 false. */
3630 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3631 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3632 return boolean_false_node;
3633
3634 /* Otherwise, they may or may not be different. */
3635 return NULL_TREE;
3636 }
3637 else if (comp == LT_EXPR || comp == LE_EXPR)
3638 {
3639 int tst;
3640
3641 /* If VR is to the left of VAL, return true. */
3642 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3643 if ((comp == LT_EXPR && tst == -1)
3644 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3645 {
3646 if (overflow_infinity_range_p (vr))
3647 *strict_overflow_p = true;
3648 return boolean_true_node;
3649 }
3650
3651 /* If VR is to the right of VAL, return false. */
3652 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3653 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3654 || (comp == LE_EXPR && tst == 1))
3655 {
3656 if (overflow_infinity_range_p (vr))
3657 *strict_overflow_p = true;
3658 return boolean_false_node;
3659 }
3660
3661 /* Otherwise, we don't know. */
3662 return NULL_TREE;
3663 }
3664 else if (comp == GT_EXPR || comp == GE_EXPR)
3665 {
3666 int tst;
3667
3668 /* If VR is to the right of VAL, return true. */
3669 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3670 if ((comp == GT_EXPR && tst == 1)
3671 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3672 {
3673 if (overflow_infinity_range_p (vr))
3674 *strict_overflow_p = true;
3675 return boolean_true_node;
3676 }
3677
3678 /* If VR is to the left of VAL, return false. */
3679 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3680 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3681 || (comp == GE_EXPR && tst == -1))
3682 {
3683 if (overflow_infinity_range_p (vr))
3684 *strict_overflow_p = true;
3685 return boolean_false_node;
3686 }
3687
3688 /* Otherwise, we don't know. */
3689 return NULL_TREE;
3690 }
3691
3692 gcc_unreachable ();
3693 }
3694
3695
3696 /* Debugging dumps. */
3697
3698 void dump_value_range (FILE *, value_range_t *);
3699 void debug_value_range (value_range_t *);
3700 void dump_all_value_ranges (FILE *);
3701 void debug_all_value_ranges (void);
3702 void dump_vr_equiv (FILE *, bitmap);
3703 void debug_vr_equiv (bitmap);
3704
3705
3706 /* Dump value range VR to FILE. */
3707
3708 void
3709 dump_value_range (FILE *file, value_range_t *vr)
3710 {
3711 if (vr == NULL)
3712 fprintf (file, "[]");
3713 else if (vr->type == VR_UNDEFINED)
3714 fprintf (file, "UNDEFINED");
3715 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3716 {
3717 tree type = TREE_TYPE (vr->min);
3718
3719 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3720
3721 if (is_negative_overflow_infinity (vr->min))
3722 fprintf (file, "-INF(OVF)");
3723 else if (INTEGRAL_TYPE_P (type)
3724 && !TYPE_UNSIGNED (type)
3725 && vrp_val_is_min (vr->min))
3726 fprintf (file, "-INF");
3727 else
3728 print_generic_expr (file, vr->min, 0);
3729
3730 fprintf (file, ", ");
3731
3732 if (is_positive_overflow_infinity (vr->max))
3733 fprintf (file, "+INF(OVF)");
3734 else if (INTEGRAL_TYPE_P (type)
3735 && vrp_val_is_max (vr->max))
3736 fprintf (file, "+INF");
3737 else
3738 print_generic_expr (file, vr->max, 0);
3739
3740 fprintf (file, "]");
3741
3742 if (vr->equiv)
3743 {
3744 bitmap_iterator bi;
3745 unsigned i, c = 0;
3746
3747 fprintf (file, " EQUIVALENCES: { ");
3748
3749 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3750 {
3751 print_generic_expr (file, ssa_name (i), 0);
3752 fprintf (file, " ");
3753 c++;
3754 }
3755
3756 fprintf (file, "} (%u elements)", c);
3757 }
3758 }
3759 else if (vr->type == VR_VARYING)
3760 fprintf (file, "VARYING");
3761 else
3762 fprintf (file, "INVALID RANGE");
3763 }
3764
3765
3766 /* Dump value range VR to stderr. */
3767
3768 DEBUG_FUNCTION void
3769 debug_value_range (value_range_t *vr)
3770 {
3771 dump_value_range (stderr, vr);
3772 fprintf (stderr, "\n");
3773 }
3774
3775
3776 /* Dump value ranges of all SSA_NAMEs to FILE. */
3777
3778 void
3779 dump_all_value_ranges (FILE *file)
3780 {
3781 size_t i;
3782
3783 for (i = 0; i < num_ssa_names; i++)
3784 {
3785 if (vr_value[i])
3786 {
3787 print_generic_expr (file, ssa_name (i), 0);
3788 fprintf (file, ": ");
3789 dump_value_range (file, vr_value[i]);
3790 fprintf (file, "\n");
3791 }
3792 }
3793
3794 fprintf (file, "\n");
3795 }
3796
3797
3798 /* Dump all value ranges to stderr. */
3799
3800 DEBUG_FUNCTION void
3801 debug_all_value_ranges (void)
3802 {
3803 dump_all_value_ranges (stderr);
3804 }
3805
3806
3807 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3808 create a new SSA name N and return the assertion assignment
3809 'N = ASSERT_EXPR <V, V OP W>'. */
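/* For instance (illustrative), given COND 'x_3 > 10' and V x_3, this
builds the statement 'x_4 = ASSERT_EXPR <x_3, x_3 > 10>', where x_4
is a fresh name for x_3 that will replace the uses of x_3 dominated
by the assertion once the SSA web is updated.  */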
3810
3811 static gimple
3812 build_assert_expr_for (tree cond, tree v)
3813 {
3814 tree n;
3815 gimple assertion;
3816
3817 gcc_assert (TREE_CODE (v) == SSA_NAME);
3818 n = duplicate_ssa_name (v, NULL);
3819
3820 if (COMPARISON_CLASS_P (cond))
3821 {
3822 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3823 assertion = gimple_build_assign (n, a);
3824 }
3825 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3826 {
3827 /* Given !V, build the assignment N = false. */
3828 tree op0 = TREE_OPERAND (cond, 0);
3829 gcc_assert (op0 == v);
3830 assertion = gimple_build_assign (n, boolean_false_node);
3831 }
3832 else if (TREE_CODE (cond) == SSA_NAME)
3833 {
3834 /* Given V, build the assignment N = true. */
3835 gcc_assert (v == cond);
3836 assertion = gimple_build_assign (n, boolean_true_node);
3837 }
3838 else
3839 gcc_unreachable ();
3840
3841 SSA_NAME_DEF_STMT (n) = assertion;
3842
3843 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3844 operand of the ASSERT_EXPR. Register the new name and the old one
3845 in the replacement table so that we can fix the SSA web after
3846 adding all the ASSERT_EXPRs. */
3847 register_new_name_mapping (n, v);
3848
3849 return assertion;
3850 }
3851
3852
3853 /* Return true if STMT is a GIMPLE_COND whose comparison involves
3854 floating point values. */
3855
3856 static inline bool
3857 fp_predicate (gimple stmt)
3858 {
3859 GIMPLE_CHECK (stmt, GIMPLE_COND);
3860
3861 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
3862 }
3863
3864
3865 /* If the range of values taken by OP can be inferred after STMT executes,
3866 return the comparison code (COMP_CODE_P) and value (VAL_P) that
3867 describes the inferred range. Return true if a range could be
3868 inferred. */
3869
3870 static bool
3871 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
3872 {
3873 *val_p = NULL_TREE;
3874 *comp_code_p = ERROR_MARK;
3875
3876 /* Do not attempt to infer anything about names that flow through
3877 abnormal edges. */
3878 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
3879 return false;
3880
3881 /* Similarly, don't infer anything from statements that may throw
3882 exceptions. */
3883 if (stmt_could_throw_p (stmt))
3884 return false;
3885
3886 /* If STMT is the last statement of a basic block with no
3887 successors, there is no point inferring anything about any of its
3888 operands. We would not be able to find a proper insertion point
3889 for the assertion, anyway. */
3890 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
3891 return false;
3892
3893 /* We can only assume that a pointer dereference will yield
3894 non-NULL if -fdelete-null-pointer-checks is enabled. */
3895 if (flag_delete_null_pointer_checks
3896 && POINTER_TYPE_P (TREE_TYPE (op))
3897 && gimple_code (stmt) != GIMPLE_ASM)
3898 {
3899 unsigned num_uses, num_loads, num_stores;
3900
3901 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
3902 if (num_loads + num_stores > 0)
3903 {
3904 *val_p = build_int_cst (TREE_TYPE (op), 0);
3905 *comp_code_p = NE_EXPR;
3906 return true;
3907 }
3908 }
3909
3910 return false;
3911 }
3912
3913
3914 void dump_asserts_for (FILE *, tree);
3915 void debug_asserts_for (tree);
3916 void dump_all_asserts (FILE *);
3917 void debug_all_asserts (void);
3918
3919 /* Dump all the registered assertions for NAME to FILE. */
3920
3921 void
3922 dump_asserts_for (FILE *file, tree name)
3923 {
3924 assert_locus_t loc;
3925
3926 fprintf (file, "Assertions to be inserted for ");
3927 print_generic_expr (file, name, 0);
3928 fprintf (file, "\n");
3929
3930 loc = asserts_for[SSA_NAME_VERSION (name)];
3931 while (loc)
3932 {
3933 fprintf (file, "\t");
3934 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
3935 fprintf (file, "\n\tBB #%d", loc->bb->index);
3936 if (loc->e)
3937 {
3938 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
3939 loc->e->dest->index);
3940 dump_edge_info (file, loc->e, 0);
3941 }
3942 fprintf (file, "\n\tPREDICATE: ");
3943 print_generic_expr (file, name, 0);
3944 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
3945 print_generic_expr (file, loc->val, 0);
3946 fprintf (file, "\n\n");
3947 loc = loc->next;
3948 }
3949
3950 fprintf (file, "\n");
3951 }
3952
3953
3954 /* Dump all the registered assertions for NAME to stderr. */
3955
3956 DEBUG_FUNCTION void
3957 debug_asserts_for (tree name)
3958 {
3959 dump_asserts_for (stderr, name);
3960 }
3961
3962
3963 /* Dump all the registered assertions for all the names to FILE. */
3964
3965 void
3966 dump_all_asserts (FILE *file)
3967 {
3968 unsigned i;
3969 bitmap_iterator bi;
3970
3971 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
3972 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3973 dump_asserts_for (file, ssa_name (i));
3974 fprintf (file, "\n");
3975 }
3976
3977
3978 /* Dump all the registered assertions for all the names to stderr. */
3979
3980 DEBUG_FUNCTION void
3981 debug_all_asserts (void)
3982 {
3983 dump_all_asserts (stderr);
3984 }
3985
3986
3987 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
3988 'EXPR COMP_CODE VAL' at a location that dominates block BB or
3989 E->DEST, then register this location as a possible insertion point
3990 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
3991
3992 BB, E and SI provide the exact insertion point for the new
3993 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
3994 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
3995 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
3996 must not be NULL. */
3997
3998 static void
3999 register_new_assert_for (tree name, tree expr,
4000 enum tree_code comp_code,
4001 tree val,
4002 basic_block bb,
4003 edge e,
4004 gimple_stmt_iterator si)
4005 {
4006 assert_locus_t n, loc, last_loc;
4007 basic_block dest_bb;
4008
4009 #if defined ENABLE_CHECKING
4010 gcc_assert (bb == NULL || e == NULL);
4011
4012 if (e == NULL)
4013 gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4014 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4015 #endif
4016
4017 /* Never build an assert comparing against an integer constant with
4018 TREE_OVERFLOW set. This confuses our undefined overflow warning
4019 machinery. */
4020 if (TREE_CODE (val) == INTEGER_CST
4021 && TREE_OVERFLOW (val))
4022 val = build_int_cst_wide (TREE_TYPE (val),
4023 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4024
4025 /* The new assertion A will be inserted at BB or E. We need to
4026 determine if the new location is dominated by a previously
4027 registered location for A. If we are doing an edge insertion,
4028 assume that A will be inserted at E->DEST. Note that this is not
4029 necessarily true.
4030
4031 If E is a critical edge, it will be split. But even if E is
4032 split, the new block will dominate the same set of blocks that
4033 E->DEST dominates.
4034
4035 The reverse, however, is not true: blocks dominated by E->DEST
4036 will not be dominated by the new block created to split E. So,
4037 if the insertion location is on a critical edge, we will not use
4038 the new location to move another assertion previously registered
4039 at a block dominated by E->DEST. */
4040 dest_bb = (bb) ? bb : e->dest;
4041
4042 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4043 VAL at a block dominating DEST_BB, then we don't need to insert a new
4044 one. Similarly, if the same assertion already exists at a block
4045 dominated by DEST_BB and the new location is not on a critical
4046 edge, then update the existing location for the assertion (i.e.,
4047 move the assertion up in the dominance tree).
4048
4049 Note, this is implemented as a simple linked list because there
4050 should not be more than a handful of assertions registered per
4051 name. If this becomes a performance problem, a table hashed by
4052 COMP_CODE and VAL could be implemented. */
4053 loc = asserts_for[SSA_NAME_VERSION (name)];
4054 last_loc = loc;
4055 while (loc)
4056 {
4057 if (loc->comp_code == comp_code
4058 && (loc->val == val
4059 || operand_equal_p (loc->val, val, 0))
4060 && (loc->expr == expr
4061 || operand_equal_p (loc->expr, expr, 0)))
4062 {
4063 /* If the assertion NAME COMP_CODE VAL has already been
4064 registered at a basic block that dominates DEST_BB, then
4065 we don't need to insert the same assertion again. Note
4066 that we don't check strict dominance here to avoid
4067 replicating the same assertion inside the same basic
4068 block more than once (e.g., when a pointer is
4069 dereferenced several times inside a block).
4070
4071 An exception to this rule is edge insertion. If the
4072 new assertion is to be inserted on edge E, then it will
4073 dominate all the other insertions that we may want to
4074 insert in DEST_BB. So, if we are doing an edge
4075 insertion, don't do this dominance check. */
4076 if (e == NULL
4077 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4078 return;
4079
4080 /* Otherwise, if E is not a critical edge and DEST_BB
4081 dominates the existing location for the assertion, move
4082 the assertion up in the dominance tree by updating its
4083 location information. */
4084 if ((e == NULL || !EDGE_CRITICAL_P (e))
4085 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4086 {
4087 loc->bb = dest_bb;
4088 loc->e = e;
4089 loc->si = si;
4090 return;
4091 }
4092 }
4093
4094 /* Update the last node of the list and move to the next one. */
4095 last_loc = loc;
4096 loc = loc->next;
4097 }
4098
4099 /* If we didn't find an assertion already registered for
4100 NAME COMP_CODE VAL, add a new one at the end of the list of
4101 assertions associated with NAME. */
4102 n = XNEW (struct assert_locus_d);
4103 n->bb = dest_bb;
4104 n->e = e;
4105 n->si = si;
4106 n->comp_code = comp_code;
4107 n->val = val;
4108 n->expr = expr;
4109 n->next = NULL;
4110
4111 if (last_loc)
4112 last_loc->next = n;
4113 else
4114 asserts_for[SSA_NAME_VERSION (name)] = n;
4115
4116 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4117 }
4118
4119 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4120 Extract a suitable test code and value and store them into *CODE_P and
4121 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4122
4123 If no extraction was possible, return FALSE, otherwise return TRUE.
4124
4125 If INVERT is true, then we invert the result stored into *CODE_P. */
4126
4127 static bool
4128 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4129 tree cond_op0, tree cond_op1,
4130 bool invert, enum tree_code *code_p,
4131 tree *val_p)
4132 {
4133 enum tree_code comp_code;
4134 tree val;
4135
4136 /* We have a comparison of the form NAME COMP VAL
4137 or VAL COMP NAME. */
4138 if (name == cond_op1)
4139 {
4140 /* If the predicate is of the form VAL COMP NAME, flip
4141 COMP around because we need to register NAME as the
4142 first operand in the predicate. */
4143 comp_code = swap_tree_comparison (cond_code);
4144 val = cond_op0;
4145 }
4146 else
4147 {
4148 /* The comparison is of the form NAME COMP VAL, so the
4149 comparison code remains unchanged. */
4150 comp_code = cond_code;
4151 val = cond_op1;
4152 }
4153
4154 /* Invert the comparison code as necessary. */
4155 if (invert)
4156 comp_code = invert_tree_comparison (comp_code, 0);
4157
4158 /* VRP does not handle float types. */
4159 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4160 return false;
4161
4162 /* Do not register always-false predicates.
4163 FIXME: this works around a limitation in fold() when dealing with
4164 enumerations. Given 'enum { N1, N2 } x;', fold will not
4165 fold 'if (x > N2)' to 'if (0)'. */
4166 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4167 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4168 {
4169 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4170 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4171
4172 if (comp_code == GT_EXPR
4173 && (!max
4174 || compare_values (val, max) == 0))
4175 return false;
4176
4177 if (comp_code == LT_EXPR
4178 && (!min
4179 || compare_values (val, min) == 0))
4180 return false;
4181 }
4182 *code_p = comp_code;
4183 *val_p = val;
4184 return true;
4185 }
4186
4187 /* Try to register an edge assertion for SSA name NAME on edge E for
4188 the condition COND_OP0 COND_CODE COND_OP1 contributing to the conditional
4189 jump pointed to by BSI. Invert the condition if INVERT is true.
4190 Return true if an assertion for NAME could be registered. */
4191
4192 static bool
4193 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4194 enum tree_code cond_code,
4195 tree cond_op0, tree cond_op1, bool invert)
4196 {
4197 tree val;
4198 enum tree_code comp_code;
4199 bool retval = false;
4200
4201 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4202 cond_op0,
4203 cond_op1,
4204 invert, &comp_code, &val))
4205 return false;
4206
4207 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4208 reachable from E. */
4209 if (live_on_edge (e, name)
4210 && !has_single_use (name))
4211 {
4212 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4213 retval = true;
4214 }
4215
4216 /* In the case of NAME <= CST and NAME being defined as
4217 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4218 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4219 This catches range and anti-range tests. */
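/* For instance (illustrative), for a signed NAME2 defined by
NAME = (unsigned int) NAME2 + 1, the test NAME <= 2 implies
-1 <= NAME2 <= 1 on the edge where the test is true.  */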
4220 if ((comp_code == LE_EXPR
4221 || comp_code == GT_EXPR)
4222 && TREE_CODE (val) == INTEGER_CST
4223 && TYPE_UNSIGNED (TREE_TYPE (val)))
4224 {
4225 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4226 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4227
4228 /* Extract CST2 from the (optional) addition. */
4229 if (is_gimple_assign (def_stmt)
4230 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4231 {
4232 name2 = gimple_assign_rhs1 (def_stmt);
4233 cst2 = gimple_assign_rhs2 (def_stmt);
4234 if (TREE_CODE (name2) == SSA_NAME
4235 && TREE_CODE (cst2) == INTEGER_CST)
4236 def_stmt = SSA_NAME_DEF_STMT (name2);
4237 }
4238
4239 /* Extract NAME3 from the (optional) sign-changing cast. */
4240 if (gimple_assign_cast_p (def_stmt))
4241 {
4242 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4243 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4244 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4245 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4246 name3 = gimple_assign_rhs1 (def_stmt);
4247 }
4248
4249 /* If name3 is used later, create an ASSERT_EXPR for it. */
4250 if (name3 != NULL_TREE
4251 && TREE_CODE (name3) == SSA_NAME
4252 && (cst2 == NULL_TREE
4253 || TREE_CODE (cst2) == INTEGER_CST)
4254 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4255 && live_on_edge (e, name3)
4256 && !has_single_use (name3))
4257 {
4258 tree tmp;
4259
4260 /* Build an expression for the range test. */
4261 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4262 if (cst2 != NULL_TREE)
4263 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4264
4265 if (dump_file)
4266 {
4267 fprintf (dump_file, "Adding assert for ");
4268 print_generic_expr (dump_file, name3, 0);
4269 fprintf (dump_file, " from ");
4270 print_generic_expr (dump_file, tmp, 0);
4271 fprintf (dump_file, "\n");
4272 }
4273
4274 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4275
4276 retval = true;
4277 }
4278
4279 /* If name2 is used later, create an ASSERT_EXPR for it. */
4280 if (name2 != NULL_TREE
4281 && TREE_CODE (name2) == SSA_NAME
4282 && TREE_CODE (cst2) == INTEGER_CST
4283 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4284 && live_on_edge (e, name2)
4285 && !has_single_use (name2))
4286 {
4287 tree tmp;
4288
4289 /* Build an expression for the range test. */
4290 tmp = name2;
4291 if (TREE_TYPE (name) != TREE_TYPE (name2))
4292 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4293 if (cst2 != NULL_TREE)
4294 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4295
4296 if (dump_file)
4297 {
4298 fprintf (dump_file, "Adding assert for ");
4299 print_generic_expr (dump_file, name2, 0);
4300 fprintf (dump_file, " from ");
4301 print_generic_expr (dump_file, tmp, 0);
4302 fprintf (dump_file, "\n");
4303 }
4304
4305 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4306
4307 retval = true;
4308 }
4309 }
4310
4311 return retval;
4312 }
4313
4314 /* OP is an operand of a truth value expression which is known to have
4315 a particular value. Register any asserts for OP and for any
4316 operands in OP's defining statement.
4317
4318 If CODE is EQ_EXPR, then we want to register that OP is zero (false);
4319 if CODE is NE_EXPR, then we want to register that OP is nonzero (true). */
4320
4321 static bool
4322 register_edge_assert_for_1 (tree op, enum tree_code code,
4323 edge e, gimple_stmt_iterator bsi)
4324 {
4325 bool retval = false;
4326 gimple op_def;
4327 tree val;
4328 enum tree_code rhs_code;
4329
4330 /* We only care about SSA_NAMEs. */
4331 if (TREE_CODE (op) != SSA_NAME)
4332 return false;
4333
4334 /* We know that OP will have a zero or nonzero value. If OP is used
4335 more than once go ahead and register an assert for OP.
4336
4337 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4338 it will always be set for OP (because OP is used in a COND_EXPR in
4339 the subgraph). */
4340 if (!has_single_use (op))
4341 {
4342 val = build_int_cst (TREE_TYPE (op), 0);
4343 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4344 retval = true;
4345 }
4346
4347 /* Now look at how OP is set. If it's set from a comparison,
4348 a truth operation or some bit operations, then we may be able
4349 to register information about the operands of that assignment. */
4350 op_def = SSA_NAME_DEF_STMT (op);
4351 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4352 return retval;
4353
4354 rhs_code = gimple_assign_rhs_code (op_def);
4355
4356 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4357 {
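/* CODE == EQ_EXPR means OP is known to be false, so the comparison
that defines OP must have evaluated to false; invert it before
registering asserts for its operands.  */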
4358 bool invert = (code == EQ_EXPR ? true : false);
4359 tree op0 = gimple_assign_rhs1 (op_def);
4360 tree op1 = gimple_assign_rhs2 (op_def);
4361
4362 if (TREE_CODE (op0) == SSA_NAME)
4363 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4364 invert);
4365 if (TREE_CODE (op1) == SSA_NAME)
4366 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4367 invert);
4368 }
4369 else if ((code == NE_EXPR
4370 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4371 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4372 || (code == EQ_EXPR
4373 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4374 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4375 {
4376 /* Recurse on each operand. */
4377 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4378 code, e, bsi);
4379 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4380 code, e, bsi);
4381 }
4382 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4383 {
4384 /* Recurse, flipping CODE. */
4385 code = invert_tree_comparison (code, false);
4386 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4387 code, e, bsi);
4388 }
4389 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4390 {
4391 /* Recurse through the copy. */
4392 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4393 code, e, bsi);
4394 }
4395 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4396 {
4397 /* Recurse through the type conversion. */
4398 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4399 code, e, bsi);
4400 }
4401
4402 return retval;
4403 }
4404
4405 /* Try to register an edge assertion for SSA name NAME on edge E for
4406 the condition COND_OP0 COND_CODE COND_OP1 contributing to the conditional
4407 jump pointed to by SI. Return true if an assertion for NAME could be registered. */
4408
4409 static bool
4410 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4411 enum tree_code cond_code, tree cond_op0,
4412 tree cond_op1)
4413 {
4414 tree val;
4415 enum tree_code comp_code;
4416 bool retval = false;
4417 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4418
4419 /* Do not attempt to infer anything about names that flow through
4420 abnormal edges. */
4421 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4422 return false;
4423
4424 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4425 cond_op0, cond_op1,
4426 is_else_edge,
4427 &comp_code, &val))
4428 return false;
4429
4430 /* Register ASSERT_EXPRs for name. */
4431 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4432 cond_op1, is_else_edge);
4433
4434
4435 /* If COND is effectively an equality test of an SSA_NAME against
4436 the value zero or one, then we may be able to assert values
4437 for SSA_NAMEs which flow into COND. */
4438
4439 /* In the case of NAME == 1 or NAME != 0, if the defining statement
4440 of NAME is a TRUTH_AND_EXPR, we can assert that both operands of
4441 the TRUTH_AND_EXPR have a nonzero value. */
4442 if (((comp_code == EQ_EXPR && integer_onep (val))
4443 || (comp_code == NE_EXPR && integer_zerop (val))))
4444 {
4445 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4446
4447 if (is_gimple_assign (def_stmt)
4448 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4449 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4450 {
4451 tree op0 = gimple_assign_rhs1 (def_stmt);
4452 tree op1 = gimple_assign_rhs2 (def_stmt);
4453 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4454 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4455 }
4456 }
4457
4458 /* In the case of NAME == 0 or NAME != 1, if the defining statement
4459 of NAME is a TRUTH_OR_EXPR, we can assert that both operands of
4460 the TRUTH_OR_EXPR have a zero value. */
4461 if (((comp_code == EQ_EXPR && integer_zerop (val))
4462 || (comp_code == NE_EXPR && integer_onep (val))))
4463 {
4464 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4465
4466 if (is_gimple_assign (def_stmt)
4467 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4468 /* For BIT_IOR_EXPR, only when NAME == 0 do both operands
4469 necessarily have a zero value. */
4470 || (comp_code == EQ_EXPR
4471 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4472 {
4473 tree op0 = gimple_assign_rhs1 (def_stmt);
4474 tree op1 = gimple_assign_rhs2 (def_stmt);
4475 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4476 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4477 }
4478 }
4479
4480 return retval;
4481 }
4482
4483
4484 /* Determine whether the outgoing edges of BB should receive an
4485 ASSERT_EXPR for each of the operands of BB's LAST statement.
4486 The last statement of BB must be a COND_EXPR.
4487
4488 If any of the sub-graphs rooted at BB have an interesting use of
4489 the predicate operands, an assert location node is added to the
4490 list of assertions for the corresponding operands. */
4491
4492 static bool
4493 find_conditional_asserts (basic_block bb, gimple last)
4494 {
4495 bool need_assert;
4496 gimple_stmt_iterator bsi;
4497 tree op;
4498 edge_iterator ei;
4499 edge e;
4500 ssa_op_iter iter;
4501
4502 need_assert = false;
4503 bsi = gsi_for_stmt (last);
4504
4505 /* Look for uses of the operands in each of the sub-graphs
4506 rooted at BB. We need to check each of the outgoing edges
4507 separately, so that we know what kind of ASSERT_EXPR to
4508 insert. */
4509 FOR_EACH_EDGE (e, ei, bb->succs)
4510 {
4511 if (e->dest == bb)
4512 continue;
4513
4514 /* Register the necessary assertions for each operand in the
4515 conditional predicate. */
4516 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4517 {
4518 need_assert |= register_edge_assert_for (op, e, bsi,
4519 gimple_cond_code (last),
4520 gimple_cond_lhs (last),
4521 gimple_cond_rhs (last));
4522 }
4523 }
4524
4525 return need_assert;
4526 }
4527
4528 /* Compare two case labels sorting first by the destination label uid
4529 and then by the case value. */
4530
4531 static int
4532 compare_case_labels (const void *p1, const void *p2)
4533 {
4534 const_tree const case1 = *(const_tree const*)p1;
4535 const_tree const case2 = *(const_tree const*)p2;
4536 unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
4537 unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
4538
4539 if (uid1 < uid2)
4540 return -1;
4541 else if (uid1 == uid2)
4542 {
4543 /* Make sure the default label is first in a group. */
4544 if (!CASE_LOW (case1))
4545 return -1;
4546 else if (!CASE_LOW (case2))
4547 return 1;
4548 else
4549 return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
4550 }
4551 else
4552 return 1;
4553 }
4554
4555 /* Determine whether the outgoing edges of BB should receive an
4556 ASSERT_EXPR for each of the operands of BB's LAST statement.
4557 The last statement of BB must be a SWITCH_EXPR.
4558
4559 If any of the sub-graphs rooted at BB have an interesting use of
4560 the predicate operands, an assert location node is added to the
4561 list of assertions for the corresponding operands. */
4562
4563 static bool
4564 find_switch_asserts (basic_block bb, gimple last)
4565 {
4566 bool need_assert;
4567 gimple_stmt_iterator bsi;
4568 tree op;
4569 edge e;
4570 tree vec2;
4571 size_t n = gimple_switch_num_labels(last);
4572 #if GCC_VERSION >= 4000
4573 unsigned int idx;
4574 #else
4575 /* Work around GCC 3.4 bug (PR 37086). */
4576 volatile unsigned int idx;
4577 #endif
4578
4579 need_assert = false;
4580 bsi = gsi_for_stmt (last);
4581 op = gimple_switch_index (last);
4582 if (TREE_CODE (op) != SSA_NAME)
4583 return false;
4584
4585 /* Build a vector of case labels sorted by destination label. */
4586 vec2 = make_tree_vec (n);
4587 for (idx = 0; idx < n; ++idx)
4588 TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx);
4589 qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
4590
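/* Since the labels are now sorted by destination, case labels that
share an edge are adjacent, and the loop below can merge them into
a single value range for that edge.  */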
4591 for (idx = 0; idx < n; ++idx)
4592 {
4593 tree min, max;
4594 tree cl = TREE_VEC_ELT (vec2, idx);
4595
4596 min = CASE_LOW (cl);
4597 max = CASE_HIGH (cl);
4598
4599 /* If there are multiple case labels with the same destination
4600 we need to combine them to a single value range for the edge. */
4601 if (idx + 1 < n
4602 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
4603 {
4604 /* Skip labels until the last of the group. */
4605 do {
4606 ++idx;
4607 } while (idx < n
4608 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
4609 --idx;
4610
4611 /* Pick up the maximum of the case label range. */
4612 if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
4613 max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
4614 else
4615 max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
4616 }
4617
4618 /* If the range includes the default label, there is nothing to do
4619 until we can register anti-ranges. */
4620 if (min == NULL_TREE)
4621 continue;
4622
4623 /* Find the edge to register the assert expr on. */
4624 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
4625
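/* For instance (illustrative), a label 'case 3 ... 7:' results in
the asserts OP >= 3 and OP <= 7 being registered on the edge to its
destination block; a single-valued label registers OP == CST instead.  */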
4626 /* Register the necessary assertions for the operand in the
4627 SWITCH_EXPR. */
4628 need_assert |= register_edge_assert_for (op, e, bsi,
4629 max ? GE_EXPR : EQ_EXPR,
4630 op,
4631 fold_convert (TREE_TYPE (op),
4632 min));
4633 if (max)
4634 {
4635 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4636 op,
4637 fold_convert (TREE_TYPE (op),
4638 max));
4639 }
4640 }
4641
4642 return need_assert;
4643 }
4644
4645
4646 /* Traverse all the statements in block BB looking for statements that
4647 may generate useful assertions for the SSA names in their operands.
4648 If a statement produces a useful assertion A for name N_i, then the
4649 list of assertions already generated for N_i is scanned to
4650 determine if A is actually needed.
4651
4652 If N_i already had the assertion A at a location dominating the
4653 current location, then nothing needs to be done. Otherwise, the
4654 new location for A is recorded instead.
4655
4656 1- For every statement S in BB, all the variables used by S are
4657 added to bitmap FOUND_IN_SUBGRAPH.
4658
4659 2- If statement S uses an operand N in a way that exposes a known
4660 value range for N, then if N was not already generated by an
4661 ASSERT_EXPR, create a new assert location for N. For instance,
4662 if N is a pointer and the statement dereferences it, we can
4663 assume that N is not NULL.
4664
4665 3- COND_EXPRs are a special case of #2. We can derive range
4666 information from the predicate but need to insert different
4667 ASSERT_EXPRs for each of the sub-graphs rooted at the
4668 conditional block. If the last statement of BB is a conditional
4669 expression of the form 'X op Y', then
4670
4671 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4672
4673 b) If the conditional is the only entry point to the sub-graph
4674 corresponding to the THEN_CLAUSE, recurse into it. On
4675 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4676 an ASSERT_EXPR is added for the corresponding variable.
4677
4678 c) Repeat step (b) on the ELSE_CLAUSE.
4679
4680 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4681
4682 For instance,
4683
4684 if (a == 9)
4685 b = a;
4686 else
4687 b = c + 1;
4688
4689 In this case, an assertion on the THEN clause is useful to
4690 determine that 'a' is always 9 on that edge. However, an assertion
4691 on the ELSE clause would be unnecessary.
4692
4693 4- If BB does not end in a conditional expression, then we recurse
4694 into BB's dominator children.
4695
4696 At the end of the recursive traversal, every SSA name will have a
4697 list of locations where ASSERT_EXPRs should be added. When a new
4698 location for name N is found, it is registered by calling
4699 register_new_assert_for. That function keeps track of all the
4700 registered assertions to prevent adding unnecessary assertions.
4701 For instance, if a pointer P_4 is dereferenced more than once in a
4702 dominator tree, only the location dominating all the dereferences of
4703 P_4 will receive an ASSERT_EXPR.
4704
4705 If this function returns true, then it means that there are names
4706 for which we need to generate ASSERT_EXPRs. Those assertions are
4707 inserted by process_assert_insertions. */
4708
4709 static bool
4710 find_assert_locations_1 (basic_block bb, sbitmap live)
4711 {
4712 gimple_stmt_iterator si;
4713 gimple last;
4714 gimple phi;
4715 bool need_assert;
4716
4717 need_assert = false;
4718 last = last_stmt (bb);
4719
4720 /* If BB's last statement is a conditional statement involving integer
4721 operands, determine if we need to add ASSERT_EXPRs. */
4722 if (last
4723 && gimple_code (last) == GIMPLE_COND
4724 && !fp_predicate (last)
4725 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4726 need_assert |= find_conditional_asserts (bb, last);
4727
4728 /* If BB's last statement is a switch statement involving integer
4729 operands, determine if we need to add ASSERT_EXPRs. */
4730 if (last
4731 && gimple_code (last) == GIMPLE_SWITCH
4732 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4733 need_assert |= find_switch_asserts (bb, last);
4734
4735 /* Traverse all the statements in BB marking used names and looking
4736 for statements that may infer assertions for their used operands. */
4737 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4738 {
4739 gimple stmt;
4740 tree op;
4741 ssa_op_iter i;
4742
4743 stmt = gsi_stmt (si);
4744
4745 if (is_gimple_debug (stmt))
4746 continue;
4747
4748 /* See if we can derive an assertion for any of STMT's operands. */
4749 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4750 {
4751 tree value;
4752 enum tree_code comp_code;
4753
4754 /* Mark OP in our live bitmap. */
4755 SET_BIT (live, SSA_NAME_VERSION (op));
4756
4757 /* If OP is used in such a way that we can infer a value
4758 range for it, and we don't find a previous assertion for
4759 it, create a new assertion location node for OP. */
4760 if (infer_value_range (stmt, op, &comp_code, &value))
4761 {
4762 /* If we are able to infer a nonzero value range for OP,
4763 then walk backwards through the use-def chain to see if OP
4764 was set via a typecast.
4765
4766 If so, then we can also infer a nonzero value range
4767 for the operand of the NOP_EXPR. */
4768 if (comp_code == NE_EXPR && integer_zerop (value))
4769 {
4770 tree t = op;
4771 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4772
4773 while (is_gimple_assign (def_stmt)
4774 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4775 && TREE_CODE
4776 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4777 && POINTER_TYPE_P
4778 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4779 {
4780 t = gimple_assign_rhs1 (def_stmt);
4781 def_stmt = SSA_NAME_DEF_STMT (t);
4782
4783 /* Note we want to register the assert for the
4784 operand of the NOP_EXPR after SI, not after the
4785 conversion. */
4786 if (! has_single_use (t))
4787 {
4788 register_new_assert_for (t, t, comp_code, value,
4789 bb, NULL, si);
4790 need_assert = true;
4791 }
4792 }
4793 }
4794
4795 /* If OP is used only once, namely in this STMT, don't
4796 bother creating an ASSERT_EXPR for it. Such an
4797 ASSERT_EXPR would do nothing but increase compile time. */
4798 if (!has_single_use (op))
4799 {
4800 register_new_assert_for (op, op, comp_code, value,
4801 bb, NULL, si);
4802 need_assert = true;
4803 }
4804 }
4805 }
4806 }
4807
4808 /* Traverse all PHI nodes in BB marking used operands. */
4809 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4810 {
4811 use_operand_p arg_p;
4812 ssa_op_iter i;
4813 phi = gsi_stmt (si);
4814
4815 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4816 {
4817 tree arg = USE_FROM_PTR (arg_p);
4818 if (TREE_CODE (arg) == SSA_NAME)
4819 SET_BIT (live, SSA_NAME_VERSION (arg));
4820 }
4821 }
4822
4823 return need_assert;
4824 }
4825
4826 /* Do an RPO walk over the function computing SSA name liveness
4827 on-the-fly and deciding on assert expressions to insert.
4828 Returns true if there are assert expressions to be inserted. */
4829
4830 static bool
4831 find_assert_locations (void)
4832 {
4833 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4834 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4835 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4836 int rpo_cnt, i;
4837 bool need_asserts;
4838
4839 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4840 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4841 for (i = 0; i < rpo_cnt; ++i)
4842 bb_rpo[rpo[i]] = i;
4843
4844 need_asserts = false;
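/* Walk the blocks from the end of the reverse post-order towards its
start.  A block's non-back-edge successors come later in the RPO and
have therefore already been processed, so their liveness has already
been merged into this block's bitmap when we reach it.  */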
4845 for (i = rpo_cnt-1; i >= 0; --i)
4846 {
4847 basic_block bb = BASIC_BLOCK (rpo[i]);
4848 edge e;
4849 edge_iterator ei;
4850
4851 if (!live[rpo[i]])
4852 {
4853 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4854 sbitmap_zero (live[rpo[i]]);
4855 }
4856
4857 /* Process BB and update the live information with uses in
4858 this block. */
4859 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
4860
4861 /* Merge liveness into the predecessor blocks and free it. */
4862 if (!sbitmap_empty_p (live[rpo[i]]))
4863 {
4864 int pred_rpo = i;
4865 FOR_EACH_EDGE (e, ei, bb->preds)
4866 {
4867 int pred = e->src->index;
4868 if (e->flags & EDGE_DFS_BACK)
4869 continue;
4870
4871 if (!live[pred])
4872 {
4873 live[pred] = sbitmap_alloc (num_ssa_names);
4874 sbitmap_zero (live[pred]);
4875 }
4876 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
4877
4878 if (bb_rpo[pred] < pred_rpo)
4879 pred_rpo = bb_rpo[pred];
4880 }
4881
4882 /* Record the RPO number of the last visited block that needs
4883 live information from this block. */
4884 last_rpo[rpo[i]] = pred_rpo;
4885 }
4886 else
4887 {
4888 sbitmap_free (live[rpo[i]]);
4889 live[rpo[i]] = NULL;
4890 }
4891
4892 /* We can free all successors live bitmaps if all their
4893 predecessors have been visited already. */
4894 FOR_EACH_EDGE (e, ei, bb->succs)
4895 if (last_rpo[e->dest->index] == i
4896 && live[e->dest->index])
4897 {
4898 sbitmap_free (live[e->dest->index]);
4899 live[e->dest->index] = NULL;
4900 }
4901 }
4902
4903 XDELETEVEC (rpo);
4904 XDELETEVEC (bb_rpo);
4905 XDELETEVEC (last_rpo);
4906 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
4907 if (live[i])
4908 sbitmap_free (live[i]);
4909 XDELETEVEC (live);
4910
4911 return need_asserts;
4912 }
4913
4914 /* Create an ASSERT_EXPR for NAME and insert it in the location
4915 indicated by LOC. Return true if we made any edge insertions. */
4916
4917 static bool
4918 process_assert_insertions_for (tree name, assert_locus_t loc)
4919 {
4920 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4921 gimple stmt;
4922 tree cond;
4923 gimple assert_stmt;
4924 edge_iterator ei;
4925 edge e;
4926
4927 /* If we have X <=> X do not insert an assert expr for that. */
4928 if (loc->expr == loc->val)
4929 return false;
4930
4931 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4932 assert_stmt = build_assert_expr_for (cond, name);
4933 if (loc->e)
4934 {
4935 /* We have been asked to insert the assertion on an edge. This
4936 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4937 #if defined ENABLE_CHECKING
4938 gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4939 || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH);
4940 #endif
4941
4942 gsi_insert_on_edge (loc->e, assert_stmt);
4943 return true;
4944 }
4945
4946 /* Otherwise, we can insert right after LOC->SI, provided the
4947 statement is not the last statement in the block. */
4948 stmt = gsi_stmt (loc->si);
4949 if (!stmt_ends_bb_p (stmt))
4950 {
4951 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4952 return false;
4953 }
4954
4955 /* If STMT must be the last statement in BB, we can only insert new
4956 assertions on the non-abnormal edge out of BB. Note that since
4957 STMT is not control flow, there may only be one non-abnormal edge
4958 out of BB. */
4959 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4960 if (!(e->flags & EDGE_ABNORMAL))
4961 {
4962 gsi_insert_on_edge (e, assert_stmt);
4963 return true;
4964 }
4965
4966 gcc_unreachable ();
4967 }
4968
4969
4970 /* Process all the insertions registered for every name N_i registered
4971 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4972 found in ASSERTS_FOR[i]. */
4973
4974 static void
4975 process_assert_insertions (void)
4976 {
4977 unsigned i;
4978 bitmap_iterator bi;
4979 bool update_edges_p = false;
4980 int num_asserts = 0;
4981
4982 if (dump_file && (dump_flags & TDF_DETAILS))
4983 dump_all_asserts (dump_file);
4984
4985 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4986 {
4987 assert_locus_t loc = asserts_for[i];
4988 gcc_assert (loc);
4989
4990 while (loc)
4991 {
4992 assert_locus_t next = loc->next;
4993 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4994 free (loc);
4995 loc = next;
4996 num_asserts++;
4997 }
4998 }
4999
5000 if (update_edges_p)
5001 gsi_commit_edge_inserts ();
5002
5003 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
5004 num_asserts);
5005 }
5006
5007
5008 /* Traverse the flowgraph looking for conditional jumps to insert range
5009 expressions. These range expressions are meant to provide information
5010 to optimizations that need to reason in terms of value ranges. They
5011 will not be expanded into RTL. For instance, given:
5012
5013 x = ...
5014 y = ...
5015 if (x < y)
5016 y = x - 2;
5017 else
5018 x = y + 3;
5019
5020 this pass will transform the code into:
5021
5022 x = ...
5023 y = ...
5024 if (x < y)
5025 {
5026 x = ASSERT_EXPR <x, x < y>
5027 y = x - 2
5028 }
5029 else
5030 {
5031 y = ASSERT_EXPR <y, x <= y>
5032 x = y + 3
5033 }
5034
5035 The idea is that once copy and constant propagation have run, other
5036 optimizations will be able to determine what ranges of values can 'x'
5037 take in different paths of the code, simply by checking the reaching
5038 definition of 'x'. */
5039
5040 static void
5041 insert_range_assertions (void)
5042 {
5043 need_assert_for = BITMAP_ALLOC (NULL);
5044 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
5045
5046 calculate_dominance_info (CDI_DOMINATORS);
5047
5048 if (find_assert_locations ())
5049 {
5050 process_assert_insertions ();
5051 update_ssa (TODO_update_ssa_no_phi);
5052 }
5053
5054 if (dump_file && (dump_flags & TDF_DETAILS))
5055 {
5056 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5057 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5058 }
5059
5060 free (asserts_for);
5061 BITMAP_FREE (need_assert_for);
5062 }
5063
5064 /* Checks one ARRAY_REF in REF, located at LOCATION. Ignores flexible
5065 arrays and "struct" hacks. If VRP can determine that the array
5066 subscript is a constant, check if it is outside the valid range.
5067 If the array subscript is a RANGE, warn if it does not overlap
5068 the valid range.
5069 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
5070
5071 static void
5072 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5073 {
5074 value_range_t* vr = NULL;
5075 tree low_sub, up_sub;
5076 tree low_bound, up_bound, up_bound_p1;
5077 tree base;
5078
5079 if (TREE_NO_WARNING (ref))
5080 return;
5081
5082 low_sub = up_sub = TREE_OPERAND (ref, 1);
5083 up_bound = array_ref_up_bound (ref);
5084
5085 /* Cannot check flexible arrays. */
5086 if (!up_bound
5087 || TREE_CODE (up_bound) != INTEGER_CST)
5088 return;
5089
5090 /* Accesses to trailing arrays via pointers may access storage
5091 beyond the type's array bounds. */
5092 base = get_base_address (ref);
5093 if (base && TREE_CODE (base) == MEM_REF)
5094 {
5095 tree cref, next = NULL_TREE;
5096
5097 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5098 return;
5099
5100 cref = TREE_OPERAND (ref, 0);
5101 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5102 for (next = TREE_CHAIN (TREE_OPERAND (cref, 1));
5103 next && TREE_CODE (next) != FIELD_DECL;
5104 next = TREE_CHAIN (next))
5105 ;
5106
5107 /* If this is the last field in a struct type or a field in a
5108 union type do not warn. */
5109 if (!next)
5110 return;
5111 }
5112
5113 low_bound = array_ref_low_bound (ref);
5114 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node, 0);
5115
5116 if (TREE_CODE (low_sub) == SSA_NAME)
5117 {
5118 vr = get_value_range (low_sub);
5119 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5120 {
5121 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5122 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5123 }
5124 }
5125
5126 if (vr && vr->type == VR_ANTI_RANGE)
5127 {
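/* An anti-range means the subscript never lies inside
[LOW_SUB, UP_SUB].  If that excluded interval covers the whole
valid index range, every possible subscript is out of bounds.  */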
5128 if (TREE_CODE (up_sub) == INTEGER_CST
5129 && tree_int_cst_lt (up_bound, up_sub)
5130 && TREE_CODE (low_sub) == INTEGER_CST
5131 && tree_int_cst_lt (low_sub, low_bound))
5132 {
5133 warning_at (location, OPT_Warray_bounds,
5134 "array subscript is outside array bounds");
5135 TREE_NO_WARNING (ref) = 1;
5136 }
5137 }
5138 else if (TREE_CODE (up_sub) == INTEGER_CST
5139 && (ignore_off_by_one
5140 ? (tree_int_cst_lt (up_bound, up_sub)
5141 && !tree_int_cst_equal (up_bound_p1, up_sub))
5142 : (tree_int_cst_lt (up_bound, up_sub)
5143 || tree_int_cst_equal (up_bound_p1, up_sub))))
5144 {
5145 warning_at (location, OPT_Warray_bounds,
5146 "array subscript is above array bounds");
5147 TREE_NO_WARNING (ref) = 1;
5148 }
5149 else if (TREE_CODE (low_sub) == INTEGER_CST
5150 && tree_int_cst_lt (low_sub, low_bound))
5151 {
5152 warning_at (location, OPT_Warray_bounds,
5153 "array subscript is below array bounds");
5154 TREE_NO_WARNING (ref) = 1;
5155 }
5156 }
5157
5158 /* Check whether the expression T, located at LOCATION, computes the
5159 address of an ARRAY_REF, and if so call check_array_ref on it. */
5160
5161 static void
5162 search_for_addr_array (tree t, location_t location)
5163 {
5164 while (TREE_CODE (t) == SSA_NAME)
5165 {
5166 gimple g = SSA_NAME_DEF_STMT (t);
5167
5168 if (gimple_code (g) != GIMPLE_ASSIGN)
5169 return;
5170
5171 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5172 != GIMPLE_SINGLE_RHS)
5173 return;
5174
5175 t = gimple_assign_rhs1 (g);
5176 }
5177
5178
5179 /* We are only interested in addresses of ARRAY_REF's. */
5180 if (TREE_CODE (t) != ADDR_EXPR)
5181 return;
5182
5183 /* Check each ARRAY_REF in the reference chain. */
5184 do
5185 {
5186 if (TREE_CODE (t) == ARRAY_REF)
5187 check_array_ref (location, t, true /*ignore_off_by_one*/);
5188
5189 t = TREE_OPERAND (t, 0);
5190 }
5191 while (handled_component_p (t));
5192
5193 if (TREE_CODE (t) == MEM_REF
5194 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
5195 && !TREE_NO_WARNING (t))
5196 {
5197 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
5198 tree low_bound, up_bound, el_sz;
5199 double_int idx;
5200 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
5201 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
5202 || !TYPE_DOMAIN (TREE_TYPE (tem)))
5203 return;
5204
5205 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5206 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
5207 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
5208 if (!low_bound
5209 || TREE_CODE (low_bound) != INTEGER_CST
5210 || !up_bound
5211 || TREE_CODE (up_bound) != INTEGER_CST
5212 || !el_sz
5213 || TREE_CODE (el_sz) != INTEGER_CST)
5214 return;
5215
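/* The MEM_REF offset is expressed in bytes; convert it into an
element index by dividing by the element size before comparing it
against the array domain.  */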
5216 idx = mem_ref_offset (t);
5217 idx = double_int_sdiv (idx, tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
5218 if (double_int_scmp (idx, double_int_zero) < 0)
5219 {
5220 warning_at (location, OPT_Warray_bounds,
5221 "array subscript is below array bounds");
5222 TREE_NO_WARNING (t) = 1;
5223 }
5224 else if (double_int_scmp (idx,
5225 double_int_add
5226 (double_int_add
5227 (tree_to_double_int (up_bound),
5228 double_int_neg
5229 (tree_to_double_int (low_bound))),
5230 double_int_one)) > 0)
5231 {
5232 warning_at (location, OPT_Warray_bounds,
5233 "array subscript is above array bounds");
5234 TREE_NO_WARNING (t) = 1;
5235 }
5236 }
5237 }
5238
5239 /* walk_tree() callback that checks if *TP is
5240 an ARRAY_REF inside an ADDR_EXPR (in which an array
5241 subscript one outside the valid range is allowed). Call
5242 check_array_ref for each ARRAY_REF found. The location is
5243 passed in DATA. */
5244
5245 static tree
5246 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5247 {
5248 tree t = *tp;
5249 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5250 location_t location;
5251
5252 if (EXPR_HAS_LOCATION (t))
5253 location = EXPR_LOCATION (t);
5254 else
5255 {
5256 location_t *locp = (location_t *) wi->info;
5257 location = *locp;
5258 }
5259
5260 *walk_subtree = TRUE;
5261
5262 if (TREE_CODE (t) == ARRAY_REF)
5263 check_array_ref (location, t, false /*ignore_off_by_one*/);
5264
5265 if (TREE_CODE (t) == MEM_REF
5266 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5267 search_for_addr_array (TREE_OPERAND (t, 0), location);
5268
5269 if (TREE_CODE (t) == ADDR_EXPR)
5270 *walk_subtree = FALSE;
5271
5272 return NULL_TREE;
5273 }
5274
5275 /* Walk over all statements of all reachable BBs and call check_array_bounds
5276 on them. */
5277
5278 static void
5279 check_all_array_refs (void)
5280 {
5281 basic_block bb;
5282 gimple_stmt_iterator si;
5283
5284 FOR_EACH_BB (bb)
5285 {
5286 edge_iterator ei;
5287 edge e;
5288 bool executable = false;
5289
5290 /* Skip blocks that were found to be unreachable. */
5291 FOR_EACH_EDGE (e, ei, bb->preds)
5292 executable |= !!(e->flags & EDGE_EXECUTABLE);
5293 if (!executable)
5294 continue;
5295
5296 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5297 {
5298 gimple stmt = gsi_stmt (si);
5299 struct walk_stmt_info wi;
5300 if (!gimple_has_location (stmt))
5301 continue;
5302
5303 if (is_gimple_call (stmt))
5304 {
5305 size_t i;
5306 size_t n = gimple_call_num_args (stmt);
5307 for (i = 0; i < n; i++)
5308 {
5309 tree arg = gimple_call_arg (stmt, i);
5310 search_for_addr_array (arg, gimple_location (stmt));
5311 }
5312 }
5313 else
5314 {
5315 memset (&wi, 0, sizeof (wi));
5316 wi.info = CONST_CAST (void *, (const void *)
5317 gimple_location_ptr (stmt));
5318
5319 walk_gimple_op (gsi_stmt (si),
5320 check_array_bounds,
5321 &wi);
5322 }
5323 }
5324 }
5325 }
5326
5327 /* Convert range assertion expressions into the implied copies and
5328 copy propagate away the copies. Doing the trivial copy propagation
5329 here avoids the need to run the full copy propagation pass after
5330 VRP.
5331
5332 FIXME, this will eventually lead to copy propagation removing the
5333 names that had useful range information attached to them. For
5334 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5335 then N_i will have the range [3, +INF].
5336
5337 However, by converting the assertion into the implied copy
5338 operation N_i = N_j, we will then copy-propagate N_j into the uses
5339 of N_i and lose the range information. We may want to hold on to
5340 ASSERT_EXPRs a little while longer as the ranges could be used in
5341 things like jump threading.
5342
5343 The problem with keeping ASSERT_EXPRs around is that passes after
5344 VRP need to handle them appropriately.
5345
5346 Another approach would be to make the range information a first
5347 class property of the SSA_NAME so that it can be queried from
5348 any pass. This is made somewhat more complex by the need for
5349 multiple ranges to be associated with one SSA_NAME. */
5350
5351 static void
5352 remove_range_assertions (void)
5353 {
5354 basic_block bb;
5355 gimple_stmt_iterator si;
5356
5357 /* Note that the BSI iterator bump happens at the bottom of the
5358 loop and no bump is necessary if we're removing the statement
5359 referenced by the current BSI. */
5360 FOR_EACH_BB (bb)
5361 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5362 {
5363 gimple stmt = gsi_stmt (si);
5364 gimple use_stmt;
5365
5366 if (is_gimple_assign (stmt)
5367 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5368 {
5369 tree rhs = gimple_assign_rhs1 (stmt);
5370 tree var;
5371 tree cond = fold (ASSERT_EXPR_COND (rhs));
5372 use_operand_p use_p;
5373 imm_use_iterator iter;
5374
5375 gcc_assert (cond != boolean_false_node);
5376
5377 /* Propagate the RHS into every use of the LHS. */
5378 var = ASSERT_EXPR_VAR (rhs);
5379 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5380 gimple_assign_lhs (stmt))
5381 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5382 {
5383 SET_USE (use_p, var);
5384 gcc_assert (TREE_CODE (var) == SSA_NAME);
5385 }
5386
5387 /* And finally, remove the copy, it is not needed. */
5388 gsi_remove (&si, true);
5389 release_defs (stmt);
5390 }
5391 else
5392 gsi_next (&si);
5393 }
5394 }
5395
5396
5397 /* Return true if STMT is interesting for VRP. */
5398
5399 static bool
5400 stmt_interesting_for_vrp (gimple stmt)
5401 {
5402 if (gimple_code (stmt) == GIMPLE_PHI
5403 && is_gimple_reg (gimple_phi_result (stmt))
5404 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5405 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5406 return true;
5407 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5408 {
5409 tree lhs = gimple_get_lhs (stmt);
5410
5411 /* In general, assignments with virtual operands are not useful
5412 for deriving ranges, with the obvious exception of calls to
5413 builtin functions. */
5414 if (lhs && TREE_CODE (lhs) == SSA_NAME
5415 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5416 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5417 && ((is_gimple_call (stmt)
5418 && gimple_call_fndecl (stmt) != NULL_TREE
5419 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5420 || !gimple_vuse (stmt)))
5421 return true;
5422 }
5423 else if (gimple_code (stmt) == GIMPLE_COND
5424 || gimple_code (stmt) == GIMPLE_SWITCH)
5425 return true;
5426
5427 return false;
5428 }
5429
5430
5431 /* Initialize local data structures for VRP. */
5432
5433 static void
5434 vrp_initialize (void)
5435 {
5436 basic_block bb;
5437
5438 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5439 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5440
5441 FOR_EACH_BB (bb)
5442 {
5443 gimple_stmt_iterator si;
5444
5445 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5446 {
5447 gimple phi = gsi_stmt (si);
5448 if (!stmt_interesting_for_vrp (phi))
5449 {
5450 tree lhs = PHI_RESULT (phi);
5451 set_value_range_to_varying (get_value_range (lhs));
5452 prop_set_simulate_again (phi, false);
5453 }
5454 else
5455 prop_set_simulate_again (phi, true);
5456 }
5457
5458 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5459 {
5460 gimple stmt = gsi_stmt (si);
5461
5462 /* If the statement is a control insn, then it must be
5463 simulated at least once. Failing to do so means that
5464 the edges it controls will never get added. */
5465 if (stmt_ends_bb_p (stmt))
5466 prop_set_simulate_again (stmt, true);
5467 else if (!stmt_interesting_for_vrp (stmt))
5468 {
5469 ssa_op_iter i;
5470 tree def;
5471 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5472 set_value_range_to_varying (get_value_range (def));
5473 prop_set_simulate_again (stmt, false);
5474 }
5475 else
5476 prop_set_simulate_again (stmt, true);
5477 }
5478 }
5479 }
5480
5481
5482 /* Visit assignment STMT. If it produces an interesting range, record
5483 the SSA name in *OUTPUT_P. */
5484
5485 static enum ssa_prop_result
5486 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5487 {
5488 tree def, lhs;
5489 ssa_op_iter iter;
5490 enum gimple_code code = gimple_code (stmt);
5491 lhs = gimple_get_lhs (stmt);
5492
5493 /* We only keep track of ranges in integral and pointer types. */
5494 if (TREE_CODE (lhs) == SSA_NAME
5495 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5496 /* It is valid to have NULL MIN/MAX values on a type. See
5497 build_range_type. */
5498 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5499 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5500 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5501 {
5502 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5503
5504 if (code == GIMPLE_CALL)
5505 extract_range_basic (&new_vr, stmt);
5506 else
5507 extract_range_from_assignment (&new_vr, stmt);
5508
5509 if (update_value_range (lhs, &new_vr))
5510 {
5511 *output_p = lhs;
5512
5513 if (dump_file && (dump_flags & TDF_DETAILS))
5514 {
5515 fprintf (dump_file, "Found new range for ");
5516 print_generic_expr (dump_file, lhs, 0);
5517 fprintf (dump_file, ": ");
5518 dump_value_range (dump_file, &new_vr);
5519 fprintf (dump_file, "\n\n");
5520 }
5521
5522 if (new_vr.type == VR_VARYING)
5523 return SSA_PROP_VARYING;
5524
5525 return SSA_PROP_INTERESTING;
5526 }
5527
5528 return SSA_PROP_NOT_INTERESTING;
5529 }
5530
5531 /* Every other statement produces no useful ranges. */
5532 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5533 set_value_range_to_varying (get_value_range (def));
5534
5535 return SSA_PROP_VARYING;
5536 }
5537
5538 /* Helper that returns the value range of the SSA_NAME with version I,
5539 or, if that range is varying or undefined, a symbolic range
5540 containing only the SSA_NAME itself. */
5541
5542 static inline value_range_t
5543 get_vr_for_comparison (int i)
5544 {
5545 value_range_t vr = *(vr_value[i]);
5546
5547 /* If name N_i does not have a valid range, use N_i as its own
5548 range. This allows us to compare against names that may
5549 have N_i in their ranges. */
5550 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5551 {
5552 vr.type = VR_RANGE;
5553 vr.min = ssa_name (i);
5554 vr.max = ssa_name (i);
5555 }
5556
5557 return vr;
5558 }
5559
5560 /* Compare all the value ranges for names equivalent to VAR with VAL
5561 using comparison code COMP. Return the same value returned by
5562 compare_range_with_value, including the setting of
5563 *STRICT_OVERFLOW_P. */
5564
5565 static tree
5566 compare_name_with_value (enum tree_code comp, tree var, tree val,
5567 bool *strict_overflow_p)
5568 {
5569 bitmap_iterator bi;
5570 unsigned i;
5571 bitmap e;
5572 tree retval, t;
5573 int used_strict_overflow;
5574 bool sop;
5575 value_range_t equiv_vr;
5576
5577 /* Get the set of equivalences for VAR. */
5578 e = get_value_range (var)->equiv;
5579
5580 /* Start at -1. Set it to 0 if we do a comparison without relying
5581 on overflow, or 1 if all comparisons rely on overflow. */
5582 used_strict_overflow = -1;
5583
5584 /* Compare VAR's value range with VAL. */
5585 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5586 sop = false;
5587 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5588 if (retval)
5589 used_strict_overflow = sop ? 1 : 0;
5590
5591 /* If the equiv set is empty we have done all work we need to do. */
5592 if (e == NULL)
5593 {
5594 if (retval
5595 && used_strict_overflow > 0)
5596 *strict_overflow_p = true;
5597 return retval;
5598 }
5599
5600 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5601 {
5602 equiv_vr = get_vr_for_comparison (i);
5603 sop = false;
5604 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5605 if (t)
5606 {
5607 /* If we get different answers from different members
5608 of the equivalence set this check must be in a dead
5609 code region. Folding it to a trap representation
5610 would be correct here. For now just return don't-know. */
5611 if (retval != NULL
5612 && t != retval)
5613 {
5614 retval = NULL_TREE;
5615 break;
5616 }
5617 retval = t;
5618
5619 if (!sop)
5620 used_strict_overflow = 0;
5621 else if (used_strict_overflow < 0)
5622 used_strict_overflow = 1;
5623 }
5624 }
5625
5626 if (retval
5627 && used_strict_overflow > 0)
5628 *strict_overflow_p = true;
5629
5630 return retval;
5631 }
5632
5633
5634 /* Given a comparison code COMP and names N1 and N2, compare all the
5635 ranges equivalent to N1 against all the ranges equivalent to N2
5636 to determine the value of N1 COMP N2. Return the same value
5637 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5638 whether we relied on an overflow infinity in the comparison. */
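/* As a sketch of why the equivalence intersection test below is enough
   (hypothetical names, not from a real dump): if j_2 appears in the
   equivalence sets of both i_1 and k_3, then i_1 == j_2 == k_3 holds on
   this path, so i_1 EQ/LE/GE k_3 must be true and i_1 NE/LT/GT k_3 must
   be false, no matter what the individual ranges are.  */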
5639
5640
5641 static tree
5642 compare_names (enum tree_code comp, tree n1, tree n2,
5643 bool *strict_overflow_p)
5644 {
5645 tree t, retval;
5646 bitmap e1, e2;
5647 bitmap_iterator bi1, bi2;
5648 unsigned i1, i2;
5649 int used_strict_overflow;
5650 static bitmap_obstack *s_obstack = NULL;
5651 static bitmap s_e1 = NULL, s_e2 = NULL;
5652
5653 /* Compare the ranges of every name equivalent to N1 against the
5654 ranges of every name equivalent to N2. */
5655 e1 = get_value_range (n1)->equiv;
5656 e2 = get_value_range (n2)->equiv;
5657
5658 /* Use the fake bitmaps if e1 or e2 are not available. */
5659 if (s_obstack == NULL)
5660 {
5661 s_obstack = XNEW (bitmap_obstack);
5662 bitmap_obstack_initialize (s_obstack);
5663 s_e1 = BITMAP_ALLOC (s_obstack);
5664 s_e2 = BITMAP_ALLOC (s_obstack);
5665 }
5666 if (e1 == NULL)
5667 e1 = s_e1;
5668 if (e2 == NULL)
5669 e2 = s_e2;
5670
5671 /* Add N1 and N2 to their own set of equivalences to avoid
5672 duplicating the body of the loop just to check N1 and N2
5673 ranges. */
5674 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5675 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5676
5677 /* If the equivalence sets have a common intersection, then the two
5678 names can be compared without checking their ranges. */
5679 if (bitmap_intersect_p (e1, e2))
5680 {
5681 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5682 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5683
5684 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5685 ? boolean_true_node
5686 : boolean_false_node;
5687 }
5688
5689 /* Start at -1. Set it to 0 if we do a comparison without relying
5690 on overflow, or 1 if all comparisons rely on overflow. */
5691 used_strict_overflow = -1;
5692
5693 /* Otherwise, compare all the equivalent ranges. Since N1 and N2
5694 were added to their own equivalence sets above, the loops below
5695 also cover the ranges of N1 and N2 themselves. */
5696 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5697 {
5698 value_range_t vr1 = get_vr_for_comparison (i1);
5699
5700 t = retval = NULL_TREE;
5701 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5702 {
5703 bool sop = false;
5704
5705 value_range_t vr2 = get_vr_for_comparison (i2);
5706
5707 t = compare_ranges (comp, &vr1, &vr2, &sop);
5708 if (t)
5709 {
5710 /* If we get different answers from different members
5711 of the equivalence set this check must be in a dead
5712 code region. Folding it to a trap representation
5713 would be correct here. For now just return don't-know. */
5714 if (retval != NULL
5715 && t != retval)
5716 {
5717 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5718 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5719 return NULL_TREE;
5720 }
5721 retval = t;
5722
5723 if (!sop)
5724 used_strict_overflow = 0;
5725 else if (used_strict_overflow < 0)
5726 used_strict_overflow = 1;
5727 }
5728 }
5729
5730 if (retval)
5731 {
5732 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5733 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5734 if (used_strict_overflow > 0)
5735 *strict_overflow_p = true;
5736 return retval;
5737 }
5738 }
5739
5740 /* None of the equivalent ranges are useful in computing this
5741 comparison. */
5742 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5743 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5744 return NULL_TREE;
5745 }
5746
5747 /* Helper function for vrp_evaluate_conditional_warnv. */
5748
5749 static tree
5750 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5751 tree op0, tree op1,
5752 bool * strict_overflow_p)
5753 {
5754 value_range_t *vr0, *vr1;
5755
5756 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5757 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5758
5759 if (vr0 && vr1)
5760 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5761 else if (vr0 && vr1 == NULL)
5762 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5763 else if (vr0 == NULL && vr1)
5764 return (compare_range_with_value
5765 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5766 return NULL;
5767 }
5768
5769 /* Helper function for vrp_evaluate_conditional_warnv. */
5770
5771 static tree
5772 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5773 tree op1, bool use_equiv_p,
5774 bool *strict_overflow_p, bool *only_ranges)
5775 {
5776 tree ret;
5777 if (only_ranges)
5778 *only_ranges = true;
5779
5780 /* We only deal with integral and pointer types. */
5781 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5782 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5783 return NULL_TREE;
5784
5785 if (use_equiv_p)
5786 {
5787 if (only_ranges
5788 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5789 (code, op0, op1, strict_overflow_p)))
5790 return ret;
5791 *only_ranges = false;
5792 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5793 return compare_names (code, op0, op1, strict_overflow_p);
5794 else if (TREE_CODE (op0) == SSA_NAME)
5795 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5796 else if (TREE_CODE (op1) == SSA_NAME)
5797 return (compare_name_with_value
5798 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5799 }
5800 else
5801 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5802 strict_overflow_p);
5803 return NULL_TREE;
5804 }
5805
5806 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5807 information. Return NULL if the conditional cannot be evaluated.
5808 The ranges of all the names equivalent with the operands in COND
5809 will be used when trying to compute the value. If the result is
5810 based on undefined signed overflow, issue a warning if
5811 appropriate. */
5812
5813 static tree
5814 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5815 {
5816 bool sop;
5817 tree ret;
5818 bool only_ranges;
5819
5820 /* Some passes and foldings leak constants with overflow flag set
5821 into the IL. Avoid doing wrong things with these and bail out. */
5822 if ((TREE_CODE (op0) == INTEGER_CST
5823 && TREE_OVERFLOW (op0))
5824 || (TREE_CODE (op1) == INTEGER_CST
5825 && TREE_OVERFLOW (op1)))
5826 return NULL_TREE;
5827
5828 sop = false;
5829 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
5830 &only_ranges);
5831
5832 if (ret && sop)
5833 {
5834 enum warn_strict_overflow_code wc;
5835 const char* warnmsg;
5836
5837 if (is_gimple_min_invariant (ret))
5838 {
5839 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
5840 warnmsg = G_("assuming signed overflow does not occur when "
5841 "simplifying conditional to constant");
5842 }
5843 else
5844 {
5845 wc = WARN_STRICT_OVERFLOW_COMPARISON;
5846 warnmsg = G_("assuming signed overflow does not occur when "
5847 "simplifying conditional");
5848 }
5849
5850 if (issue_strict_overflow_warning (wc))
5851 {
5852 location_t location;
5853
5854 if (!gimple_has_location (stmt))
5855 location = input_location;
5856 else
5857 location = gimple_location (stmt);
5858 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
5859 }
5860 }
5861
5862 if (warn_type_limits
5863 && ret && only_ranges
5864 && TREE_CODE_CLASS (code) == tcc_comparison
5865 && TREE_CODE (op0) == SSA_NAME)
5866 {
5867 /* If the comparison is being folded and the operand on the LHS
5868 is being compared against a constant value that is outside of
5869 the natural range of OP0's type, then the predicate will
5870 always fold regardless of the value of OP0. If -Wtype-limits
5871 was specified, emit a warning. */
5872 tree type = TREE_TYPE (op0);
5873 value_range_t *vr0 = get_value_range (op0);
5874
5875 if (vr0->type != VR_VARYING
5876 && INTEGRAL_TYPE_P (type)
5877 && vrp_val_is_min (vr0->min)
5878 && vrp_val_is_max (vr0->max)
5879 && is_gimple_min_invariant (op1))
5880 {
5881 location_t location;
5882
5883 if (!gimple_has_location (stmt))
5884 location = input_location;
5885 else
5886 location = gimple_location (stmt);
5887
5888 warning_at (location, OPT_Wtype_limits,
5889 integer_zerop (ret)
5890 ? G_("comparison always false "
5891 "due to limited range of data type")
5892 : G_("comparison always true "
5893 "due to limited range of data type"));
5894 }
5895 }
5896
5897 return ret;
5898 }
5899
5900
5901 /* Visit conditional statement STMT. If we can determine which edge
5902 will be taken out of STMT's basic block, record it in
5903 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
5904 SSA_PROP_VARYING. */
5905
5906 static enum ssa_prop_result
5907 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
5908 {
5909 tree val;
5910 bool sop;
5911
5912 *taken_edge_p = NULL;
5913
5914 if (dump_file && (dump_flags & TDF_DETAILS))
5915 {
5916 tree use;
5917 ssa_op_iter i;
5918
5919 fprintf (dump_file, "\nVisiting conditional with predicate: ");
5920 print_gimple_stmt (dump_file, stmt, 0, 0);
5921 fprintf (dump_file, "\nWith known ranges\n");
5922
5923 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
5924 {
5925 fprintf (dump_file, "\t");
5926 print_generic_expr (dump_file, use, 0);
5927 fprintf (dump_file, ": ");
5928 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
5929 }
5930
5931 fprintf (dump_file, "\n");
5932 }
5933
5934 /* Compute the value of the predicate COND by checking the known
5935 ranges of each of its operands.
5936
5937 Note that we cannot evaluate all the equivalent ranges here
5938 because those ranges may not yet be final and with the current
5939 propagation strategy, we cannot determine when the value ranges
5940 of the names in the equivalence set have changed.
5941
5942 For instance, given the following code fragment
5943
5944 i_5 = PHI <8, i_13>
5945 ...
5946 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
5947 if (i_14 == 1)
5948 ...
5949
5950 Assume that on the first visit to i_14, i_5 has the temporary
5951 range [8, 8] because the second argument to the PHI function is
5952 not yet executable. We derive the range ~[0, 0] for i_14 and the
5953 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
5954 the first time, since i_14 is equivalent to the range [8, 8], we
5955 determine that the predicate is always false.
5956
5957 On the next round of propagation, i_13 is determined to be
5958 VARYING, which causes i_5 to drop down to VARYING. So, another
5959 visit to i_14 is scheduled. In this second visit, we compute the
5960 exact same range and equivalence set for i_14, namely ~[0, 0] and
5961 { i_5 }. But we did not have the previous range for i_5
5962 registered, so vrp_visit_assignment thinks that the range for
5963 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
5964 is not visited again, which stops propagation from visiting
5965 statements in the THEN clause of that if().
5966
5967 To properly fix this we would need to keep the previous range
5968 value for the names in the equivalence set. This way we would've
5969 discovered that from one visit to the other i_5 changed from
5970 range [8, 8] to VR_VARYING.
5971
5972 However, fixing this apparent limitation may not be worth the
5973 additional checking. Testing on several code bases (GCC, DLV,
5974 MICO, TRAMP3D and SPEC2000) showed that doing this results in
5975 4 more predicates folded in SPEC. */
5976 sop = false;
5977
5978 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
5979 gimple_cond_lhs (stmt),
5980 gimple_cond_rhs (stmt),
5981 false, &sop, NULL);
5982 if (val)
5983 {
5984 if (!sop)
5985 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
5986 else
5987 {
5988 if (dump_file && (dump_flags & TDF_DETAILS))
5989 fprintf (dump_file,
5990 "\nIgnoring predicate evaluation because "
5991 "it assumes that signed overflow is undefined");
5992 val = NULL_TREE;
5993 }
5994 }
5995
5996 if (dump_file && (dump_flags & TDF_DETAILS))
5997 {
5998 fprintf (dump_file, "\nPredicate evaluates to: ");
5999 if (val == NULL_TREE)
6000 fprintf (dump_file, "DON'T KNOW\n");
6001 else
6002 print_generic_stmt (dump_file, val, 0);
6003 }
6004
6005 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
6006 }
6007
6008 /* Searches the case labels of switch statement STMT for the index *IDX
6009 of the CASE_LABEL that includes the value VAL. The search is restricted
6010 to the range [START_IDX, n - 1] where n is the number of case labels.
6011
6012 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
6013 returned.
6014
6015 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
6016 it is placed in IDX and false is returned.
6017
6018 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
6019 returned. */
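/* A small worked example with hypothetical labels: for

     switch (x) { case 1: ...  case 5 ... 7: ...  case 10: ... }

   searching for VAL 6 finds the "case 5 ... 7" label and returns true,
   searching for VAL 8 places the index of "case 10" in IDX and returns
   false, and searching for VAL 11 places n in IDX and returns false.  */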
6020
6021 static bool
6022 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
6023 {
6024 size_t n = gimple_switch_num_labels (stmt);
6025 size_t low, high;
6026
6027 /* Find case label for minimum of the value range or the next one.
6028 At each iteration we are searching in [low, high - 1]. */
6029
6030 for (low = start_idx, high = n; high != low; )
6031 {
6032 tree t;
6033 int cmp;
6034 /* Note that i != high, so we never ask for n. */
6035 size_t i = (high + low) / 2;
6036 t = gimple_switch_label (stmt, i);
6037
6038 /* Cache the result of comparing CASE_LOW and val. */
6039 cmp = tree_int_cst_compare (CASE_LOW (t), val);
6040
6041 if (cmp == 0)
6042 {
6043 /* Ranges cannot be empty. */
6044 *idx = i;
6045 return true;
6046 }
6047 else if (cmp > 0)
6048 high = i;
6049 else
6050 {
6051 low = i + 1;
6052 if (CASE_HIGH (t) != NULL
6053 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
6054 {
6055 *idx = i;
6056 return true;
6057 }
6058 }
6059 }
6060
6061 *idx = high;
6062 return false;
6063 }
6064
6065 /* Searches the case labels of switch statement STMT for the range of CASE_LABELs used
6066 for values between MIN and MAX. The first index is placed in MIN_IDX. The
6067 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
6068 then MAX_IDX < MIN_IDX.
6069 Returns true if the default label is not needed. */
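/* Continuing the hypothetical example above (labels 1, 5 ... 7 and 10):
   for MIN 5 and MAX 10 the indices of "case 5 ... 7" and "case 10" are
   placed in *MIN_IDX and *MAX_IDX, but because the values 8 and 9 fall
   through to the default label the function returns false.  */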
6070
6071 static bool
6072 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
6073 size_t *max_idx)
6074 {
6075 size_t i, j;
6076 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
6077 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
6078
6079 if (i == j
6080 && min_take_default
6081 && max_take_default)
6082 {
6083 /* Only the default case label reached.
6084 Return an empty range. */
6085 *min_idx = 1;
6086 *max_idx = 0;
6087 return false;
6088 }
6089 else
6090 {
6091 bool take_default = min_take_default || max_take_default;
6092 tree low, high;
6093 size_t k;
6094
6095 if (max_take_default)
6096 j--;
6097
6098 /* If the case label range is contiguous, we do not need
6099 the default case label. Verify that. */
6100 high = CASE_LOW (gimple_switch_label (stmt, i));
6101 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6102 high = CASE_HIGH (gimple_switch_label (stmt, i));
6103 for (k = i + 1; k <= j; ++k)
6104 {
6105 low = CASE_LOW (gimple_switch_label (stmt, k));
6106 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
6107 {
6108 take_default = true;
6109 break;
6110 }
6111 high = low;
6112 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6113 high = CASE_HIGH (gimple_switch_label (stmt, k));
6114 }
6115
6116 *min_idx = i;
6117 *max_idx = j;
6118 return !take_default;
6119 }
6120 }
6121
6122 /* Visit switch statement STMT. If we can determine which edge
6123 will be taken out of STMT's basic block, record it in
6124 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6125 SSA_PROP_VARYING. */
6126
6127 static enum ssa_prop_result
6128 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6129 {
6130 tree op, val;
6131 value_range_t *vr;
6132 size_t i = 0, j = 0;
6133 bool take_default;
6134
6135 *taken_edge_p = NULL;
6136 op = gimple_switch_index (stmt);
6137 if (TREE_CODE (op) != SSA_NAME)
6138 return SSA_PROP_VARYING;
6139
6140 vr = get_value_range (op);
6141 if (dump_file && (dump_flags & TDF_DETAILS))
6142 {
6143 fprintf (dump_file, "\nVisiting switch expression with operand ");
6144 print_generic_expr (dump_file, op, 0);
6145 fprintf (dump_file, " with known range ");
6146 dump_value_range (dump_file, vr);
6147 fprintf (dump_file, "\n");
6148 }
6149
6150 if (vr->type != VR_RANGE
6151 || symbolic_range_p (vr))
6152 return SSA_PROP_VARYING;
6153
6154 /* Find the single edge that is taken from the switch expression. */
6155 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6156
6157 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
6158 label. */
6159 if (j < i)
6160 {
6161 gcc_assert (take_default);
6162 val = gimple_switch_default_label (stmt);
6163 }
6164 else
6165 {
6166 /* Check if labels with index i to j and maybe the default label
6167 are all reaching the same label. */
6168
6169 val = gimple_switch_label (stmt, i);
6170 if (take_default
6171 && CASE_LABEL (gimple_switch_default_label (stmt))
6172 != CASE_LABEL (val))
6173 {
6174 if (dump_file && (dump_flags & TDF_DETAILS))
6175 fprintf (dump_file, " not a single destination for this "
6176 "range\n");
6177 return SSA_PROP_VARYING;
6178 }
6179 for (++i; i <= j; ++i)
6180 {
6181 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6182 {
6183 if (dump_file && (dump_flags & TDF_DETAILS))
6184 fprintf (dump_file, " not a single destination for this "
6185 "range\n");
6186 return SSA_PROP_VARYING;
6187 }
6188 }
6189 }
6190
6191 *taken_edge_p = find_edge (gimple_bb (stmt),
6192 label_to_block (CASE_LABEL (val)));
6193
6194 if (dump_file && (dump_flags & TDF_DETAILS))
6195 {
6196 fprintf (dump_file, " will take edge to ");
6197 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6198 }
6199
6200 return SSA_PROP_INTERESTING;
6201 }
6202
6203
6204 /* Evaluate statement STMT. If the statement produces a useful range,
6205 return SSA_PROP_INTERESTING and record the SSA name with the
6206 interesting range into *OUTPUT_P.
6207
6208 If STMT is a conditional branch and we can determine its truth
6209 value, the taken edge is recorded in *TAKEN_EDGE_P.
6210
6211 If STMT produces a varying value, return SSA_PROP_VARYING. */
6212
6213 static enum ssa_prop_result
6214 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6215 {
6216 tree def;
6217 ssa_op_iter iter;
6218
6219 if (dump_file && (dump_flags & TDF_DETAILS))
6220 {
6221 fprintf (dump_file, "\nVisiting statement:\n");
6222 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6223 fprintf (dump_file, "\n");
6224 }
6225
6226 if (!stmt_interesting_for_vrp (stmt))
6227 gcc_assert (stmt_ends_bb_p (stmt));
6228 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6229 {
6230 /* In general, assignments with virtual operands are not useful
6231 for deriving ranges, with the obvious exception of calls to
6232 builtin functions. */
6233
6234 if ((is_gimple_call (stmt)
6235 && gimple_call_fndecl (stmt) != NULL_TREE
6236 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6237 || !gimple_vuse (stmt))
6238 return vrp_visit_assignment_or_call (stmt, output_p);
6239 }
6240 else if (gimple_code (stmt) == GIMPLE_COND)
6241 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6242 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6243 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6244
6245 /* All other statements produce nothing of interest for VRP, so mark
6246 their outputs varying and prevent further simulation. */
6247 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6248 set_value_range_to_varying (get_value_range (def));
6249
6250 return SSA_PROP_VARYING;
6251 }
6252
6253
6254 /* Meet operation for value ranges. Given two value ranges VR0 and
6255 VR1, store in VR0 a range that contains both VR0 and VR1. This
6256 may not be the smallest possible such range. */
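/* Some illustrative meets, assuming integral operands (the values are
   hypothetical):

     [1, 5]  meet  [3, 10]    ->  [1, 10]     convex hull of two ranges
     [1, 5]  meet  UNDEFINED  ->  [1, 5]
     [1, 5]  meet  VARYING    ->  VARYING
     [1, 5]  meet  ~[10, 20]  ->  ~[10, 20]   the range does not touch the
                                              excluded interval, so the
                                              anti-range already covers it.  */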
6257
6258 static void
6259 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6260 {
6261 if (vr0->type == VR_UNDEFINED)
6262 {
6263 copy_value_range (vr0, vr1);
6264 return;
6265 }
6266
6267 if (vr1->type == VR_UNDEFINED)
6268 {
6269 /* Nothing to do. VR0 already has the resulting range. */
6270 return;
6271 }
6272
6273 if (vr0->type == VR_VARYING)
6274 {
6275 /* Nothing to do. VR0 already has the resulting range. */
6276 return;
6277 }
6278
6279 if (vr1->type == VR_VARYING)
6280 {
6281 set_value_range_to_varying (vr0);
6282 return;
6283 }
6284
6285 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6286 {
6287 int cmp;
6288 tree min, max;
6289
6290 /* Compute the convex hull of the ranges. The lower limit of
6291 the new range is the minimum of the two ranges. If they
6292 cannot be compared, then give up. */
6293 cmp = compare_values (vr0->min, vr1->min);
6294 if (cmp == 0 || cmp == 1)
6295 min = vr1->min;
6296 else if (cmp == -1)
6297 min = vr0->min;
6298 else
6299 goto give_up;
6300
6301 /* Similarly, the upper limit of the new range is the maximum
6302 of the two ranges. If they cannot be compared, then
6303 give up. */
6304 cmp = compare_values (vr0->max, vr1->max);
6305 if (cmp == 0 || cmp == -1)
6306 max = vr1->max;
6307 else if (cmp == 1)
6308 max = vr0->max;
6309 else
6310 goto give_up;
6311
6312 /* Check for useless ranges. */
6313 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6314 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6315 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6316 goto give_up;
6317
6318 /* The resulting set of equivalences is the intersection of
6319 the two sets. */
6320 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6321 bitmap_and_into (vr0->equiv, vr1->equiv);
6322 else if (vr0->equiv && !vr1->equiv)
6323 bitmap_clear (vr0->equiv);
6324
6325 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6326 }
6327 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6328 {
6329 /* Two anti-ranges meet only if their complements intersect.
6330 Only handle the case of identical ranges. */
6331 if (compare_values (vr0->min, vr1->min) == 0
6332 && compare_values (vr0->max, vr1->max) == 0
6333 && compare_values (vr0->min, vr0->max) == 0)
6334 {
6335 /* The resulting set of equivalences is the intersection of
6336 the two sets. */
6337 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6338 bitmap_and_into (vr0->equiv, vr1->equiv);
6339 else if (vr0->equiv && !vr1->equiv)
6340 bitmap_clear (vr0->equiv);
6341 }
6342 else
6343 goto give_up;
6344 }
6345 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6346 {
6347 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6348 only handle the case where the ranges have an empty intersection.
6349 The result of the meet operation is the anti-range. */
6350 if (!symbolic_range_p (vr0)
6351 && !symbolic_range_p (vr1)
6352 && !value_ranges_intersect_p (vr0, vr1))
6353 {
6354 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6355 set. We need to compute the intersection of the two
6356 equivalence sets. */
6357 if (vr1->type == VR_ANTI_RANGE)
6358 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6359
6360 /* The resulting set of equivalences is the intersection of
6361 the two sets. */
6362 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6363 bitmap_and_into (vr0->equiv, vr1->equiv);
6364 else if (vr0->equiv && !vr1->equiv)
6365 bitmap_clear (vr0->equiv);
6366 }
6367 else
6368 goto give_up;
6369 }
6370 else
6371 gcc_unreachable ();
6372
6373 return;
6374
6375 give_up:
6376 /* Failed to find an efficient meet. Before giving up and setting
6377 the result to VARYING, see if we can at least derive a useful
6378 anti-range. FIXME, all this nonsense about distinguishing
6379 anti-ranges from ranges is necessary because of the odd
6380 semantics of range_includes_zero_p and friends. */
6381 if (!symbolic_range_p (vr0)
6382 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6383 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6384 && !symbolic_range_p (vr1)
6385 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6386 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6387 {
6388 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6389
6390 /* Since this meet operation did not result from the meeting of
6391 two equivalent names, VR0 cannot have any equivalences. */
6392 if (vr0->equiv)
6393 bitmap_clear (vr0->equiv);
6394 }
6395 else
6396 set_value_range_to_varying (vr0);
6397 }
6398
6399
6400 /* Visit all arguments for PHI node PHI that flow through executable
6401 edges. If a valid value range can be derived from all the incoming
6402 value ranges, set a new range for the LHS of PHI. */
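/* For a hypothetical loop header PHI

     i_1 = PHI <0(preheader), i_2(latch)>

   the first visit, with the latch edge not yet executable, yields the
   range [0, 0]; once the latch edge becomes executable and i_2 has,
   say, [1, 1], the meet gives [0, 1].  Rather than following the
   growing bound one step at a time, the widening below pushes it all
   the way to +INF (or to an overflow infinity when the variable may
   overflow).  */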
6403
6404 static enum ssa_prop_result
6405 vrp_visit_phi_node (gimple phi)
6406 {
6407 size_t i;
6408 tree lhs = PHI_RESULT (phi);
6409 value_range_t *lhs_vr = get_value_range (lhs);
6410 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6411 int edges, old_edges;
6412 struct loop *l;
6413
6414 copy_value_range (&vr_result, lhs_vr);
6415
6416 if (dump_file && (dump_flags & TDF_DETAILS))
6417 {
6418 fprintf (dump_file, "\nVisiting PHI node: ");
6419 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6420 }
6421
6422 edges = 0;
6423 for (i = 0; i < gimple_phi_num_args (phi); i++)
6424 {
6425 edge e = gimple_phi_arg_edge (phi, i);
6426
6427 if (dump_file && (dump_flags & TDF_DETAILS))
6428 {
6429 fprintf (dump_file,
6430 "\n Argument #%d (%d -> %d %sexecutable)\n",
6431 (int) i, e->src->index, e->dest->index,
6432 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6433 }
6434
6435 if (e->flags & EDGE_EXECUTABLE)
6436 {
6437 tree arg = PHI_ARG_DEF (phi, i);
6438 value_range_t vr_arg;
6439
6440 ++edges;
6441
6442 if (TREE_CODE (arg) == SSA_NAME)
6443 {
6444 vr_arg = *(get_value_range (arg));
6445 }
6446 else
6447 {
6448 if (is_overflow_infinity (arg))
6449 {
6450 arg = copy_node (arg);
6451 TREE_OVERFLOW (arg) = 0;
6452 }
6453
6454 vr_arg.type = VR_RANGE;
6455 vr_arg.min = arg;
6456 vr_arg.max = arg;
6457 vr_arg.equiv = NULL;
6458 }
6459
6460 if (dump_file && (dump_flags & TDF_DETAILS))
6461 {
6462 fprintf (dump_file, "\t");
6463 print_generic_expr (dump_file, arg, dump_flags);
6464 fprintf (dump_file, "\n\tValue: ");
6465 dump_value_range (dump_file, &vr_arg);
6466 fprintf (dump_file, "\n");
6467 }
6468
6469 vrp_meet (&vr_result, &vr_arg);
6470
6471 if (vr_result.type == VR_VARYING)
6472 break;
6473 }
6474 }
6475
6476 /* If this is a loop PHI node, SCEV may know more about its
6477 value range. */
6478 if (current_loops
6479 && (l = loop_containing_stmt (phi))
6480 && l->header == gimple_bb (phi))
6481 adjust_range_with_scev (&vr_result, l, phi, lhs);
6482
6483 if (vr_result.type == VR_VARYING)
6484 goto varying;
6485
6486 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6487 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6488
6489 /* To prevent infinite iterations in the algorithm, widen the bounds
6490 as soon as they change from the previous value instead of following
6491 the change one step at a time. We don't do this if we have seen a
6492 new executable edge; this helps us avoid an overflow infinity for
6493 conditionals which are not in a loop. */
6494 if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
6495 && edges <= old_edges)
6496 {
6497 if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
6498 {
6499 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6500 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6501
6502 /* If the new minimum is smaller or larger than the previous
6503 one, go all the way to -INF. In the first case, to avoid
6504 iterating millions of times to reach -INF, and in the
6505 other case to avoid infinite bouncing between different
6506 minimums. */
6507 if (cmp_min > 0 || cmp_min < 0)
6508 {
6509 /* If we will end up with a (-INF, +INF) range, set it to
6510 VARYING. Same if the previous max value was invalid for
6511 the type and we'd end up with vr_result.min > vr_result.max. */
6512 if (vrp_val_is_max (vr_result.max)
6513 || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)),
6514 vr_result.max) > 0)
6515 goto varying;
6516
6517 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6518 || !vrp_var_may_overflow (lhs, phi))
6519 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6520 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6521 vr_result.min =
6522 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6523 else
6524 goto varying;
6525 }
6526
6527 /* Similarly, if the new maximum is smaller or larger than
6528 the previous one, go all the way to +INF. */
6529 if (cmp_max < 0 || cmp_max > 0)
6530 {
6531 /* If we will end up with a (-INF, +INF) range, set it to
6532 VARYING. Same if the previous min value was invalid for
6533 the type and we'd end up with vr_result.max < vr_result.min. */
6534 if (vrp_val_is_min (vr_result.min)
6535 || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)),
6536 vr_result.min) < 0)
6537 goto varying;
6538
6539 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6540 || !vrp_var_may_overflow (lhs, phi))
6541 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6542 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6543 vr_result.max =
6544 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6545 else
6546 goto varying;
6547 }
6548 }
6549 }
6550
6551 /* If the new range is different than the previous value, keep
6552 iterating. */
6553 if (update_value_range (lhs, &vr_result))
6554 {
6555 if (dump_file && (dump_flags & TDF_DETAILS))
6556 {
6557 fprintf (dump_file, "Found new range for ");
6558 print_generic_expr (dump_file, lhs, 0);
6559 fprintf (dump_file, ": ");
6560 dump_value_range (dump_file, &vr_result);
6561 fprintf (dump_file, "\n\n");
6562 }
6563
6564 return SSA_PROP_INTERESTING;
6565 }
6566
6567 /* Nothing changed, don't add outgoing edges. */
6568 return SSA_PROP_NOT_INTERESTING;
6569
6570 /* No match found. Set the LHS to VARYING. */
6571 varying:
6572 set_value_range_to_varying (lhs_vr);
6573 return SSA_PROP_VARYING;
6574 }
6575
6576 /* Simplify boolean operations if the source is known
6577 to be already a boolean. */
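/* A sketch of the intended transformations (hypothetical GIMPLE, with
   a_1 and b_2 known to lie in [0, 1]):

     x_3 = a_1 && b_2;   becomes   x_3 = a_1 & b_2;
     x_4 = a_1 != 0;     becomes   x_4 = a_1;
     x_5 = !a_1;         becomes   x_5 = a_1 ^ 1;  */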
6578 static bool
6579 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6580 {
6581 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6582 tree val = NULL;
6583 tree op0, op1;
6584 value_range_t *vr;
6585 bool sop = false;
6586 bool need_conversion;
6587
6588 op0 = gimple_assign_rhs1 (stmt);
6589 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6590 {
6591 if (TREE_CODE (op0) != SSA_NAME)
6592 return false;
6593 vr = get_value_range (op0);
6594
6595 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6596 if (!val || !integer_onep (val))
6597 return false;
6598
6599 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6600 if (!val || !integer_onep (val))
6601 return false;
6602 }
6603
6604 if (rhs_code == TRUTH_NOT_EXPR)
6605 {
6606 rhs_code = NE_EXPR;
6607 op1 = build_int_cst (TREE_TYPE (op0), 1);
6608 }
6609 else
6610 {
6611 op1 = gimple_assign_rhs2 (stmt);
6612
6613 /* Reduce number of cases to handle. */
6614 if (is_gimple_min_invariant (op1))
6615 {
6616 /* Exclude anything that should have been already folded. */
6617 if (rhs_code != EQ_EXPR
6618 && rhs_code != NE_EXPR
6619 && rhs_code != TRUTH_XOR_EXPR)
6620 return false;
6621
6622 if (!integer_zerop (op1)
6623 && !integer_onep (op1)
6624 && !integer_all_onesp (op1))
6625 return false;
6626
6627 /* Limit the number of cases we have to consider. */
6628 if (rhs_code == EQ_EXPR)
6629 {
6630 rhs_code = NE_EXPR;
6631 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6632 }
6633 }
6634 else
6635 {
6636 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6637 if (rhs_code == EQ_EXPR)
6638 return false;
6639
6640 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6641 {
6642 vr = get_value_range (op1);
6643 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6644 if (!val || !integer_onep (val))
6645 return false;
6646
6647 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6648 if (!val || !integer_onep (val))
6649 return false;
6650 }
6651 }
6652 }
6653
6654 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6655 {
6656 location_t location;
6657
6658 if (!gimple_has_location (stmt))
6659 location = input_location;
6660 else
6661 location = gimple_location (stmt);
6662
6663 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6664 warning_at (location, OPT_Wstrict_overflow,
6665 _("assuming signed overflow does not occur when "
6666 "simplifying && or || to & or |"));
6667 else
6668 warning_at (location, OPT_Wstrict_overflow,
6669 _("assuming signed overflow does not occur when "
6670 "simplifying ==, != or ! to identity or ^"));
6671 }
6672
6673 need_conversion =
6674 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6675 TREE_TYPE (op0));
6676
6677 /* Make sure to not sign-extend -1 as a boolean value. */
6678 if (need_conversion
6679 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6680 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6681 return false;
6682
6683 switch (rhs_code)
6684 {
6685 case TRUTH_AND_EXPR:
6686 rhs_code = BIT_AND_EXPR;
6687 break;
6688 case TRUTH_OR_EXPR:
6689 rhs_code = BIT_IOR_EXPR;
6690 break;
6691 case TRUTH_XOR_EXPR:
6692 case NE_EXPR:
6693 if (integer_zerop (op1))
6694 {
6695 gimple_assign_set_rhs_with_ops (gsi,
6696 need_conversion ? NOP_EXPR : SSA_NAME,
6697 op0, NULL);
6698 update_stmt (gsi_stmt (*gsi));
6699 return true;
6700 }
6701
6702 rhs_code = BIT_XOR_EXPR;
6703 break;
6704 default:
6705 gcc_unreachable ();
6706 }
6707
6708 if (need_conversion)
6709 return false;
6710
6711 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6712 update_stmt (gsi_stmt (*gsi));
6713 return true;
6714 }
6715
6716 /* Simplify a division or modulo operator to a right shift or
6717 bitwise and if the first operand is unsigned or is greater
6718 than zero and the second operand is an exact power of two. */
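/* For example (hypothetical GIMPLE, with x_1 unsigned or known to be
   non-negative):

     y_2 = x_1 / 8;   becomes   y_2 = x_1 >> 3;
     y_3 = x_1 % 8;   becomes   y_3 = x_1 & 7;  */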
6719
6720 static bool
6721 simplify_div_or_mod_using_ranges (gimple stmt)
6722 {
6723 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6724 tree val = NULL;
6725 tree op0 = gimple_assign_rhs1 (stmt);
6726 tree op1 = gimple_assign_rhs2 (stmt);
6727 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6728
6729 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6730 {
6731 val = integer_one_node;
6732 }
6733 else
6734 {
6735 bool sop = false;
6736
6737 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6738
6739 if (val
6740 && sop
6741 && integer_onep (val)
6742 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6743 {
6744 location_t location;
6745
6746 if (!gimple_has_location (stmt))
6747 location = input_location;
6748 else
6749 location = gimple_location (stmt);
6750 warning_at (location, OPT_Wstrict_overflow,
6751 "assuming signed overflow does not occur when "
6752 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6753 }
6754 }
6755
6756 if (val && integer_onep (val))
6757 {
6758 tree t;
6759
6760 if (rhs_code == TRUNC_DIV_EXPR)
6761 {
6762 t = build_int_cst (NULL_TREE, tree_log2 (op1));
6763 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6764 gimple_assign_set_rhs1 (stmt, op0);
6765 gimple_assign_set_rhs2 (stmt, t);
6766 }
6767 else
6768 {
6769 t = build_int_cst (TREE_TYPE (op1), 1);
6770 t = int_const_binop (MINUS_EXPR, op1, t, 0);
6771 t = fold_convert (TREE_TYPE (op0), t);
6772
6773 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6774 gimple_assign_set_rhs1 (stmt, op0);
6775 gimple_assign_set_rhs2 (stmt, t);
6776 }
6777
6778 update_stmt (stmt);
6779 return true;
6780 }
6781
6782 return false;
6783 }
6784
6785 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6786 ABS_EXPR. If the operand is <= 0, then simplify the
6787 ABS_EXPR into a NEGATE_EXPR. */
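/* For example (hypothetical GIMPLE): with x_1 known to lie in [0, 100]

     y_2 = ABS_EXPR <x_1>;   becomes   y_2 = x_1;

   and with x_1 known to lie in [-100, 0] it becomes y_2 = -x_1;  */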
6788
6789 static bool
6790 simplify_abs_using_ranges (gimple stmt)
6791 {
6792 tree val = NULL;
6793 tree op = gimple_assign_rhs1 (stmt);
6794 tree type = TREE_TYPE (op);
6795 value_range_t *vr = get_value_range (op);
6796
6797 if (TYPE_UNSIGNED (type))
6798 {
6799 val = integer_zero_node;
6800 }
6801 else if (vr)
6802 {
6803 bool sop = false;
6804
6805 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6806 if (!val)
6807 {
6808 sop = false;
6809 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6810 &sop);
6811
6812 if (val)
6813 {
6814 if (integer_zerop (val))
6815 val = integer_one_node;
6816 else if (integer_onep (val))
6817 val = integer_zero_node;
6818 }
6819 }
6820
6821 if (val
6822 && (integer_onep (val) || integer_zerop (val)))
6823 {
6824 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6825 {
6826 location_t location;
6827
6828 if (!gimple_has_location (stmt))
6829 location = input_location;
6830 else
6831 location = gimple_location (stmt);
6832 warning_at (location, OPT_Wstrict_overflow,
6833 "assuming signed overflow does not occur when "
6834 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
6835 }
6836
6837 gimple_assign_set_rhs1 (stmt, op);
6838 if (integer_onep (val))
6839 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6840 else
6841 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6842 update_stmt (stmt);
6843 return true;
6844 }
6845 }
6846
6847 return false;
6848 }
6849
6850 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
6851 a known value range VR.
6852
6853 If there is one and only one value which will satisfy the
6854 conditional, then return that value. Else return NULL. */
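/* A hypothetical example: if OP0 has the range [0, 1] and the
   conditional is OP0 > 0, the refined bounds converge to [1, 1], so
   the only value that can satisfy the condition is 1.  */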
6855
6856 static tree
6857 test_for_singularity (enum tree_code cond_code, tree op0,
6858 tree op1, value_range_t *vr)
6859 {
6860 tree min = NULL;
6861 tree max = NULL;
6862
6863 /* Extract minimum/maximum values which satisfy the
6864 conditional as it was written. */
6865 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
6866 {
6867 /* This should not be negative infinity; there is no overflow
6868 here. */
6869 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
6870
6871 max = op1;
6872 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
6873 {
6874 tree one = build_int_cst (TREE_TYPE (op0), 1);
6875 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
6876 if (EXPR_P (max))
6877 TREE_NO_WARNING (max) = 1;
6878 }
6879 }
6880 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
6881 {
6882 /* This should not be positive infinity; there is no overflow
6883 here. */
6884 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
6885
6886 min = op1;
6887 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
6888 {
6889 tree one = build_int_cst (TREE_TYPE (op0), 1);
6890 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
6891 if (EXPR_P (min))
6892 TREE_NO_WARNING (min) = 1;
6893 }
6894 }
6895
6896 /* Now refine the minimum and maximum values using any
6897 value range information we have for op0. */
6898 if (min && max)
6899 {
6900 if (compare_values (vr->min, min) == 1)
6901 min = vr->min;
6902 if (compare_values (vr->max, max) == -1)
6903 max = vr->max;
6904
6905 /* If the new min/max values have converged to a single value,
6906 then there is only one value which can satisfy the condition,
6907 return that value. */
6908 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
6909 return min;
6910 }
6911 return NULL;
6912 }
6913
6914 /* Simplify a conditional using a relational operator to an equality
6915 test if the range information indicates only one value can satisfy
6916 the original conditional. */
6917
6918 static bool
6919 simplify_cond_using_ranges (gimple stmt)
6920 {
6921 tree op0 = gimple_cond_lhs (stmt);
6922 tree op1 = gimple_cond_rhs (stmt);
6923 enum tree_code cond_code = gimple_cond_code (stmt);
6924
6925 if (cond_code != NE_EXPR
6926 && cond_code != EQ_EXPR
6927 && TREE_CODE (op0) == SSA_NAME
6928 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
6929 && is_gimple_min_invariant (op1))
6930 {
6931 value_range_t *vr = get_value_range (op0);
6932
6933 /* If we have range information for OP0, then we might be
6934 able to simplify this conditional. */
6935 if (vr->type == VR_RANGE)
6936 {
6937 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
6938
6939 if (new_tree)
6940 {
6941 if (dump_file)
6942 {
6943 fprintf (dump_file, "Simplified relational ");
6944 print_gimple_stmt (dump_file, stmt, 0, 0);
6945 fprintf (dump_file, " into ");
6946 }
6947
6948 gimple_cond_set_code (stmt, EQ_EXPR);
6949 gimple_cond_set_lhs (stmt, op0);
6950 gimple_cond_set_rhs (stmt, new_tree);
6951
6952 update_stmt (stmt);
6953
6954 if (dump_file)
6955 {
6956 print_gimple_stmt (dump_file, stmt, 0, 0);
6957 fprintf (dump_file, "\n");
6958 }
6959
6960 return true;
6961 }
6962
6963 /* Try again after inverting the condition. We only deal
6964 with integral types here, so no need to worry about
6965 issues with inverting FP comparisons. */
6966 cond_code = invert_tree_comparison (cond_code, false);
6967 new_tree = test_for_singularity (cond_code, op0, op1, vr);
6968
6969 if (new_tree)
6970 {
6971 if (dump_file)
6972 {
6973 fprintf (dump_file, "Simplified relational ");
6974 print_gimple_stmt (dump_file, stmt, 0, 0);
6975 fprintf (dump_file, " into ");
6976 }
6977
6978 gimple_cond_set_code (stmt, NE_EXPR);
6979 gimple_cond_set_lhs (stmt, op0);
6980 gimple_cond_set_rhs (stmt, new_tree);
6981
6982 update_stmt (stmt);
6983
6984 if (dump_file)
6985 {
6986 print_gimple_stmt (dump_file, stmt, 0, 0);
6987 fprintf (dump_file, "\n");
6988 }
6989
6990 return true;
6991 }
6992 }
6993 }
6994
6995 return false;
6996 }
6997
6998 /* Simplify a switch statement using the value range of the switch
6999 argument. */
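/* For instance (hypothetical GIMPLE): if the index i_1 has the range
   [3, 4] in

     switch (i_1) { case 1: ...  case 3: ...  case 4: ...  case 9: ... }

   only the "case 3" and "case 4" labels can be reached, so the edges
   for "case 1", "case 9" and the default label are queued for removal
   and the case vector is rebuilt with the two remaining labels.  */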
7000
7001 static bool
7002 simplify_switch_using_ranges (gimple stmt)
7003 {
7004 tree op = gimple_switch_index (stmt);
7005 value_range_t *vr;
7006 bool take_default;
7007 edge e;
7008 edge_iterator ei;
7009 size_t i = 0, j = 0, n, n2;
7010 tree vec2;
7011 switch_update su;
7012
7013 if (TREE_CODE (op) == SSA_NAME)
7014 {
7015 vr = get_value_range (op);
7016
7017 /* We can only handle integer ranges. */
7018 if (vr->type != VR_RANGE
7019 || symbolic_range_p (vr))
7020 return false;
7021
7022 /* Find case label for min/max of the value range. */
7023 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
7024 }
7025 else if (TREE_CODE (op) == INTEGER_CST)
7026 {
7027 take_default = !find_case_label_index (stmt, 1, op, &i);
7028 if (take_default)
7029 {
7030 i = 1;
7031 j = 0;
7032 }
7033 else
7034 {
7035 j = i;
7036 }
7037 }
7038 else
7039 return false;
7040
7041 n = gimple_switch_num_labels (stmt);
7042
7043 /* Bail out if every case label and the default are still reachable; there is nothing to simplify. */
7044 if (i == 1
7045 && j == n - 1
7046 && take_default)
7047 return false;
7048
7049 /* Build a new vector of taken case labels. */
7050 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
7051 n2 = 0;
7052
7053 /* Add the default edge, if necessary. */
7054 if (take_default)
7055 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
7056
7057 for (; i <= j; ++i, ++n2)
7058 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
7059
7060 /* Mark needed edges. */
7061 for (i = 0; i < n2; ++i)
7062 {
7063 e = find_edge (gimple_bb (stmt),
7064 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
7065 e->aux = (void *)-1;
7066 }
7067
7068 /* Queue not needed edges for later removal. */
7069 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
7070 {
7071 if (e->aux == (void *)-1)
7072 {
7073 e->aux = NULL;
7074 continue;
7075 }
7076
7077 if (dump_file && (dump_flags & TDF_DETAILS))
7078 {
7079 fprintf (dump_file, "removing unreachable case label\n");
7080 }
7081 VEC_safe_push (edge, heap, to_remove_edges, e);
7082 e->flags &= ~EDGE_EXECUTABLE;
7083 }
7084
7085 /* And queue an update for the stmt. */
7086 su.stmt = stmt;
7087 su.vec = vec2;
7088 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
7089 return false;
7090 }
7091
7092 /* Simplify STMT using ranges if possible. */
7093
7094 static bool
7095 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7096 {
7097 gimple stmt = gsi_stmt (*gsi);
7098 if (is_gimple_assign (stmt))
7099 {
7100 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7101
7102 switch (rhs_code)
7103 {
7104 case EQ_EXPR:
7105 case NE_EXPR:
7106 case TRUTH_NOT_EXPR:
7107 case TRUTH_AND_EXPR:
7108 case TRUTH_OR_EXPR:
7109 case TRUTH_XOR_EXPR:
7110 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7111 or identity if the RHS is zero or one and the LHS is known
7112 to be a boolean value. Transform all TRUTH_*_EXPR into
7113 BIT_*_EXPR if both arguments are known to be boolean values. */
7114 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7115 return simplify_truth_ops_using_ranges (gsi, stmt);
7116 break;
7117
7118 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7119 and BIT_AND_EXPR respectively if the first operand is greater
7120 than zero and the second operand is an exact power of two. */
7121 case TRUNC_DIV_EXPR:
7122 case TRUNC_MOD_EXPR:
7123 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7124 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7125 return simplify_div_or_mod_using_ranges (stmt);
7126 break;
7127
7128 /* Transform ABS (X) into X or -X as appropriate. */
7129 case ABS_EXPR:
7130 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
7131 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7132 return simplify_abs_using_ranges (stmt);
7133 break;
7134
7135 default:
7136 break;
7137 }
7138 }
7139 else if (gimple_code (stmt) == GIMPLE_COND)
7140 return simplify_cond_using_ranges (stmt);
7141 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7142 return simplify_switch_using_ranges (stmt);
7143
7144 return false;
7145 }
7146
7147 /* If the statement pointed to by SI has a predicate whose value can be
7148 computed using the value range information computed by VRP, compute
7149 its value and return true. Otherwise, return false. */
7150
7151 static bool
7152 fold_predicate_in (gimple_stmt_iterator *si)
7153 {
7154 bool assignment_p = false;
7155 tree val;
7156 gimple stmt = gsi_stmt (*si);
7157
7158 if (is_gimple_assign (stmt)
7159 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7160 {
7161 assignment_p = true;
7162 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7163 gimple_assign_rhs1 (stmt),
7164 gimple_assign_rhs2 (stmt),
7165 stmt);
7166 }
7167 else if (gimple_code (stmt) == GIMPLE_COND)
7168 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7169 gimple_cond_lhs (stmt),
7170 gimple_cond_rhs (stmt),
7171 stmt);
7172 else
7173 return false;
7174
7175 if (val)
7176 {
7177 if (assignment_p)
7178 val = fold_convert (gimple_expr_type (stmt), val);
7179
7180 if (dump_file)
7181 {
7182 fprintf (dump_file, "Folding predicate ");
7183 print_gimple_expr (dump_file, stmt, 0, 0);
7184 fprintf (dump_file, " to ");
7185 print_generic_expr (dump_file, val, 0);
7186 fprintf (dump_file, "\n");
7187 }
7188
7189 if (is_gimple_assign (stmt))
7190 gimple_assign_set_rhs_from_tree (si, val);
7191 else
7192 {
7193 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7194 if (integer_zerop (val))
7195 gimple_cond_make_false (stmt);
7196 else if (integer_onep (val))
7197 gimple_cond_make_true (stmt);
7198 else
7199 gcc_unreachable ();
7200 }
7201
7202 return true;
7203 }
7204
7205 return false;
7206 }
7207
7208 /* Callback for substitute_and_fold folding the stmt at *SI. */
7209
7210 static bool
7211 vrp_fold_stmt (gimple_stmt_iterator *si)
7212 {
7213 if (fold_predicate_in (si))
7214 return true;
7215
7216 return simplify_stmt_using_ranges (si);
7217 }
7218
7219 /* Stack of dest,src equivalency pairs that need to be restored after
7220 each attempt to thread a block's incoming edge to an outgoing edge.
7221
7222 A NULL entry is used to mark the end of pairs which need to be
7223 restored. */
7224 static VEC(tree,heap) *stack;
7225
7226 /* A trivial wrapper so that we can present the generic jump threading
7227 code with a simple API for simplifying statements. STMT is the
7228 statement we want to simplify, WITHIN_STMT provides the location
7229 for any overflow warnings. */
7230
7231 static tree
7232 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7233 {
7234 /* We only use VRP information to simplify conditionals. This is
7235 overly conservative, but it's unclear if doing more would be
7236 worth the compile time cost. */
7237 if (gimple_code (stmt) != GIMPLE_COND)
7238 return NULL;
7239
7240 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7241 gimple_cond_lhs (stmt),
7242 gimple_cond_rhs (stmt), within_stmt);
7243 }
7244
7245 /* Blocks which have more than one predecessor and more than
7246 one successor present jump threading opportunities, i.e.,
7247 when the block is reached from a specific predecessor, we
7248 may be able to determine which of the outgoing edges will
7249 be traversed. When this optimization applies, we are able
7250 to avoid conditionals at runtime and we may expose secondary
7251 optimization opportunities.
7252
7253 This routine is effectively a driver for the generic jump
7254 threading code. It basically just presents the generic code
7255 with edges that may be suitable for jump threading.
7256
7257 Unlike DOM, we do not iterate VRP if jump threading was successful.
7258 While iterating may expose new opportunities for VRP, it is expected
7259 those opportunities would be very limited and the compile time cost
7260 to expose those opportunities would be significant.
7261
7262 As jump threading opportunities are discovered, they are registered
7263 for later realization. */
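/* A typical opportunity (hypothetical CFG): a block that joins two
   paths and then tests

     if (x_1 > 10)

   where x_1 is known to be 20 when the block is entered from one
   predecessor and 0 when entered from the other.  For each such
   incoming edge vrp_evaluate_conditional folds the test to a constant,
   so the edge can be threaded straight to the matching successor.  */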
7264
7265 static void
7266 identify_jump_threads (void)
7267 {
7268 basic_block bb;
7269 gimple dummy;
7270 int i;
7271 edge e;
7272
7273 /* Ugh. When substituting values earlier in this pass we can
7274 wipe the dominance information. So rebuild the dominator
7275 information as we need it within the jump threading code. */
7276 calculate_dominance_info (CDI_DOMINATORS);
7277
7278 /* We do not allow VRP information to be used for jump threading
7279 across a back edge in the CFG. Otherwise it becomes too
7280 difficult to avoid eliminating loop exit tests. Of course
7281 EDGE_DFS_BACK is not accurate at this time so we have to
7282 recompute it. */
7283 mark_dfs_back_edges ();
7284
7285 /* Do not thread across edges we are about to remove. Just marking
7286 them as EDGE_DFS_BACK will do. */
7287 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7288 e->flags |= EDGE_DFS_BACK;
7289
7290 /* Allocate our unwinder stack to unwind any temporary equivalences
7291 that might be recorded. */
7292 stack = VEC_alloc (tree, heap, 20);
7293
7294 /* To avoid lots of silly node creation, we create a single
7295 conditional and just modify it in-place when attempting to
7296 thread jumps. */
7297 dummy = gimple_build_cond (EQ_EXPR,
7298 integer_zero_node, integer_zero_node,
7299 NULL, NULL);
7300
7301 /* Walk through all the blocks finding those which present a
7302 potential jump threading opportunity. We could set this up
7303 as a dominator walker and record data during the walk, but
7304 I doubt it's worth the effort for the classes of jump
7305 threading opportunities we are trying to identify at this
7306 point in compilation. */
7307 FOR_EACH_BB (bb)
7308 {
7309 gimple last;
7310
7311 /* If the generic jump threading code does not find this block
7312 interesting, then there is nothing to do. */
7313 if (! potentially_threadable_block (bb))
7314 continue;
7315
7316 /* We only care about blocks ending in a COND_EXPR. While there
7317 may be some value in handling SWITCH_EXPR here, I doubt it's
7318 terribly important. */
7319 last = gsi_stmt (gsi_last_bb (bb));
7320 if (gimple_code (last) != GIMPLE_COND)
7321 continue;
7322
7323 /* We're basically looking for any kind of conditional with
7324 integral type arguments. */
7325 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7326 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7327 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7328 || is_gimple_min_invariant (gimple_cond_rhs (last)))
7329 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
7330 {
7331 edge_iterator ei;
7332
7333 /* We've got a block with multiple predecessors and multiple
7334 successors which also ends in a suitable conditional. For
7335 each predecessor, see if we can thread it to a specific
7336 successor. */
7337 FOR_EACH_EDGE (e, ei, bb->preds)
7338 {
7339 /* Do not thread across back edges or abnormal edges
7340 in the CFG. */
7341 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7342 continue;
7343
7344 thread_across_edge (dummy, e, true, &stack,
7345 simplify_stmt_for_jump_threading);
7346 }
7347 }
7348 }
7349
7350 /* We do not actually update the CFG or SSA graphs at this point as
7351 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7352 handle ASSERT_EXPRs gracefully. */
7353 }
7354
7355 /* We identified all the jump threading opportunities earlier, but could
7356 not transform the CFG at that time. This routine transforms the
7357 CFG and arranges for the dominator tree to be rebuilt if necessary.
7358
7359 Note the SSA graph update will occur during the normal TODO
7360 processing by the pass manager. */
7361 static void
7362 finalize_jump_threads (void)
7363 {
7364 thread_through_all_blocks (false);
7365 VEC_free (tree, heap, stack);
7366 }
7367
7368
7369 /* Traverse all the blocks folding conditionals with known ranges. */
7370
7371 static void
7372 vrp_finalize (void)
7373 {
7374 size_t i;
7375 prop_value_t *single_val_range;
7376 bool do_value_subst_p;
7377 unsigned num = num_ssa_names;
7378
7379 if (dump_file)
7380 {
7381 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7382 dump_all_value_ranges (dump_file);
7383 fprintf (dump_file, "\n");
7384 }
7385
7386   /* We may have ended up with ranges that contain exactly one value.
7387      Those values can be substituted like any other constant-propagated
7388      value using substitute_and_fold.  */
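  /* For instance (hypothetical SSA name): if i_7 ended up with the range
     [42, 42], every use of i_7 can be replaced with the constant 42 by
     substitute_and_fold, after which the defining statement typically
     becomes dead code.  */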
7389 single_val_range = XCNEWVEC (prop_value_t, num);
7390
7391 do_value_subst_p = false;
7392 for (i = 0; i < num; i++)
7393 if (vr_value[i]
7394 && vr_value[i]->type == VR_RANGE
7395 && vr_value[i]->min == vr_value[i]->max
7396 && is_gimple_min_invariant (vr_value[i]->min))
7397 {
7398 single_val_range[i].value = vr_value[i]->min;
7399 do_value_subst_p = true;
7400 }
7401
7402 if (!do_value_subst_p)
7403 {
7404       /* We found no single-valued ranges, so don't waste time trying
7405 	 to do single-value substitution in substitute_and_fold.  */
7406 free (single_val_range);
7407 single_val_range = NULL;
7408 }
7409
7410 substitute_and_fold (single_val_range, vrp_fold_stmt, false);
7411
7412 if (warn_array_bounds)
7413 check_all_array_refs ();
7414
7415   /* We must identify jump threading opportunities before we release
7416      the data structures built by VRP.  */
7417 identify_jump_threads ();
7418
7419 /* Free allocated memory. */
7420 for (i = 0; i < num; i++)
7421 if (vr_value[i])
7422 {
7423 BITMAP_FREE (vr_value[i]->equiv);
7424 free (vr_value[i]);
7425 }
7426
7427 free (single_val_range);
7428 free (vr_value);
7429 free (vr_phi_edge_counts);
7430
7431 /* So that we can distinguish between VRP data being available
7432 and not available. */
7433 vr_value = NULL;
7434 vr_phi_edge_counts = NULL;
7435 }
7436
7437
7438 /* Main entry point to VRP (Value Range Propagation). This pass is
7439 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7440 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7441 Programming Language Design and Implementation, pp. 67-78, 1995.
7442 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7443
7444 This is essentially an SSA-CCP pass modified to deal with ranges
7445 instead of constants.
7446
7447    While propagating ranges, we may find that two or more SSA names
7448    have equivalent, though distinct, ranges.  For instance,
7449
7450 1 x_9 = p_3->a;
7451 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7452 3 if (p_4 == q_2)
7453 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7454 5 endif
7455 6 if (q_2)
7456
7457 In the code above, pointer p_5 has range [q_2, q_2], but from the
7458 code we can also determine that p_5 cannot be NULL and, if q_2 had
7459 a non-varying range, p_5's range should also be compatible with it.
7460
7461    These equivalences are created by ASSERT_EXPRs and by copy
7462    operations.  Since p_5 is an assertion on p_4, and p_4 was the
7463    result of another assertion, we can use the fact that p_5 and
7464    p_4 are equivalent when evaluating p_5's range.
7465
7466 Together with value ranges, we also propagate these equivalences
7467 between names so that we can take advantage of information from
7468 multiple ranges when doing final replacement. Note that this
7469 equivalency relation is transitive but not symmetric.
7470
7471 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7472 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7473 in contexts where that assertion does not hold (e.g., in line 6).
7474
7475    TODO: the main difference between this pass and Patterson's is that
7476 we do not propagate edge probabilities. We only compute whether
7477 edges can be taken or not. That is, instead of having a spectrum
7478 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7479 DON'T KNOW. In the future, it may be worthwhile to propagate
7480 probabilities to aid branch prediction. */
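
/* A short source-level sketch (hypothetical code) of the kind of
   simplification the pass enables:

	int f (int i)
	{
	  if (i > 10)
	    {
	      if (i > 5)	<- on this path i has the range [11, +INF],
		return 1;	   so the inner test is always true
	    }
	  return 0;
	}

   The inner conditional is folded away; the jump threading machinery
   above handles cases where the deciding range is only known along some
   of the incoming paths.  */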
7481
7482 static unsigned int
7483 execute_vrp (void)
7484 {
7485 int i;
7486 edge e;
7487 switch_update *su;
7488
7489 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7490 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7491 scev_initialize ();
7492
7493 insert_range_assertions ();
7494
7495 to_remove_edges = VEC_alloc (edge, heap, 10);
7496 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7497 threadedge_initialize_values ();
7498
7499 vrp_initialize ();
7500 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7501 vrp_finalize ();
7502
7503 /* ASSERT_EXPRs must be removed before finalizing jump threads
7504 as finalizing jump threads calls the CFG cleanup code which
7505 does not properly handle ASSERT_EXPRs. */
7506 remove_range_assertions ();
7507
7508 /* If we exposed any new variables, go ahead and put them into
7509 SSA form now, before we handle jump threading. This simplifies
7510 interactions between rewriting of _DECL nodes into SSA form
7511 and rewriting SSA_NAME nodes into SSA form after block
7512 duplication and CFG manipulation. */
7513 update_ssa (TODO_update_ssa);
7514
7515 finalize_jump_threads ();
7516
7517 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
7518 CFG in a broken state and requires a cfg_cleanup run. */
7519 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7520 remove_edge (e);
7521 /* Update SWITCH_EXPR case label vector. */
7522 for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i)
7523 {
7524 size_t j;
7525 size_t n = TREE_VEC_LENGTH (su->vec);
7526 tree label;
7527 gimple_switch_set_num_labels (su->stmt, n);
7528 for (j = 0; j < n; j++)
7529 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7530 	  /* As we may have replaced the default label with a regular one,
7531 	     make sure to turn it back into a real default label.  This
7532 	     ensures optimal expansion.  */
7533 label = gimple_switch_default_label (su->stmt);
7534 CASE_LOW (label) = NULL_TREE;
7535 CASE_HIGH (label) = NULL_TREE;
7536 }
7537
7538 if (VEC_length (edge, to_remove_edges) > 0)
7539 free_dominance_info (CDI_DOMINATORS);
7540
7541 VEC_free (edge, heap, to_remove_edges);
7542 VEC_free (switch_update, heap, to_update_switch_stmts);
7543 threadedge_finalize_values ();
7544
7545 scev_finalize ();
7546 loop_optimizer_finalize ();
7547 return 0;
7548 }
7549
7550 static bool
7551 gate_vrp (void)
7552 {
7553 return flag_tree_vrp != 0;
7554 }
7555
7556 struct gimple_opt_pass pass_vrp =
7557 {
7558 {
7559 GIMPLE_PASS,
7560 "vrp", /* name */
7561 gate_vrp, /* gate */
7562 execute_vrp, /* execute */
7563 NULL, /* sub */
7564 NULL, /* next */
7565 0, /* static_pass_number */
7566 TV_TREE_VRP, /* tv_id */
7567 PROP_ssa, /* properties_required */
7568 0, /* properties_provided */
7569 0, /* properties_destroyed */
7570 0, /* todo_flags_start */
7571 TODO_cleanup_cfg
7572 | TODO_ggc_collect
7573 | TODO_verify_ssa
7574 | TODO_dump_func
7575 | TODO_update_ssa /* todo_flags_finish */
7576 }
7577 };