1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Diego Novillo <dnovillo@redhat.com>.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "ggc.h"
27 #include "flags.h"
28 #include "tree.h"
29 #include "basic-block.h"
30 #include "tree-flow.h"
31 #include "tree-pass.h"
32 #include "tree-dump.h"
33 #include "timevar.h"
34 #include "diagnostic.h"
35 #include "toplev.h"
36 #include "intl.h"
37 #include "cfgloop.h"
38 #include "tree-scalar-evolution.h"
39 #include "tree-ssa-propagate.h"
40 #include "tree-chrec.h"
41
42
43 /* Set of SSA names found live during the RPO traversal of the function
44 for still active basic-blocks. */
45 static sbitmap *live;
46
47 /* Return true if the SSA name NAME is live on the edge E. */
48
49 static bool
50 live_on_edge (edge e, tree name)
51 {
52 return (live[e->dest->index]
53 && TEST_BIT (live[e->dest->index], SSA_NAME_VERSION (name)));
54 }
55
56 /* Local functions. */
57 static int compare_values (tree val1, tree val2);
58 static int compare_values_warnv (tree val1, tree val2, bool *);
59 static void vrp_meet (value_range_t *, value_range_t *);
60 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
61 tree, tree, bool, bool *,
62 bool *);
63
64 /* Location information for ASSERT_EXPRs. Each instance of this
65 structure describes an ASSERT_EXPR for an SSA name. Since a single
66 SSA name may have more than one assertion associated with it, these
67 locations are kept in a linked list attached to the corresponding
68 SSA name. */
69 struct assert_locus_d
70 {
71 /* Basic block where the assertion would be inserted. */
72 basic_block bb;
73
74 /* Some assertions need to be inserted on an edge (e.g., assertions
75 generated by COND_EXPRs). In those cases, BB will be NULL. */
76 edge e;
77
78 /* Pointer to the statement that generated this assertion. */
79 gimple_stmt_iterator si;
80
81 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
82 enum tree_code comp_code;
83
84 /* Value being compared against. */
85 tree val;
86
87 /* Expression to compare. */
88 tree expr;
89
90 /* Next node in the linked list. */
91 struct assert_locus_d *next;
92 };
93
94 typedef struct assert_locus_d *assert_locus_t;
95
96 /* If bit I is present, it means that SSA name N_i has a list of
97 assertions that should be inserted in the IL. */
98 static bitmap need_assert_for;
99
 100 /* Array of location lists describing where to insert assertions.  ASSERTS_FOR[I]
101 holds a list of ASSERT_LOCUS_T nodes that describe where
102 ASSERT_EXPRs for SSA name N_I should be inserted. */
103 static assert_locus_t *asserts_for;
104
105 /* Value range array. After propagation, VR_VALUE[I] holds the range
106 of values that SSA name N_I may take. */
107 static value_range_t **vr_value;
108
109 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
110 number of executable edges we saw the last time we visited the
111 node. */
112 static int *vr_phi_edge_counts;
113
114 typedef struct {
115 gimple stmt;
116 tree vec;
117 } switch_update;
118
119 static VEC (edge, heap) *to_remove_edges;
120 DEF_VEC_O(switch_update);
121 DEF_VEC_ALLOC_O(switch_update, heap);
122 static VEC (switch_update, heap) *to_update_switch_stmts;
123
124
125 /* Return the maximum value for TYPE. */
126
127 static inline tree
128 vrp_val_max (const_tree type)
129 {
130 if (!INTEGRAL_TYPE_P (type))
131 return NULL_TREE;
132
133 return TYPE_MAX_VALUE (type);
134 }
135
136 /* Return the minimum value for TYPE. */
137
138 static inline tree
139 vrp_val_min (const_tree type)
140 {
141 if (!INTEGRAL_TYPE_P (type))
142 return NULL_TREE;
143
144 return TYPE_MIN_VALUE (type);
145 }
146
147 /* Return whether VAL is equal to the maximum value of its type. This
148 will be true for a positive overflow infinity. We can't do a
149 simple equality comparison with TYPE_MAX_VALUE because C typedefs
150 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
151 to the integer constant with the same value in the type. */
152
153 static inline bool
154 vrp_val_is_max (const_tree val)
155 {
156 tree type_max = vrp_val_max (TREE_TYPE (val));
157 return (val == type_max
158 || (type_max != NULL_TREE
159 && operand_equal_p (val, type_max, 0)));
160 }
161
162 /* Return whether VAL is equal to the minimum value of its type. This
163 will be true for a negative overflow infinity. */
164
165 static inline bool
166 vrp_val_is_min (const_tree val)
167 {
168 tree type_min = vrp_val_min (TREE_TYPE (val));
169 return (val == type_min
170 || (type_min != NULL_TREE
171 && operand_equal_p (val, type_min, 0)));
172 }
173
174
175 /* Return whether TYPE should use an overflow infinity distinct from
176 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
177 represent a signed overflow during VRP computations. An infinity
178 is distinct from a half-range, which will go from some number to
179 TYPE_{MIN,MAX}_VALUE. */
180
181 static inline bool
182 needs_overflow_infinity (const_tree type)
183 {
184 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
185 }
186
187 /* Return whether TYPE can support our overflow infinity
188 representation: we use the TREE_OVERFLOW flag, which only exists
189 for constants. If TYPE doesn't support this, we don't optimize
190 cases which would require signed overflow--we drop them to
191 VARYING. */
192
193 static inline bool
194 supports_overflow_infinity (const_tree type)
195 {
196 tree min = vrp_val_min (type), max = vrp_val_max (type);
197 #ifdef ENABLE_CHECKING
198 gcc_assert (needs_overflow_infinity (type));
199 #endif
200 return (min != NULL_TREE
201 && CONSTANT_CLASS_P (min)
202 && max != NULL_TREE
203 && CONSTANT_CLASS_P (max));
204 }
205
206 /* VAL is the maximum or minimum value of a type. Return a
207 corresponding overflow infinity. */
208
209 static inline tree
210 make_overflow_infinity (tree val)
211 {
212 #ifdef ENABLE_CHECKING
213 gcc_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
214 #endif
215 val = copy_node (val);
216 TREE_OVERFLOW (val) = 1;
217 return val;
218 }
219
220 /* Return a negative overflow infinity for TYPE. */
221
222 static inline tree
223 negative_overflow_infinity (tree type)
224 {
225 #ifdef ENABLE_CHECKING
226 gcc_assert (supports_overflow_infinity (type));
227 #endif
228 return make_overflow_infinity (vrp_val_min (type));
229 }
230
231 /* Return a positive overflow infinity for TYPE. */
232
233 static inline tree
234 positive_overflow_infinity (tree type)
235 {
236 #ifdef ENABLE_CHECKING
237 gcc_assert (supports_overflow_infinity (type));
238 #endif
239 return make_overflow_infinity (vrp_val_max (type));
240 }
241
242 /* Return whether VAL is a negative overflow infinity. */
243
244 static inline bool
245 is_negative_overflow_infinity (const_tree val)
246 {
247 return (needs_overflow_infinity (TREE_TYPE (val))
248 && CONSTANT_CLASS_P (val)
249 && TREE_OVERFLOW (val)
250 && vrp_val_is_min (val));
251 }
252
253 /* Return whether VAL is a positive overflow infinity. */
254
255 static inline bool
256 is_positive_overflow_infinity (const_tree val)
257 {
258 return (needs_overflow_infinity (TREE_TYPE (val))
259 && CONSTANT_CLASS_P (val)
260 && TREE_OVERFLOW (val)
261 && vrp_val_is_max (val));
262 }
263
264 /* Return whether VAL is a positive or negative overflow infinity. */
265
266 static inline bool
267 is_overflow_infinity (const_tree val)
268 {
269 return (needs_overflow_infinity (TREE_TYPE (val))
270 && CONSTANT_CLASS_P (val)
271 && TREE_OVERFLOW (val)
272 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
273 }
274
275 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
276
277 static inline bool
278 stmt_overflow_infinity (gimple stmt)
279 {
280 if (is_gimple_assign (stmt)
281 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
282 GIMPLE_SINGLE_RHS)
283 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
284 return false;
285 }
286
 287 /* If VAL is an overflow infinity, return the corresponding plain
 288    TYPE_MIN/MAX_VALUE with TREE_OVERFLOW clear; otherwise return VAL
 289    unchanged.  This can be used to avoid confusing a regular value with an overflow value.  */
290
291 static inline tree
292 avoid_overflow_infinity (tree val)
293 {
294 if (!is_overflow_infinity (val))
295 return val;
296
297 if (vrp_val_is_max (val))
298 return vrp_val_max (TREE_TYPE (val));
299 else
300 {
301 #ifdef ENABLE_CHECKING
302 gcc_assert (vrp_val_is_min (val));
303 #endif
304 return vrp_val_min (TREE_TYPE (val));
305 }
306 }
307
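/* Illustrative sketch of the overflow-infinity encoding above (not part
   of the original sources).  For a hypothetical tree 'sc_type' holding a
   plain 'signed char' (signed overflow not wrapping, so
   needs_overflow_infinity holds):

     tree pos_inf = positive_overflow_infinity (sc_type);

   yields a fresh INTEGER_CST with value 127 and TREE_OVERFLOW set.  It
   satisfies vrp_val_is_max and is_positive_overflow_infinity, yet is a
   node distinct from TYPE_MAX_VALUE (sc_type).  Such infinities appear as
   range bounds when later range arithmetic overflows under the assumption
   that signed overflow is undefined, and avoid_overflow_infinity (pos_inf)
   maps the bound back to the plain 127 when a regular constant is
   needed.  */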
308
309 /* Return true if ARG is marked with the nonnull attribute in the
310 current function signature. */
311
312 static bool
313 nonnull_arg_p (const_tree arg)
314 {
315 tree t, attrs, fntype;
316 unsigned HOST_WIDE_INT arg_num;
317
318 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
319
320 /* The static chain decl is always non null. */
321 if (arg == cfun->static_chain_decl)
322 return true;
323
324 fntype = TREE_TYPE (current_function_decl);
325 attrs = lookup_attribute ("nonnull", TYPE_ATTRIBUTES (fntype));
326
327 /* If "nonnull" wasn't specified, we know nothing about the argument. */
328 if (attrs == NULL_TREE)
329 return false;
330
331 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
332 if (TREE_VALUE (attrs) == NULL_TREE)
333 return true;
334
335 /* Get the position number for ARG in the function signature. */
336 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
337 t;
338 t = TREE_CHAIN (t), arg_num++)
339 {
340 if (t == arg)
341 break;
342 }
343
344 gcc_assert (t == arg);
345
346 /* Now see if ARG_NUM is mentioned in the nonnull list. */
347 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
348 {
349 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
350 return true;
351 }
352
353 return false;
354 }
355
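/* Illustrative example for nonnull_arg_p (not from the original sources).
   The lookup above keys off the "nonnull" attribute recorded on the type
   of the function currently being compiled, e.g. for

     extern void copy (char *dst, const char *src)
       __attribute__ ((nonnull (1, 2)));

     void copy (char *dst, const char *src) { ... }

   nonnull_arg_p returns true for both DST and SRC, because positions 1
   and 2 appear in the attribute's argument list.  A bare
   __attribute__ ((nonnull)) marks every pointer parameter, which is the
   TREE_VALUE (attrs) == NULL_TREE early return above.  get_value_range
   below uses this to give such parameters the anti-range ~[0, 0].  */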
356
357 /* Set value range VR to VR_VARYING. */
358
359 static inline void
360 set_value_range_to_varying (value_range_t *vr)
361 {
362 vr->type = VR_VARYING;
363 vr->min = vr->max = NULL_TREE;
364 if (vr->equiv)
365 bitmap_clear (vr->equiv);
366 }
367
368
369 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
370
371 static void
372 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
373 tree max, bitmap equiv)
374 {
375 #if defined ENABLE_CHECKING
376 /* Check the validity of the range. */
377 if (t == VR_RANGE || t == VR_ANTI_RANGE)
378 {
379 int cmp;
380
381 gcc_assert (min && max);
382
383 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
384 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
385
386 cmp = compare_values (min, max);
387 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
388
389 if (needs_overflow_infinity (TREE_TYPE (min)))
390 gcc_assert (!is_overflow_infinity (min)
391 || !is_overflow_infinity (max));
392 }
393
394 if (t == VR_UNDEFINED || t == VR_VARYING)
395 gcc_assert (min == NULL_TREE && max == NULL_TREE);
396
397 if (t == VR_UNDEFINED || t == VR_VARYING)
398 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
399 #endif
400
401 vr->type = t;
402 vr->min = min;
403 vr->max = max;
404
405 /* Since updating the equivalence set involves deep copying the
406 bitmaps, only do it if absolutely necessary. */
407 if (vr->equiv == NULL
408 && equiv != NULL)
409 vr->equiv = BITMAP_ALLOC (NULL);
410
411 if (equiv != vr->equiv)
412 {
413 if (equiv && !bitmap_empty_p (equiv))
414 bitmap_copy (vr->equiv, equiv);
415 else
416 bitmap_clear (vr->equiv);
417 }
418 }
419
420
421 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
422 This means adjusting T, MIN and MAX representing the case of a
423 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
 424    as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
425 In corner cases where MAX+1 or MIN-1 wraps this will fall back
426 to varying.
427 This routine exists to ease canonicalization in the case where we
428 extract ranges from var + CST op limit. */
429
430 static void
431 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
432 tree min, tree max, bitmap equiv)
433 {
434 /* Nothing to canonicalize for symbolic or unknown or varying ranges. */
435 if ((t != VR_RANGE
436 && t != VR_ANTI_RANGE)
437 || TREE_CODE (min) != INTEGER_CST
438 || TREE_CODE (max) != INTEGER_CST)
439 {
440 set_value_range (vr, t, min, max, equiv);
441 return;
442 }
443
 444   /* MIN and MAX are in the wrong order.  Swapping them also requires
 445      adjusting the bounds and flipping the range type.  */
446 if (tree_int_cst_lt (max, min))
447 {
448 tree one = build_int_cst (TREE_TYPE (min), 1);
449 tree tmp = int_const_binop (PLUS_EXPR, max, one, 0);
450 max = int_const_binop (MINUS_EXPR, min, one, 0);
451 min = tmp;
452
453 /* There's one corner case, if we had [C+1, C] before we now have
454 that again. But this represents an empty value range, so drop
455 to varying in this case. */
456 if (tree_int_cst_lt (max, min))
457 {
458 set_value_range_to_varying (vr);
459 return;
460 }
461
462 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
463 }
464
465 /* Anti-ranges that can be represented as ranges should be so. */
466 if (t == VR_ANTI_RANGE)
467 {
468 bool is_min = vrp_val_is_min (min);
469 bool is_max = vrp_val_is_max (max);
470
471 if (is_min && is_max)
472 {
473 /* We cannot deal with empty ranges, drop to varying. */
474 set_value_range_to_varying (vr);
475 return;
476 }
477 else if (is_min
478 /* As a special exception preserve non-null ranges. */
479 && !(TYPE_UNSIGNED (TREE_TYPE (min))
480 && integer_zerop (max)))
481 {
482 tree one = build_int_cst (TREE_TYPE (max), 1);
483 min = int_const_binop (PLUS_EXPR, max, one, 0);
484 max = vrp_val_max (TREE_TYPE (max));
485 t = VR_RANGE;
486 }
487 else if (is_max)
488 {
489 tree one = build_int_cst (TREE_TYPE (min), 1);
490 max = int_const_binop (MINUS_EXPR, min, one, 0);
491 min = vrp_val_min (TREE_TYPE (min));
492 t = VR_RANGE;
493 }
494 }
495
496 set_value_range (vr, t, min, max, equiv);
497 }
498
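/* Illustrative examples for set_and_canonicalize_value_range (a sketch,
   assuming a 32-bit signed 'int'; not part of the original sources):

     [10, 5]              ->  ~[6, 9]         wrapping range rewritten as
                                              an anti-range
     [5, 4]               ->  VR_VARYING      empty range, bounds wrap back
     ~[INT_MIN, 7]        ->  [8, INT_MAX]    anti-range anchored at the
                                              type minimum becomes a range
     ~[INT_MIN, INT_MAX]  ->  VR_VARYING      empty set is not representable
     ~[0, 0] (unsigned)   ->  ~[0, 0]         the non-null range is kept as
                                              a special exception  */
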
499 /* Copy value range FROM into value range TO. */
500
501 static inline void
502 copy_value_range (value_range_t *to, value_range_t *from)
503 {
504 set_value_range (to, from->type, from->min, from->max, from->equiv);
505 }
506
507 /* Set value range VR to a single value. This function is only called
508 with values we get from statements, and exists to clear the
509 TREE_OVERFLOW flag so that we don't think we have an overflow
510 infinity when we shouldn't. */
511
512 static inline void
513 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
514 {
515 gcc_assert (is_gimple_min_invariant (val));
516 val = avoid_overflow_infinity (val);
517 set_value_range (vr, VR_RANGE, val, val, equiv);
518 }
519
520 /* Set value range VR to a non-negative range of type TYPE.
521 OVERFLOW_INFINITY indicates whether to use an overflow infinity
522 rather than TYPE_MAX_VALUE; this should be true if we determine
523 that the range is nonnegative based on the assumption that signed
524 overflow does not occur. */
525
526 static inline void
527 set_value_range_to_nonnegative (value_range_t *vr, tree type,
528 bool overflow_infinity)
529 {
530 tree zero;
531
532 if (overflow_infinity && !supports_overflow_infinity (type))
533 {
534 set_value_range_to_varying (vr);
535 return;
536 }
537
538 zero = build_int_cst (type, 0);
539 set_value_range (vr, VR_RANGE, zero,
540 (overflow_infinity
541 ? positive_overflow_infinity (type)
542 : TYPE_MAX_VALUE (type)),
543 vr->equiv);
544 }
545
546 /* Set value range VR to a non-NULL range of type TYPE. */
547
548 static inline void
549 set_value_range_to_nonnull (value_range_t *vr, tree type)
550 {
551 tree zero = build_int_cst (type, 0);
552 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
553 }
554
555
556 /* Set value range VR to a NULL range of type TYPE. */
557
558 static inline void
559 set_value_range_to_null (value_range_t *vr, tree type)
560 {
561 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
562 }
563
564
565 /* Set value range VR to a range of a truthvalue of type TYPE. */
566
567 static inline void
568 set_value_range_to_truthvalue (value_range_t *vr, tree type)
569 {
570 if (TYPE_PRECISION (type) == 1)
571 set_value_range_to_varying (vr);
572 else
573 set_value_range (vr, VR_RANGE,
574 build_int_cst (type, 0), build_int_cst (type, 1),
575 vr->equiv);
576 }
577
578
579 /* Set value range VR to VR_UNDEFINED. */
580
581 static inline void
582 set_value_range_to_undefined (value_range_t *vr)
583 {
584 vr->type = VR_UNDEFINED;
585 vr->min = vr->max = NULL_TREE;
586 if (vr->equiv)
587 bitmap_clear (vr->equiv);
588 }
589
590
 591 /* If abs (min) < abs (max), set VR to [-max, max]; otherwise, if
 592    abs (min) >= abs (max), set VR to [-min, min].  */
593
594 static void
595 abs_extent_range (value_range_t *vr, tree min, tree max)
596 {
597 int cmp;
598
599 gcc_assert (TREE_CODE (min) == INTEGER_CST);
600 gcc_assert (TREE_CODE (max) == INTEGER_CST);
601 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
602 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
603 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
604 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
605 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
606 {
607 set_value_range_to_varying (vr);
608 return;
609 }
610 cmp = compare_values (min, max);
611 if (cmp == -1)
612 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
613 else if (cmp == 0 || cmp == 1)
614 {
615 max = min;
616 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
617 }
618 else
619 {
620 set_value_range_to_varying (vr);
621 return;
622 }
623 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
624 }
625
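/* Illustrative behaviour of abs_extent_range (not part of the original
   flow), for a signed 'int':

     abs_extent_range (vr, -7, 5)        ->  [-7, 7]
     abs_extent_range (vr, 3, 10)        ->  [-10, 10]
     abs_extent_range (vr, INT_MIN, 5)   ->  VR_VARYING, because folding
                                             ABS_EXPR of INT_MIN sets
                                             TREE_OVERFLOW.

   Here the numeric arguments stand for INTEGER_CST trees of those values;
   VR is whatever value_range_t object is being filled in.  */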
626
627 /* Return value range information for VAR.
628
 629    If we have no value ranges recorded (i.e., VRP is not running), then
 630    return NULL.  Otherwise create an empty range if none existed for VAR.  */
631
632 static value_range_t *
633 get_value_range (const_tree var)
634 {
635 value_range_t *vr;
636 tree sym;
637 unsigned ver = SSA_NAME_VERSION (var);
638
639 /* If we have no recorded ranges, then return NULL. */
640 if (! vr_value)
641 return NULL;
642
643 vr = vr_value[ver];
644 if (vr)
645 return vr;
646
647 /* Create a default value range. */
648 vr_value[ver] = vr = XCNEW (value_range_t);
649
650 /* Defer allocating the equivalence set. */
651 vr->equiv = NULL;
652
653 /* If VAR is a default definition, the variable can take any value
654 in VAR's type. */
655 sym = SSA_NAME_VAR (var);
656 if (SSA_NAME_IS_DEFAULT_DEF (var))
657 {
658 /* Try to use the "nonnull" attribute to create ~[0, 0]
659 anti-ranges for pointers. Note that this is only valid with
660 default definitions of PARM_DECLs. */
661 if (TREE_CODE (sym) == PARM_DECL
662 && POINTER_TYPE_P (TREE_TYPE (sym))
663 && nonnull_arg_p (sym))
664 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
665 else
666 set_value_range_to_varying (vr);
667 }
668
669 return vr;
670 }
671
672 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
673
674 static inline bool
675 vrp_operand_equal_p (const_tree val1, const_tree val2)
676 {
677 if (val1 == val2)
678 return true;
679 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
680 return false;
681 if (is_overflow_infinity (val1))
682 return is_overflow_infinity (val2);
683 return true;
684 }
685
686 /* Return true, if the bitmaps B1 and B2 are equal. */
687
688 static inline bool
689 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
690 {
691 return (b1 == b2
692 || (b1 && b2
693 && bitmap_equal_p (b1, b2)));
694 }
695
696 /* Update the value range and equivalence set for variable VAR to
697 NEW_VR. Return true if NEW_VR is different from VAR's previous
698 value.
699
700 NOTE: This function assumes that NEW_VR is a temporary value range
701 object created for the sole purpose of updating VAR's range. The
702 storage used by the equivalence set from NEW_VR will be freed by
703 this function. Do not call update_value_range when NEW_VR
704 is the range object associated with another SSA name. */
705
706 static inline bool
707 update_value_range (const_tree var, value_range_t *new_vr)
708 {
709 value_range_t *old_vr;
710 bool is_new;
711
712 /* Update the value range, if necessary. */
713 old_vr = get_value_range (var);
714 is_new = old_vr->type != new_vr->type
715 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
716 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
717 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
718
719 if (is_new)
720 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
721 new_vr->equiv);
722
723 BITMAP_FREE (new_vr->equiv);
724
725 return is_new;
726 }
727
728
729 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
730 point where equivalence processing can be turned on/off. */
731
732 static void
733 add_equivalence (bitmap *equiv, const_tree var)
734 {
735 unsigned ver = SSA_NAME_VERSION (var);
736 value_range_t *vr = vr_value[ver];
737
738 if (*equiv == NULL)
739 *equiv = BITMAP_ALLOC (NULL);
740 bitmap_set_bit (*equiv, ver);
741 if (vr && vr->equiv)
742 bitmap_ior_into (*equiv, vr->equiv);
743 }
744
745
746 /* Return true if VR is ~[0, 0]. */
747
748 static inline bool
749 range_is_nonnull (value_range_t *vr)
750 {
751 return vr->type == VR_ANTI_RANGE
752 && integer_zerop (vr->min)
753 && integer_zerop (vr->max);
754 }
755
756
757 /* Return true if VR is [0, 0]. */
758
759 static inline bool
760 range_is_null (value_range_t *vr)
761 {
762 return vr->type == VR_RANGE
763 && integer_zerop (vr->min)
764 && integer_zerop (vr->max);
765 }
766
 767 /* Return true if max and min of VR are INTEGER_CST.  It need not
 768    be a singleton.  */
769
770 static inline bool
771 range_int_cst_p (value_range_t *vr)
772 {
773 return (vr->type == VR_RANGE
774 && TREE_CODE (vr->max) == INTEGER_CST
775 && TREE_CODE (vr->min) == INTEGER_CST
776 && !TREE_OVERFLOW (vr->max)
777 && !TREE_OVERFLOW (vr->min));
778 }
779
 780 /* Return true if VR is an INTEGER_CST singleton.  */
781
782 static inline bool
783 range_int_cst_singleton_p (value_range_t *vr)
784 {
785 return (range_int_cst_p (vr)
786 && tree_int_cst_equal (vr->min, vr->max));
787 }
788
789 /* Return true if value range VR involves at least one symbol. */
790
791 static inline bool
792 symbolic_range_p (value_range_t *vr)
793 {
794 return (!is_gimple_min_invariant (vr->min)
795 || !is_gimple_min_invariant (vr->max));
796 }
797
798 /* Return true if value range VR uses an overflow infinity. */
799
800 static inline bool
801 overflow_infinity_range_p (value_range_t *vr)
802 {
803 return (vr->type == VR_RANGE
804 && (is_overflow_infinity (vr->min)
805 || is_overflow_infinity (vr->max)));
806 }
807
808 /* Return false if we can not make a valid comparison based on VR;
809 this will be the case if it uses an overflow infinity and overflow
810 is not undefined (i.e., -fno-strict-overflow is in effect).
811 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
812 uses an overflow infinity. */
813
814 static bool
815 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
816 {
817 gcc_assert (vr->type == VR_RANGE);
818 if (is_overflow_infinity (vr->min))
819 {
820 *strict_overflow_p = true;
821 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
822 return false;
823 }
824 if (is_overflow_infinity (vr->max))
825 {
826 *strict_overflow_p = true;
827 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
828 return false;
829 }
830 return true;
831 }
832
833
834 /* Like tree_expr_nonnegative_warnv_p, but this function uses value
835 ranges obtained so far. */
836
837 static bool
838 vrp_expr_computes_nonnegative (tree expr, bool *strict_overflow_p)
839 {
840 return (tree_expr_nonnegative_warnv_p (expr, strict_overflow_p)
841 || (TREE_CODE (expr) == SSA_NAME
842 && ssa_name_nonnegative_p (expr)));
843 }
844
 845 /* Return true if the result of assignment STMT is known to be non-negative.
846 If the return value is based on the assumption that signed overflow is
847 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
848 *STRICT_OVERFLOW_P.*/
849
850 static bool
851 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
852 {
853 enum tree_code code = gimple_assign_rhs_code (stmt);
854 switch (get_gimple_rhs_class (code))
855 {
856 case GIMPLE_UNARY_RHS:
857 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
858 gimple_expr_type (stmt),
859 gimple_assign_rhs1 (stmt),
860 strict_overflow_p);
861 case GIMPLE_BINARY_RHS:
862 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
863 gimple_expr_type (stmt),
864 gimple_assign_rhs1 (stmt),
865 gimple_assign_rhs2 (stmt),
866 strict_overflow_p);
867 case GIMPLE_SINGLE_RHS:
868 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
869 strict_overflow_p);
870 case GIMPLE_INVALID_RHS:
871 gcc_unreachable ();
872 default:
873 gcc_unreachable ();
874 }
875 }
876
 877 /* Return true if the return value of call STMT is known to be non-negative.
878 If the return value is based on the assumption that signed overflow is
879 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
880 *STRICT_OVERFLOW_P.*/
881
882 static bool
883 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
884 {
885 tree arg0 = gimple_call_num_args (stmt) > 0 ?
886 gimple_call_arg (stmt, 0) : NULL_TREE;
887 tree arg1 = gimple_call_num_args (stmt) > 1 ?
888 gimple_call_arg (stmt, 1) : NULL_TREE;
889
890 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
891 gimple_call_fndecl (stmt),
892 arg0,
893 arg1,
894 strict_overflow_p);
895 }
896
 897 /* Return true if STMT is known to compute a non-negative value.
898 If the return value is based on the assumption that signed overflow is
899 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
900 *STRICT_OVERFLOW_P.*/
901
902 static bool
903 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
904 {
905 switch (gimple_code (stmt))
906 {
907 case GIMPLE_ASSIGN:
908 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
909 case GIMPLE_CALL:
910 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
911 default:
912 gcc_unreachable ();
913 }
914 }
915
 916 /* Return true if the result of assignment STMT is known to be non-zero.
917 If the return value is based on the assumption that signed overflow is
918 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
919 *STRICT_OVERFLOW_P.*/
920
921 static bool
922 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
923 {
924 enum tree_code code = gimple_assign_rhs_code (stmt);
925 switch (get_gimple_rhs_class (code))
926 {
927 case GIMPLE_UNARY_RHS:
928 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
929 gimple_expr_type (stmt),
930 gimple_assign_rhs1 (stmt),
931 strict_overflow_p);
932 case GIMPLE_BINARY_RHS:
933 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
934 gimple_expr_type (stmt),
935 gimple_assign_rhs1 (stmt),
936 gimple_assign_rhs2 (stmt),
937 strict_overflow_p);
938 case GIMPLE_SINGLE_RHS:
939 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
940 strict_overflow_p);
941 case GIMPLE_INVALID_RHS:
942 gcc_unreachable ();
943 default:
944 gcc_unreachable ();
945 }
946 }
947
 948 /* Return true if STMT is known to compute a non-zero value.
949 If the return value is based on the assumption that signed overflow is
950 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
951 *STRICT_OVERFLOW_P.*/
952
953 static bool
954 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
955 {
956 switch (gimple_code (stmt))
957 {
958 case GIMPLE_ASSIGN:
959 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
960 case GIMPLE_CALL:
961 return gimple_alloca_call_p (stmt);
962 default:
963 gcc_unreachable ();
964 }
965 }
966
967 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
968 obtained so far. */
969
970 static bool
971 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
972 {
973 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
974 return true;
975
976 /* If we have an expression of the form &X->a, then the expression
977 is nonnull if X is nonnull. */
978 if (is_gimple_assign (stmt)
979 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
980 {
981 tree expr = gimple_assign_rhs1 (stmt);
982 tree base = get_base_address (TREE_OPERAND (expr, 0));
983
984 if (base != NULL_TREE
985 && TREE_CODE (base) == INDIRECT_REF
986 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
987 {
988 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
989 if (range_is_nonnull (vr))
990 return true;
991 }
992 }
993
994 return false;
995 }
996
997 /* Returns true if EXPR is a valid value (as expected by compare_values) --
998 a gimple invariant, or SSA_NAME +- CST. */
999
1000 static bool
1001 valid_value_p (tree expr)
1002 {
1003 if (TREE_CODE (expr) == SSA_NAME)
1004 return true;
1005
1006 if (TREE_CODE (expr) == PLUS_EXPR
1007 || TREE_CODE (expr) == MINUS_EXPR)
1008 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1009 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1010
1011 return is_gimple_min_invariant (expr);
1012 }
1013
1014 /* Return
1015 1 if VAL < VAL2
1016 0 if !(VAL < VAL2)
1017 -2 if those are incomparable. */
1018 static inline int
1019 operand_less_p (tree val, tree val2)
1020 {
1021 /* LT is folded faster than GE and others. Inline the common case. */
1022 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1023 {
1024 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1025 return INT_CST_LT_UNSIGNED (val, val2);
1026 else
1027 {
1028 if (INT_CST_LT (val, val2))
1029 return 1;
1030 }
1031 }
1032 else
1033 {
1034 tree tcmp;
1035
1036 fold_defer_overflow_warnings ();
1037
1038 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1039
1040 fold_undefer_and_ignore_overflow_warnings ();
1041
1042 if (!tcmp
1043 || TREE_CODE (tcmp) != INTEGER_CST)
1044 return -2;
1045
1046 if (!integer_zerop (tcmp))
1047 return 1;
1048 }
1049
1050 /* val >= val2, not considering overflow infinity. */
1051 if (is_negative_overflow_infinity (val))
1052 return is_negative_overflow_infinity (val2) ? 0 : 1;
1053 else if (is_positive_overflow_infinity (val2))
1054 return is_positive_overflow_infinity (val) ? 0 : 1;
1055
1056 return 0;
1057 }
1058
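/* A few illustrative results for operand_less_p (not part of the original
   sources), where the numeric arguments denote INTEGER_CST trees:

     operand_less_p (5, 7)    == 1
     operand_less_p (7, 5)    == 0
     operand_less_p (n_1, 7)  == -2 when the folder cannot decide.

   Note the overflow-infinity tail above: a negative overflow infinity
   compares strictly less than the plain TYPE_MIN_VALUE, and a plain value
   compares strictly less than a positive overflow infinity, even though
   each pair holds the same numeric value.  */
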
1059 /* Compare two values VAL1 and VAL2. Return
1060
1061 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1062 -1 if VAL1 < VAL2,
1063 0 if VAL1 == VAL2,
1064 +1 if VAL1 > VAL2, and
1065 +2 if VAL1 != VAL2
1066
1067 This is similar to tree_int_cst_compare but supports pointer values
1068 and values that cannot be compared at compile time.
1069
1070 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1071 true if the return value is only valid if we assume that signed
1072 overflow is undefined. */
1073
1074 static int
1075 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1076 {
1077 if (val1 == val2)
1078 return 0;
1079
1080 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1081 both integers. */
1082 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1083 == POINTER_TYPE_P (TREE_TYPE (val2)));
1084 /* Convert the two values into the same type. This is needed because
1085 sizetype causes sign extension even for unsigned types. */
1086 val2 = fold_convert (TREE_TYPE (val1), val2);
1087 STRIP_USELESS_TYPE_CONVERSION (val2);
1088
1089 if ((TREE_CODE (val1) == SSA_NAME
1090 || TREE_CODE (val1) == PLUS_EXPR
1091 || TREE_CODE (val1) == MINUS_EXPR)
1092 && (TREE_CODE (val2) == SSA_NAME
1093 || TREE_CODE (val2) == PLUS_EXPR
1094 || TREE_CODE (val2) == MINUS_EXPR))
1095 {
1096 tree n1, c1, n2, c2;
1097 enum tree_code code1, code2;
1098
1099 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1100 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1101 same name, return -2. */
1102 if (TREE_CODE (val1) == SSA_NAME)
1103 {
1104 code1 = SSA_NAME;
1105 n1 = val1;
1106 c1 = NULL_TREE;
1107 }
1108 else
1109 {
1110 code1 = TREE_CODE (val1);
1111 n1 = TREE_OPERAND (val1, 0);
1112 c1 = TREE_OPERAND (val1, 1);
1113 if (tree_int_cst_sgn (c1) == -1)
1114 {
1115 if (is_negative_overflow_infinity (c1))
1116 return -2;
1117 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1118 if (!c1)
1119 return -2;
1120 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1121 }
1122 }
1123
1124 if (TREE_CODE (val2) == SSA_NAME)
1125 {
1126 code2 = SSA_NAME;
1127 n2 = val2;
1128 c2 = NULL_TREE;
1129 }
1130 else
1131 {
1132 code2 = TREE_CODE (val2);
1133 n2 = TREE_OPERAND (val2, 0);
1134 c2 = TREE_OPERAND (val2, 1);
1135 if (tree_int_cst_sgn (c2) == -1)
1136 {
1137 if (is_negative_overflow_infinity (c2))
1138 return -2;
1139 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1140 if (!c2)
1141 return -2;
1142 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1143 }
1144 }
1145
1146 /* Both values must use the same name. */
1147 if (n1 != n2)
1148 return -2;
1149
1150 if (code1 == SSA_NAME
1151 && code2 == SSA_NAME)
1152 /* NAME == NAME */
1153 return 0;
1154
1155 /* If overflow is defined we cannot simplify more. */
1156 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1157 return -2;
1158
1159 if (strict_overflow_p != NULL
1160 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1161 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1162 *strict_overflow_p = true;
1163
1164 if (code1 == SSA_NAME)
1165 {
1166 if (code2 == PLUS_EXPR)
1167 /* NAME < NAME + CST */
1168 return -1;
1169 else if (code2 == MINUS_EXPR)
1170 /* NAME > NAME - CST */
1171 return 1;
1172 }
1173 else if (code1 == PLUS_EXPR)
1174 {
1175 if (code2 == SSA_NAME)
1176 /* NAME + CST > NAME */
1177 return 1;
1178 else if (code2 == PLUS_EXPR)
1179 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1180 return compare_values_warnv (c1, c2, strict_overflow_p);
1181 else if (code2 == MINUS_EXPR)
1182 /* NAME + CST1 > NAME - CST2 */
1183 return 1;
1184 }
1185 else if (code1 == MINUS_EXPR)
1186 {
1187 if (code2 == SSA_NAME)
1188 /* NAME - CST < NAME */
1189 return -1;
1190 else if (code2 == PLUS_EXPR)
1191 /* NAME - CST1 < NAME + CST2 */
1192 return -1;
1193 else if (code2 == MINUS_EXPR)
1194 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1195 C1 and C2 are swapped in the call to compare_values. */
1196 return compare_values_warnv (c2, c1, strict_overflow_p);
1197 }
1198
1199 gcc_unreachable ();
1200 }
1201
1202 /* We cannot compare non-constants. */
1203 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1204 return -2;
1205
1206 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1207 {
1208 /* We cannot compare overflowed values, except for overflow
1209 infinities. */
1210 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1211 {
1212 if (strict_overflow_p != NULL)
1213 *strict_overflow_p = true;
1214 if (is_negative_overflow_infinity (val1))
1215 return is_negative_overflow_infinity (val2) ? 0 : -1;
1216 else if (is_negative_overflow_infinity (val2))
1217 return 1;
1218 else if (is_positive_overflow_infinity (val1))
1219 return is_positive_overflow_infinity (val2) ? 0 : 1;
1220 else if (is_positive_overflow_infinity (val2))
1221 return -1;
1222 return -2;
1223 }
1224
1225 return tree_int_cst_compare (val1, val2);
1226 }
1227 else
1228 {
1229 tree t;
1230
1231 /* First see if VAL1 and VAL2 are not the same. */
1232 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1233 return 0;
1234
1235 /* If VAL1 is a lower address than VAL2, return -1. */
1236 if (operand_less_p (val1, val2) == 1)
1237 return -1;
1238
1239 /* If VAL1 is a higher address than VAL2, return +1. */
1240 if (operand_less_p (val2, val1) == 1)
1241 return 1;
1242
1243 /* If VAL1 is different than VAL2, return +2.
1244 For integer constants we either have already returned -1 or 1
1245 or they are equivalent. We still might succeed in proving
1246 something about non-trivial operands. */
1247 if (TREE_CODE (val1) != INTEGER_CST
1248 || TREE_CODE (val2) != INTEGER_CST)
1249 {
1250 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1251 if (t && integer_onep (t))
1252 return 2;
1253 }
1254
1255 return -2;
1256 }
1257 }
1258
1259 /* Compare values like compare_values_warnv, but treat comparisons of
1260 nonconstants which rely on undefined overflow as incomparable. */
1261
1262 static int
1263 compare_values (tree val1, tree val2)
1264 {
1265 bool sop;
1266 int ret;
1267
1268 sop = false;
1269 ret = compare_values_warnv (val1, val2, &sop);
1270 if (sop
1271 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1272 ret = -2;
1273 return ret;
1274 }
1275
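/* Illustrative uses of the two entry points above (not part of the
   original flow), using the NAME and NAME + CST notation from the
   comments:

     compare_values (3, 7)                      == -1
     compare_values (i_1, j_2)                  == -2  (different names)
     compare_values_warnv (i_1, i_1 + 4, &sop)  == -1, setting SOP, when
                          signed overflow is undefined for the type
     compare_values (i_1, i_1 + 4)              == -2, because the
                          ordering of non-constants would rely on
                          undefined signed overflow.  */
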
1276
1277 /* Return 1 if VAL is inside value range VR (VR->MIN <= VAL <= VR->MAX),
1278 0 if VAL is not inside VR,
1279 -2 if we cannot tell either way.
1280
 1281    FIXME, the current semantics of this function are a bit quirky
1282 when taken in the context of VRP. In here we do not care
1283 about VR's type. If VR is the anti-range ~[3, 5] the call
1284 value_inside_range (4, VR) will return 1.
1285
1286 This is counter-intuitive in a strict sense, but the callers
1287 currently expect this. They are calling the function
1288 merely to determine whether VR->MIN <= VAL <= VR->MAX. The
1289 callers are applying the VR_RANGE/VR_ANTI_RANGE semantics
1290 themselves.
1291
1292 This also applies to value_ranges_intersect_p and
1293 range_includes_zero_p. The semantics of VR_RANGE and
1294 VR_ANTI_RANGE should be encoded here, but that also means
1295 adapting the users of these functions to the new semantics.
1296
1297 Benchmark compile/20001226-1.c compilation time after changing this
1298 function. */
1299
1300 static inline int
1301 value_inside_range (tree val, value_range_t * vr)
1302 {
1303 int cmp1, cmp2;
1304
1305 cmp1 = operand_less_p (val, vr->min);
1306 if (cmp1 == -2)
1307 return -2;
1308 if (cmp1 == 1)
1309 return 0;
1310
1311 cmp2 = operand_less_p (vr->max, val);
1312 if (cmp2 == -2)
1313 return -2;
1314
1315 return !cmp2;
1316 }
1317
1318
1319 /* Return true if value ranges VR0 and VR1 have a non-empty
1320 intersection.
1321
1322 Benchmark compile/20001226-1.c compilation time after changing this
1323 function.
1324 */
1325
1326 static inline bool
1327 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1328 {
1329 /* The value ranges do not intersect if the maximum of the first range is
1330 less than the minimum of the second range or vice versa.
1331 When those relations are unknown, we can't do any better. */
1332 if (operand_less_p (vr0->max, vr1->min) != 0)
1333 return false;
1334 if (operand_less_p (vr1->max, vr0->min) != 0)
1335 return false;
1336 return true;
1337 }
1338
1339
 1340 /* Return true if VR includes the value zero, false otherwise.  FIXME,
 1341    currently this only tests whether zero lies in [VR->MIN, VR->MAX], so
 1342    for an anti-range like ~[-4, 3] it returns true even though that range
 1343    excludes zero.  This will need revisiting when the semantics of
 1344    value_inside_range are modified (currently its users expect these semantics).  */
1345
1346 static inline bool
1347 range_includes_zero_p (value_range_t *vr)
1348 {
1349 tree zero;
1350
1351 gcc_assert (vr->type != VR_UNDEFINED
1352 && vr->type != VR_VARYING
1353 && !symbolic_range_p (vr));
1354
1355 zero = build_int_cst (TREE_TYPE (vr->min), 0);
1356 return (value_inside_range (zero, vr) == 1);
1357 }
1358
1359 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1360 false otherwise or if no value range information is available. */
1361
1362 bool
1363 ssa_name_nonnegative_p (const_tree t)
1364 {
1365 value_range_t *vr = get_value_range (t);
1366
1367 if (!vr)
1368 return false;
1369
1370 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1371 which would return a useful value should be encoded as a VR_RANGE. */
1372 if (vr->type == VR_RANGE)
1373 {
1374 int result = compare_values (vr->min, integer_zero_node);
1375
1376 return (result == 0 || result == 1);
1377 }
1378 return false;
1379 }
1380
1381 /* If OP has a value range with a single constant value return that,
1382 otherwise return NULL_TREE. This returns OP itself if OP is a
1383 constant. */
1384
1385 static tree
1386 op_with_constant_singleton_value_range (tree op)
1387 {
1388 value_range_t *vr;
1389
1390 if (is_gimple_min_invariant (op))
1391 return op;
1392
1393 if (TREE_CODE (op) != SSA_NAME)
1394 return NULL_TREE;
1395
1396 vr = get_value_range (op);
1397 if (vr->type == VR_RANGE
1398 && operand_equal_p (vr->min, vr->max, 0)
1399 && is_gimple_min_invariant (vr->min))
1400 return vr->min;
1401
1402 return NULL_TREE;
1403 }
1404
1405
1406 /* Extract value range information from an ASSERT_EXPR EXPR and store
1407 it in *VR_P. */
1408
1409 static void
1410 extract_range_from_assert (value_range_t *vr_p, tree expr)
1411 {
1412 tree var, cond, limit, min, max, type;
1413 value_range_t *var_vr, *limit_vr;
1414 enum tree_code cond_code;
1415
1416 var = ASSERT_EXPR_VAR (expr);
1417 cond = ASSERT_EXPR_COND (expr);
1418
1419 gcc_assert (COMPARISON_CLASS_P (cond));
1420
1421 /* Find VAR in the ASSERT_EXPR conditional. */
1422 if (var == TREE_OPERAND (cond, 0)
1423 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1424 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1425 {
1426 /* If the predicate is of the form VAR COMP LIMIT, then we just
1427 take LIMIT from the RHS and use the same comparison code. */
1428 cond_code = TREE_CODE (cond);
1429 limit = TREE_OPERAND (cond, 1);
1430 cond = TREE_OPERAND (cond, 0);
1431 }
1432 else
1433 {
1434 /* If the predicate is of the form LIMIT COMP VAR, then we need
1435 to flip around the comparison code to create the proper range
1436 for VAR. */
1437 cond_code = swap_tree_comparison (TREE_CODE (cond));
1438 limit = TREE_OPERAND (cond, 0);
1439 cond = TREE_OPERAND (cond, 1);
1440 }
1441
1442 limit = avoid_overflow_infinity (limit);
1443
1444 type = TREE_TYPE (limit);
1445 gcc_assert (limit != var);
1446
1447 /* For pointer arithmetic, we only keep track of pointer equality
1448 and inequality. */
1449 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1450 {
1451 set_value_range_to_varying (vr_p);
1452 return;
1453 }
1454
1455 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1456 try to use LIMIT's range to avoid creating symbolic ranges
1457 unnecessarily. */
1458 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1459
1460 /* LIMIT's range is only interesting if it has any useful information. */
1461 if (limit_vr
1462 && (limit_vr->type == VR_UNDEFINED
1463 || limit_vr->type == VR_VARYING
1464 || symbolic_range_p (limit_vr)))
1465 limit_vr = NULL;
1466
1467 /* Initially, the new range has the same set of equivalences of
1468 VAR's range. This will be revised before returning the final
1469 value. Since assertions may be chained via mutually exclusive
1470 predicates, we will need to trim the set of equivalences before
1471 we are done. */
1472 gcc_assert (vr_p->equiv == NULL);
1473 add_equivalence (&vr_p->equiv, var);
1474
1475 /* Extract a new range based on the asserted comparison for VAR and
1476 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1477 will only use it for equality comparisons (EQ_EXPR). For any
1478 other kind of assertion, we cannot derive a range from LIMIT's
1479 anti-range that can be used to describe the new range. For
1480 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1481 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1482 no single range for x_2 that could describe LE_EXPR, so we might
1483 as well build the range [b_4, +INF] for it.
1484 One special case we handle is extracting a range from a
1485 range test encoded as (unsigned)var + CST <= limit. */
1486 if (TREE_CODE (cond) == NOP_EXPR
1487 || TREE_CODE (cond) == PLUS_EXPR)
1488 {
1489 if (TREE_CODE (cond) == PLUS_EXPR)
1490 {
1491 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1492 TREE_OPERAND (cond, 1));
1493 max = int_const_binop (PLUS_EXPR, limit, min, 0);
1494 cond = TREE_OPERAND (cond, 0);
1495 }
1496 else
1497 {
1498 min = build_int_cst (TREE_TYPE (var), 0);
1499 max = limit;
1500 }
1501
1502 /* Make sure to not set TREE_OVERFLOW on the final type
1503 conversion. We are willingly interpreting large positive
 1504      unsigned values as negative signed values here.  */
1505 min = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (min),
1506 TREE_INT_CST_HIGH (min), 0, false);
1507 max = force_fit_type_double (TREE_TYPE (var), TREE_INT_CST_LOW (max),
1508 TREE_INT_CST_HIGH (max), 0, false);
1509
1510 /* We can transform a max, min range to an anti-range or
1511 vice-versa. Use set_and_canonicalize_value_range which does
1512 this for us. */
1513 if (cond_code == LE_EXPR)
1514 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1515 min, max, vr_p->equiv);
1516 else if (cond_code == GT_EXPR)
1517 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1518 min, max, vr_p->equiv);
1519 else
1520 gcc_unreachable ();
1521 }
1522 else if (cond_code == EQ_EXPR)
1523 {
1524 enum value_range_type range_type;
1525
1526 if (limit_vr)
1527 {
1528 range_type = limit_vr->type;
1529 min = limit_vr->min;
1530 max = limit_vr->max;
1531 }
1532 else
1533 {
1534 range_type = VR_RANGE;
1535 min = limit;
1536 max = limit;
1537 }
1538
1539 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1540
1541 /* When asserting the equality VAR == LIMIT and LIMIT is another
1542 SSA name, the new range will also inherit the equivalence set
1543 from LIMIT. */
1544 if (TREE_CODE (limit) == SSA_NAME)
1545 add_equivalence (&vr_p->equiv, limit);
1546 }
1547 else if (cond_code == NE_EXPR)
1548 {
1549 /* As described above, when LIMIT's range is an anti-range and
1550 this assertion is an inequality (NE_EXPR), then we cannot
1551 derive anything from the anti-range. For instance, if
1552 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1553 not imply that VAR's range is [0, 0]. So, in the case of
1554 anti-ranges, we just assert the inequality using LIMIT and
1555 not its anti-range.
1556
1557 If LIMIT_VR is a range, we can only use it to build a new
1558 anti-range if LIMIT_VR is a single-valued range. For
1559 instance, if LIMIT_VR is [0, 1], the predicate
1560 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1561 Rather, it means that for value 0 VAR should be ~[0, 0]
1562 and for value 1, VAR should be ~[1, 1]. We cannot
1563 represent these ranges.
1564
1565 The only situation in which we can build a valid
1566 anti-range is when LIMIT_VR is a single-valued range
1567 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1568 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1569 if (limit_vr
1570 && limit_vr->type == VR_RANGE
1571 && compare_values (limit_vr->min, limit_vr->max) == 0)
1572 {
1573 min = limit_vr->min;
1574 max = limit_vr->max;
1575 }
1576 else
1577 {
1578 /* In any other case, we cannot use LIMIT's range to build a
1579 valid anti-range. */
1580 min = max = limit;
1581 }
1582
1583 /* If MIN and MAX cover the whole range for their type, then
1584 just use the original LIMIT. */
1585 if (INTEGRAL_TYPE_P (type)
1586 && vrp_val_is_min (min)
1587 && vrp_val_is_max (max))
1588 min = max = limit;
1589
1590 set_value_range (vr_p, VR_ANTI_RANGE, min, max, vr_p->equiv);
1591 }
1592 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1593 {
1594 min = TYPE_MIN_VALUE (type);
1595
1596 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1597 max = limit;
1598 else
1599 {
1600 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1601 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1602 LT_EXPR. */
1603 max = limit_vr->max;
1604 }
1605
1606 /* If the maximum value forces us to be out of bounds, simply punt.
1607 It would be pointless to try and do anything more since this
1608 all should be optimized away above us. */
1609 if ((cond_code == LT_EXPR
1610 && compare_values (max, min) == 0)
1611 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1612 set_value_range_to_varying (vr_p);
1613 else
1614 {
1615 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1616 if (cond_code == LT_EXPR)
1617 {
1618 tree one = build_int_cst (type, 1);
1619 max = fold_build2 (MINUS_EXPR, type, max, one);
1620 if (EXPR_P (max))
1621 TREE_NO_WARNING (max) = 1;
1622 }
1623
1624 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1625 }
1626 }
1627 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1628 {
1629 max = TYPE_MAX_VALUE (type);
1630
1631 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1632 min = limit;
1633 else
1634 {
1635 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1636 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1637 GT_EXPR. */
1638 min = limit_vr->min;
1639 }
1640
1641 /* If the minimum value forces us to be out of bounds, simply punt.
1642 It would be pointless to try and do anything more since this
1643 all should be optimized away above us. */
1644 if ((cond_code == GT_EXPR
1645 && compare_values (min, max) == 0)
1646 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1647 set_value_range_to_varying (vr_p);
1648 else
1649 {
1650 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1651 if (cond_code == GT_EXPR)
1652 {
1653 tree one = build_int_cst (type, 1);
1654 min = fold_build2 (PLUS_EXPR, type, min, one);
1655 if (EXPR_P (min))
1656 TREE_NO_WARNING (min) = 1;
1657 }
1658
1659 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1660 }
1661 }
1662 else
1663 gcc_unreachable ();
1664
1665 /* If VAR already had a known range, it may happen that the new
1666 range we have computed and VAR's range are not compatible. For
1667 instance,
1668
1669 if (p_5 == NULL)
1670 p_6 = ASSERT_EXPR <p_5, p_5 == NULL>;
1671 x_7 = p_6->fld;
1672 p_8 = ASSERT_EXPR <p_6, p_6 != NULL>;
1673
1674 While the above comes from a faulty program, it will cause an ICE
1675 later because p_8 and p_6 will have incompatible ranges and at
1676 the same time will be considered equivalent. A similar situation
1677 would arise from
1678
1679 if (i_5 > 10)
1680 i_6 = ASSERT_EXPR <i_5, i_5 > 10>;
1681 if (i_5 < 5)
1682 i_7 = ASSERT_EXPR <i_6, i_6 < 5>;
1683
1684 Again i_6 and i_7 will have incompatible ranges. It would be
1685 pointless to try and do anything with i_7's range because
1686 anything dominated by 'if (i_5 < 5)' will be optimized away.
 1687    Note that, due to the way in which simulation proceeds, the statement
 1688    i_7 = ASSERT_EXPR <...> would never be visited because the
1689 conditional 'if (i_5 < 5)' always evaluates to false. However,
1690 this extra check does not hurt and may protect against future
1691 changes to VRP that may get into a situation similar to the
1692 NULL pointer dereference example.
1693
1694 Note that these compatibility tests are only needed when dealing
1695 with ranges or a mix of range and anti-range. If VAR_VR and VR_P
1696 are both anti-ranges, they will always be compatible, because two
1697 anti-ranges will always have a non-empty intersection. */
1698
1699 var_vr = get_value_range (var);
1700
1701 /* We may need to make adjustments when VR_P and VAR_VR are numeric
1702 ranges or anti-ranges. */
1703 if (vr_p->type == VR_VARYING
1704 || vr_p->type == VR_UNDEFINED
1705 || var_vr->type == VR_VARYING
1706 || var_vr->type == VR_UNDEFINED
1707 || symbolic_range_p (vr_p)
1708 || symbolic_range_p (var_vr))
1709 return;
1710
1711 if (var_vr->type == VR_RANGE && vr_p->type == VR_RANGE)
1712 {
1713 /* If the two ranges have a non-empty intersection, we can
1714 refine the resulting range. Since the assert expression
1715 creates an equivalency and at the same time it asserts a
1716 predicate, we can take the intersection of the two ranges to
1717 get better precision. */
1718 if (value_ranges_intersect_p (var_vr, vr_p))
1719 {
1720 /* Use the larger of the two minimums. */
1721 if (compare_values (vr_p->min, var_vr->min) == -1)
1722 min = var_vr->min;
1723 else
1724 min = vr_p->min;
1725
1726 /* Use the smaller of the two maximums. */
1727 if (compare_values (vr_p->max, var_vr->max) == 1)
1728 max = var_vr->max;
1729 else
1730 max = vr_p->max;
1731
1732 set_value_range (vr_p, vr_p->type, min, max, vr_p->equiv);
1733 }
1734 else
1735 {
1736 /* The two ranges do not intersect, set the new range to
1737 VARYING, because we will not be able to do anything
1738 meaningful with it. */
1739 set_value_range_to_varying (vr_p);
1740 }
1741 }
1742 else if ((var_vr->type == VR_RANGE && vr_p->type == VR_ANTI_RANGE)
1743 || (var_vr->type == VR_ANTI_RANGE && vr_p->type == VR_RANGE))
1744 {
1745 /* A range and an anti-range will cancel each other only if
1746 their ends are the same. For instance, in the example above,
1747 p_8's range ~[0, 0] and p_6's range [0, 0] are incompatible,
1748 so VR_P should be set to VR_VARYING. */
1749 if (compare_values (var_vr->min, vr_p->min) == 0
1750 && compare_values (var_vr->max, vr_p->max) == 0)
1751 set_value_range_to_varying (vr_p);
1752 else
1753 {
1754 tree min, max, anti_min, anti_max, real_min, real_max;
1755 int cmp;
1756
1757 /* We want to compute the logical AND of the two ranges;
1758 there are three cases to consider.
1759
1760
1761 1. The VR_ANTI_RANGE range is completely within the
1762 VR_RANGE and the endpoints of the ranges are
1763 different. In that case the resulting range
1764 should be whichever range is more precise.
1765 Typically that will be the VR_RANGE.
1766
1767 2. The VR_ANTI_RANGE is completely disjoint from
1768 the VR_RANGE. In this case the resulting range
1769 should be the VR_RANGE.
1770
1771 3. There is some overlap between the VR_ANTI_RANGE
1772 and the VR_RANGE.
1773
1774 3a. If the high limit of the VR_ANTI_RANGE resides
1775 within the VR_RANGE, then the result is a new
1776 VR_RANGE starting at the high limit of the
1777 VR_ANTI_RANGE + 1 and extending to the
1778 high limit of the original VR_RANGE.
1779
1780 3b. If the low limit of the VR_ANTI_RANGE resides
1781 within the VR_RANGE, then the result is a new
1782 VR_RANGE starting at the low limit of the original
1783 VR_RANGE and extending to the low limit of the
1784 VR_ANTI_RANGE - 1. */
1785 if (vr_p->type == VR_ANTI_RANGE)
1786 {
1787 anti_min = vr_p->min;
1788 anti_max = vr_p->max;
1789 real_min = var_vr->min;
1790 real_max = var_vr->max;
1791 }
1792 else
1793 {
1794 anti_min = var_vr->min;
1795 anti_max = var_vr->max;
1796 real_min = vr_p->min;
1797 real_max = vr_p->max;
1798 }
1799
1800
1801 /* Case 1, VR_ANTI_RANGE completely within VR_RANGE,
1802 not including any endpoints. */
1803 if (compare_values (anti_max, real_max) == -1
1804 && compare_values (anti_min, real_min) == 1)
1805 {
1806 /* If the range is covering the whole valid range of
1807 the type keep the anti-range. */
1808 if (!vrp_val_is_min (real_min)
1809 || !vrp_val_is_max (real_max))
1810 set_value_range (vr_p, VR_RANGE, real_min,
1811 real_max, vr_p->equiv);
1812 }
1813 /* Case 2, VR_ANTI_RANGE completely disjoint from
1814 VR_RANGE. */
1815 else if (compare_values (anti_min, real_max) == 1
1816 || compare_values (anti_max, real_min) == -1)
1817 {
1818 set_value_range (vr_p, VR_RANGE, real_min,
1819 real_max, vr_p->equiv);
1820 }
 1821 	  /* Case 3a, the anti-range extends into the low
 1822 	     part of the real range, thus creating a new
 1823 	     lower bound for the real range.  */
1824 else if (((cmp = compare_values (anti_max, real_min)) == 1
1825 || cmp == 0)
1826 && compare_values (anti_max, real_max) == -1)
1827 {
1828 gcc_assert (!is_positive_overflow_infinity (anti_max));
1829 if (needs_overflow_infinity (TREE_TYPE (anti_max))
1830 && vrp_val_is_max (anti_max))
1831 {
1832 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1833 {
1834 set_value_range_to_varying (vr_p);
1835 return;
1836 }
1837 min = positive_overflow_infinity (TREE_TYPE (var_vr->min));
1838 }
1839 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1840 min = fold_build2 (PLUS_EXPR, TREE_TYPE (var_vr->min),
1841 anti_max,
1842 build_int_cst (TREE_TYPE (var_vr->min), 1));
1843 else
1844 min = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1845 anti_max, size_int (1));
1846 max = real_max;
1847 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1848 }
1849 /* Case 3b, the anti-range extends into the high
1850 part of the real range, thus creating a new
1851 high bound for the real range. */
1852 else if (compare_values (anti_min, real_min) == 1
1853 && ((cmp = compare_values (anti_min, real_max)) == -1
1854 || cmp == 0))
1855 {
1856 gcc_assert (!is_negative_overflow_infinity (anti_min));
1857 if (needs_overflow_infinity (TREE_TYPE (anti_min))
1858 && vrp_val_is_min (anti_min))
1859 {
1860 if (!supports_overflow_infinity (TREE_TYPE (var_vr->min)))
1861 {
1862 set_value_range_to_varying (vr_p);
1863 return;
1864 }
1865 max = negative_overflow_infinity (TREE_TYPE (var_vr->min));
1866 }
1867 else if (!POINTER_TYPE_P (TREE_TYPE (var_vr->min)))
1868 max = fold_build2 (MINUS_EXPR, TREE_TYPE (var_vr->min),
1869 anti_min,
1870 build_int_cst (TREE_TYPE (var_vr->min), 1));
1871 else
1872 max = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (var_vr->min),
1873 anti_min,
1874 size_int (-1));
1875 min = real_min;
1876 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1877 }
1878 }
1879 }
1880 }
1881
1882
1883 /* Extract range information from SSA name VAR and store it in VR. If
1884 VAR has an interesting range, use it. Otherwise, create the
1885 range [VAR, VAR] and return it. This is useful in situations where
1886 we may have conditionals testing values of VARYING names. For
1887 instance,
1888
1889 x_3 = y_5;
1890 if (x_3 > y_5)
1891 ...
1892
1893 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1894 always false. */
1895
1896 static void
1897 extract_range_from_ssa_name (value_range_t *vr, tree var)
1898 {
1899 value_range_t *var_vr = get_value_range (var);
1900
1901 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1902 copy_value_range (vr, var_vr);
1903 else
1904 set_value_range (vr, VR_RANGE, var, var, NULL);
1905
1906 add_equivalence (&vr->equiv, var);
1907 }
1908
1909
1910 /* Wrapper around int_const_binop. If the operation overflows and we
1911 are not using wrapping arithmetic, then adjust the result to be
1912 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1913 NULL_TREE if we need to use an overflow infinity representation but
1914 the type does not support it. */
1915
1916 static tree
1917 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1918 {
1919 tree res;
1920
1921 res = int_const_binop (code, val1, val2, 0);
1922
1923 /* If we are using unsigned arithmetic, operate symbolically
1924 on -INF and +INF as int_const_binop only handles signed overflow. */
1925 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1926 {
1927 int checkz = compare_values (res, val1);
1928 bool overflow = false;
1929
1930 /* Ensure that res = val1 [+*] val2 >= val1
1931 or that res = val1 - val2 <= val1. */
1932 if ((code == PLUS_EXPR
1933 && !(checkz == 1 || checkz == 0))
1934 || (code == MINUS_EXPR
1935 && !(checkz == 0 || checkz == -1)))
1936 {
1937 overflow = true;
1938 }
1939 /* Checking for multiplication overflow is done by dividing the
1940 output of the multiplication by the first input of the
1941 multiplication. If the result of that division operation is
1942 not equal to the second input of the multiplication, then the
1943 multiplication overflowed. */
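/* For instance, assuming a 32-bit unsigned type: 65536 * 65537
   wraps to 65536, and 65536 / 65536 == 1 != 65537, so the
   overflow is detected. */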
1944 else if (code == MULT_EXPR && !integer_zerop (val1))
1945 {
1946 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1947 res,
1948 val1, 0);
1949 int check = compare_values (tmp, val2);
1950
1951 if (check != 0)
1952 overflow = true;
1953 }
1954
1955 if (overflow)
1956 {
1957 res = copy_node (res);
1958 TREE_OVERFLOW (res) = 1;
1959 }
1960
1961 }
1962 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1963 /* If the signed operation wraps then int_const_binop has done
1964 everything we want. */
1965 ;
1966 else if ((TREE_OVERFLOW (res)
1967 && !TREE_OVERFLOW (val1)
1968 && !TREE_OVERFLOW (val2))
1969 || is_overflow_infinity (val1)
1970 || is_overflow_infinity (val2))
1971 {
1972 /* If the operation overflowed but neither VAL1 nor VAL2 are
1973 overflown, return -INF or +INF depending on the operation
1974 and the combination of signs of the operands. */
1975 int sgn1 = tree_int_cst_sgn (val1);
1976 int sgn2 = tree_int_cst_sgn (val2);
1977
1978 if (needs_overflow_infinity (TREE_TYPE (res))
1979 && !supports_overflow_infinity (TREE_TYPE (res)))
1980 return NULL_TREE;
1981
1982 /* We have to punt on adding infinities of different signs,
1983 since we can't tell what the sign of the result should be.
1984 Likewise for subtracting infinities of the same sign. */
1985 if (((code == PLUS_EXPR && sgn1 != sgn2)
1986 || (code == MINUS_EXPR && sgn1 == sgn2))
1987 && is_overflow_infinity (val1)
1988 && is_overflow_infinity (val2))
1989 return NULL_TREE;
1990
1991 /* Don't try to handle division or shifting of infinities. */
1992 if ((code == TRUNC_DIV_EXPR
1993 || code == FLOOR_DIV_EXPR
1994 || code == CEIL_DIV_EXPR
1995 || code == EXACT_DIV_EXPR
1996 || code == ROUND_DIV_EXPR
1997 || code == RSHIFT_EXPR)
1998 && (is_overflow_infinity (val1)
1999 || is_overflow_infinity (val2)))
2000 return NULL_TREE;
2001
2002 /* Notice that we only need to handle the restricted set of
2003 operations handled by extract_range_from_binary_expr.
2004 Among them, only multiplication, addition and subtraction
2005 can yield overflow without overflown operands because we
2006 are working with integral types only... except in the
2007 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
2008 for division too. */
2009
2010 /* For multiplication, the sign of the overflow is given
2011 by the comparison of the signs of the operands. */
2012 if ((code == MULT_EXPR && sgn1 == sgn2)
2013 /* For addition, the operands must be of the same sign
2014 to yield an overflow. Its sign is therefore that
2015 of one of the operands, for example the first. For
2016 infinite operands X + -INF is negative, not positive. */
2017 || (code == PLUS_EXPR
2018 && (sgn1 >= 0
2019 ? !is_negative_overflow_infinity (val2)
2020 : is_positive_overflow_infinity (val2)))
2021 /* For subtraction, non-infinite operands must be of
2022 different signs to yield an overflow. Its sign is
2023 therefore that of the first operand or the opposite of
2024 that of the second operand. A first operand of 0 counts
2025 as positive here, for the corner case 0 - (-INF), which
2026 overflows, but must yield +INF. For infinite operands 0
2027 - INF is negative, not positive. */
2028 || (code == MINUS_EXPR
2029 && (sgn1 >= 0
2030 ? !is_positive_overflow_infinity (val2)
2031 : is_negative_overflow_infinity (val2)))
2032 /* We only get in here with positive shift count, so the
2033 overflow direction is the same as the sign of val1.
2034 Actually rshift does not overflow at all, but we only
2035 handle the case of shifting overflowed -INF and +INF. */
2036 || (code == RSHIFT_EXPR
2037 && sgn1 >= 0)
2038 /* For division, the only case is -INF / -1 = +INF. */
2039 || code == TRUNC_DIV_EXPR
2040 || code == FLOOR_DIV_EXPR
2041 || code == CEIL_DIV_EXPR
2042 || code == EXACT_DIV_EXPR
2043 || code == ROUND_DIV_EXPR)
2044 return (needs_overflow_infinity (TREE_TYPE (res))
2045 ? positive_overflow_infinity (TREE_TYPE (res))
2046 : TYPE_MAX_VALUE (TREE_TYPE (res)));
2047 else
2048 return (needs_overflow_infinity (TREE_TYPE (res))
2049 ? negative_overflow_infinity (TREE_TYPE (res))
2050 : TYPE_MIN_VALUE (TREE_TYPE (res)));
2051 }
2052
2053 return res;
2054 }
2055
2056
2057 /* Extract range information from a binary expression EXPR based on
2058 the ranges of each of its operands and the expression code. */
2059
2060 static void
2061 extract_range_from_binary_expr (value_range_t *vr,
2062 enum tree_code code,
2063 tree expr_type, tree op0, tree op1)
2064 {
2065 enum value_range_type type;
2066 tree min, max;
2067 int cmp;
2068 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2069 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2070
2071 /* Not all binary expressions can be applied to ranges in a
2072 meaningful way. Handle only arithmetic operations. */
2073 if (code != PLUS_EXPR
2074 && code != MINUS_EXPR
2075 && code != POINTER_PLUS_EXPR
2076 && code != MULT_EXPR
2077 && code != TRUNC_DIV_EXPR
2078 && code != FLOOR_DIV_EXPR
2079 && code != CEIL_DIV_EXPR
2080 && code != EXACT_DIV_EXPR
2081 && code != ROUND_DIV_EXPR
2082 && code != RSHIFT_EXPR
2083 && code != MIN_EXPR
2084 && code != MAX_EXPR
2085 && code != BIT_AND_EXPR
2086 && code != BIT_IOR_EXPR
2087 && code != TRUTH_AND_EXPR
2088 && code != TRUTH_OR_EXPR)
2089 {
2090 /* We can still do constant propagation here. */
2091 tree const_op0 = op_with_constant_singleton_value_range (op0);
2092 tree const_op1 = op_with_constant_singleton_value_range (op1);
2093 if (const_op0 || const_op1)
2094 {
2095 tree tem = fold_binary (code, expr_type,
2096 const_op0 ? const_op0 : op0,
2097 const_op1 ? const_op1 : op1);
2098 if (tem
2099 && is_gimple_min_invariant (tem)
2100 && !is_overflow_infinity (tem))
2101 {
2102 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2103 return;
2104 }
2105 }
2106 set_value_range_to_varying (vr);
2107 return;
2108 }
2109
2110 /* Get value ranges for each operand. For constant operands, create
2111 a new value range with the operand to simplify processing. */
2112 if (TREE_CODE (op0) == SSA_NAME)
2113 vr0 = *(get_value_range (op0));
2114 else if (is_gimple_min_invariant (op0))
2115 set_value_range_to_value (&vr0, op0, NULL);
2116 else
2117 set_value_range_to_varying (&vr0);
2118
2119 if (TREE_CODE (op1) == SSA_NAME)
2120 vr1 = *(get_value_range (op1));
2121 else if (is_gimple_min_invariant (op1))
2122 set_value_range_to_value (&vr1, op1, NULL);
2123 else
2124 set_value_range_to_varying (&vr1);
2125
2126 /* If either range is UNDEFINED, so is the result. */
2127 if (vr0.type == VR_UNDEFINED || vr1.type == VR_UNDEFINED)
2128 {
2129 set_value_range_to_undefined (vr);
2130 return;
2131 }
2132
2133 /* The type of the resulting value range defaults to VR0.TYPE. */
2134 type = vr0.type;
2135
2136 /* Refuse to operate on VARYING ranges, ranges of different kinds
2137 and symbolic ranges. As an exception, we allow BIT_AND_EXPR
2138 because we may be able to derive a useful range even if one of
2139 the operands is VR_VARYING or symbolic range. Similarly for
2140 divisions. TODO, we may be able to derive anti-ranges in
2141 some cases. */
2142 if (code != BIT_AND_EXPR
2143 && code != TRUTH_AND_EXPR
2144 && code != TRUTH_OR_EXPR
2145 && code != TRUNC_DIV_EXPR
2146 && code != FLOOR_DIV_EXPR
2147 && code != CEIL_DIV_EXPR
2148 && code != EXACT_DIV_EXPR
2149 && code != ROUND_DIV_EXPR
2150 && (vr0.type == VR_VARYING
2151 || vr1.type == VR_VARYING
2152 || vr0.type != vr1.type
2153 || symbolic_range_p (&vr0)
2154 || symbolic_range_p (&vr1)))
2155 {
2156 set_value_range_to_varying (vr);
2157 return;
2158 }
2159
2160 /* Now evaluate the expression to determine the new range. */
2161 if (POINTER_TYPE_P (expr_type)
2162 || POINTER_TYPE_P (TREE_TYPE (op0))
2163 || POINTER_TYPE_P (TREE_TYPE (op1)))
2164 {
2165 if (code == MIN_EXPR || code == MAX_EXPR)
2166 {
2167 /* For MIN/MAX expressions with pointers, we only care about
2168 nullness. If both are nonnull, then the result is nonnull.
2169 If both are null, then the result is null. Otherwise the
2170 result is varying. */
2171 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2172 set_value_range_to_nonnull (vr, expr_type);
2173 else if (range_is_null (&vr0) && range_is_null (&vr1))
2174 set_value_range_to_null (vr, expr_type);
2175 else
2176 set_value_range_to_varying (vr);
2177
2178 return;
2179 }
2180 gcc_assert (code == POINTER_PLUS_EXPR);
2181 /* For pointer types, we are really only interested in asserting
2182 whether the expression evaluates to non-NULL. */
2183 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2184 set_value_range_to_nonnull (vr, expr_type);
2185 else if (range_is_null (&vr0) && range_is_null (&vr1))
2186 set_value_range_to_null (vr, expr_type);
2187 else
2188 set_value_range_to_varying (vr);
2189
2190 return;
2191 }
2192
2193 /* For integer ranges, apply the operation to each end of the
2194 range and see what we end up with. */
2195 if (code == TRUTH_AND_EXPR
2196 || code == TRUTH_OR_EXPR)
2197 {
2198 /* If one of the operands is zero, we know that the whole
2199 expression evaluates to zero. */
2200 if (code == TRUTH_AND_EXPR
2201 && ((vr0.type == VR_RANGE
2202 && integer_zerop (vr0.min)
2203 && integer_zerop (vr0.max))
2204 || (vr1.type == VR_RANGE
2205 && integer_zerop (vr1.min)
2206 && integer_zerop (vr1.max))))
2207 {
2208 type = VR_RANGE;
2209 min = max = build_int_cst (expr_type, 0);
2210 }
2211 /* If one of the operands is one, we know that the whole
2212 expression evaluates to one. */
2213 else if (code == TRUTH_OR_EXPR
2214 && ((vr0.type == VR_RANGE
2215 && integer_onep (vr0.min)
2216 && integer_onep (vr0.max))
2217 || (vr1.type == VR_RANGE
2218 && integer_onep (vr1.min)
2219 && integer_onep (vr1.max))))
2220 {
2221 type = VR_RANGE;
2222 min = max = build_int_cst (expr_type, 1);
2223 }
2224 else if (vr0.type != VR_VARYING
2225 && vr1.type != VR_VARYING
2226 && vr0.type == vr1.type
2227 && !symbolic_range_p (&vr0)
2228 && !overflow_infinity_range_p (&vr0)
2229 && !symbolic_range_p (&vr1)
2230 && !overflow_infinity_range_p (&vr1))
2231 {
2232 /* Boolean expressions cannot be folded with int_const_binop. */
2233 min = fold_binary (code, expr_type, vr0.min, vr1.min);
2234 max = fold_binary (code, expr_type, vr0.max, vr1.max);
2235 }
2236 else
2237 {
2238 /* The result of a TRUTH_*_EXPR is always true or false. */
2239 set_value_range_to_truthvalue (vr, expr_type);
2240 return;
2241 }
2242 }
2243 else if (code == PLUS_EXPR
2244 || code == MIN_EXPR
2245 || code == MAX_EXPR)
2246 {
2247 /* If we have a PLUS_EXPR with two VR_ANTI_RANGEs, drop to
2248 VR_VARYING. It would take more effort to compute a precise
2249 range for such a case. For example, if we have op0 == 1 and
2250 op1 == -1 with their ranges both being ~[0,0], we would have
2251 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
2252 Note that we are guaranteed to have vr0.type == vr1.type at
2253 this point. */
2254 if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
2255 {
2256 set_value_range_to_varying (vr);
2257 return;
2258 }
2259
2260 /* For operations that make the resulting range directly
2261 proportional to the original ranges, apply the operation to
2262 the same end of each range. */
2263 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2264 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2265
2266 /* If both additions overflowed, the range kind is still correct.
2267 This happens regularly with subtracting something in unsigned
2268 arithmetic.
2269 ??? See PR30318 for all the cases we do not handle. */
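/* E.g., for an unsigned char x in [200, 250], x - 156 is
   represented as x + 100; both 200 + 100 and 250 + 100 wrap,
   yet the wrapped bounds [44, 94] still describe the result
   exactly. */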
2270 if (code == PLUS_EXPR
2271 && (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2272 && (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2273 {
2274 min = build_int_cst_wide (TREE_TYPE (min),
2275 TREE_INT_CST_LOW (min),
2276 TREE_INT_CST_HIGH (min));
2277 max = build_int_cst_wide (TREE_TYPE (max),
2278 TREE_INT_CST_LOW (max),
2279 TREE_INT_CST_HIGH (max));
2280 }
2281 }
2282 else if (code == MULT_EXPR
2283 || code == TRUNC_DIV_EXPR
2284 || code == FLOOR_DIV_EXPR
2285 || code == CEIL_DIV_EXPR
2286 || code == EXACT_DIV_EXPR
2287 || code == ROUND_DIV_EXPR
2288 || code == RSHIFT_EXPR)
2289 {
2290 tree val[4];
2291 size_t i;
2292 bool sop;
2293
2294 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2295 drop to VR_VARYING. It would take more effort to compute a
2296 precise range for such a case. For example, if we have
2297 op0 == 65536 and op1 == 65536 with their ranges both being
2298 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2299 we cannot claim that the product is in ~[0,0]. Note that we
2300 are guaranteed to have vr0.type == vr1.type at this
2301 point. */
2302 if (code == MULT_EXPR
2303 && vr0.type == VR_ANTI_RANGE
2304 && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (op0)))
2305 {
2306 set_value_range_to_varying (vr);
2307 return;
2308 }
2309
2310 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2311 then drop to VR_VARYING. Outside of this range we get undefined
2312 behavior from the shift operation. We cannot even trust
2313 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2314 shifts, and the operation at the tree level may be widened. */
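/* For instance, with a 32-bit expr_type a shift-count range of
   [0, 40] is rejected here because 40 > 31, and the result
   becomes VR_VARYING. */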
2315 if (code == RSHIFT_EXPR)
2316 {
2317 if (vr1.type == VR_ANTI_RANGE
2318 || !vrp_expr_computes_nonnegative (op1, &sop)
2319 || (operand_less_p
2320 (build_int_cst (TREE_TYPE (vr1.max),
2321 TYPE_PRECISION (expr_type) - 1),
2322 vr1.max) != 0))
2323 {
2324 set_value_range_to_varying (vr);
2325 return;
2326 }
2327 }
2328
2329 else if ((code == TRUNC_DIV_EXPR
2330 || code == FLOOR_DIV_EXPR
2331 || code == CEIL_DIV_EXPR
2332 || code == EXACT_DIV_EXPR
2333 || code == ROUND_DIV_EXPR)
2334 && (vr0.type != VR_RANGE || symbolic_range_p (&vr0)))
2335 {
2336 /* For division, if op1 has VR_RANGE but op0 does not, something
2337 can be deduced just from that range. Say [min, max] / [4, max]
2338 gives the range [min / 4, max / 4]. */
2339 if (vr1.type == VR_RANGE
2340 && !symbolic_range_p (&vr1)
2341 && !range_includes_zero_p (&vr1))
2342 {
2343 vr0.type = type = VR_RANGE;
2344 vr0.min = vrp_val_min (TREE_TYPE (op0));
2345 vr0.max = vrp_val_max (TREE_TYPE (op0));
2346 }
2347 else
2348 {
2349 set_value_range_to_varying (vr);
2350 return;
2351 }
2352 }
2353
2354 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2355 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2356 include 0. */
2357 if ((code == TRUNC_DIV_EXPR
2358 || code == FLOOR_DIV_EXPR
2359 || code == CEIL_DIV_EXPR
2360 || code == EXACT_DIV_EXPR
2361 || code == ROUND_DIV_EXPR)
2362 && vr0.type == VR_RANGE
2363 && (vr1.type != VR_RANGE
2364 || symbolic_range_p (&vr1)
2365 || range_includes_zero_p (&vr1)))
2366 {
2367 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2368 int cmp;
2369
2370 sop = false;
2371 min = NULL_TREE;
2372 max = NULL_TREE;
2373 if (vrp_expr_computes_nonnegative (op1, &sop) && !sop)
2374 {
2375 /* For unsigned division or when divisor is known
2376 to be non-negative, the range has to cover
2377 all numbers from 0 to max for positive max
2378 and all numbers from min to 0 for negative min. */
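/* For instance, [8, 24] divided by such a divisor yields
   [0, 24], and [-24, -8] yields [-24, 0]. */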
2379 cmp = compare_values (vr0.max, zero);
2380 if (cmp == -1)
2381 max = zero;
2382 else if (cmp == 0 || cmp == 1)
2383 max = vr0.max;
2384 else
2385 type = VR_VARYING;
2386 cmp = compare_values (vr0.min, zero);
2387 if (cmp == 1)
2388 min = zero;
2389 else if (cmp == 0 || cmp == -1)
2390 min = vr0.min;
2391 else
2392 type = VR_VARYING;
2393 }
2394 else
2395 {
2396 /* Otherwise the range is -max .. max or min .. -min
2397 depending on which bound is bigger in absolute value,
2398 as the division can change the sign. */
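/* For instance, both [-3, 10] and [-10, 3] yield [-10, 10]. */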
2399 abs_extent_range (vr, vr0.min, vr0.max);
2400 return;
2401 }
2402 if (type == VR_VARYING)
2403 {
2404 set_value_range_to_varying (vr);
2405 return;
2406 }
2407 }
2408
2409 /* Multiplications and divisions are a bit tricky to handle,
2410 depending on the mix of signs we have in the two ranges, we
2411 need to operate on different values to get the minimum and
2412 maximum values for the new range. One approach is to figure
2413 out all the variations of range combinations and do the
2414 operations.
2415
2416 However, this involves several calls to compare_values and it
2417 is pretty convoluted. It's simpler to do the 4 operations
2418 (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
2419 MAX1) and then figure the smallest and largest values to form
2420 the new range. */
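/* As an illustration, for [2, 3] * [5, 7] the four products are
   10, 14, 15 and 21, giving [10, 21]; for [-2, 3] * [5, 7] they
   are -10, -14, 15 and 21, giving [-14, 21]. */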
2421 else
2422 {
2423 gcc_assert ((vr0.type == VR_RANGE
2424 || (code == MULT_EXPR && vr0.type == VR_ANTI_RANGE))
2425 && vr0.type == vr1.type);
2426
2427 /* Compute the 4 cross operations. */
2428 sop = false;
2429 val[0] = vrp_int_const_binop (code, vr0.min, vr1.min);
2430 if (val[0] == NULL_TREE)
2431 sop = true;
2432
2433 if (vr1.max == vr1.min)
2434 val[1] = NULL_TREE;
2435 else
2436 {
2437 val[1] = vrp_int_const_binop (code, vr0.min, vr1.max);
2438 if (val[1] == NULL_TREE)
2439 sop = true;
2440 }
2441
2442 if (vr0.max == vr0.min)
2443 val[2] = NULL_TREE;
2444 else
2445 {
2446 val[2] = vrp_int_const_binop (code, vr0.max, vr1.min);
2447 if (val[2] == NULL_TREE)
2448 sop = true;
2449 }
2450
2451 if (vr0.min == vr0.max || vr1.min == vr1.max)
2452 val[3] = NULL_TREE;
2453 else
2454 {
2455 val[3] = vrp_int_const_binop (code, vr0.max, vr1.max);
2456 if (val[3] == NULL_TREE)
2457 sop = true;
2458 }
2459
2460 if (sop)
2461 {
2462 set_value_range_to_varying (vr);
2463 return;
2464 }
2465
2466 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2467 of VAL[i]. */
2468 min = val[0];
2469 max = val[0];
2470 for (i = 1; i < 4; i++)
2471 {
2472 if (!is_gimple_min_invariant (min)
2473 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2474 || !is_gimple_min_invariant (max)
2475 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2476 break;
2477
2478 if (val[i])
2479 {
2480 if (!is_gimple_min_invariant (val[i])
2481 || (TREE_OVERFLOW (val[i])
2482 && !is_overflow_infinity (val[i])))
2483 {
2484 /* If we found an overflowed value, set MIN and MAX
2485 to it so that we set the resulting range to
2486 VARYING. */
2487 min = max = val[i];
2488 break;
2489 }
2490
2491 if (compare_values (val[i], min) == -1)
2492 min = val[i];
2493
2494 if (compare_values (val[i], max) == 1)
2495 max = val[i];
2496 }
2497 }
2498 }
2499 }
2500 else if (code == MINUS_EXPR)
2501 {
2502 /* If we have a MINUS_EXPR with two VR_ANTI_RANGEs, drop to
2503 VR_VARYING. It would take more effort to compute a precise
2504 range for such a case. For example, if we have op0 == 1 and
2505 op1 == 1 with their ranges both being ~[0,0], we would have
2506 op0 - op1 == 0, so we cannot claim that the difference is in
2507 ~[0,0]. Note that we are guaranteed to have
2508 vr0.type == vr1.type at this point. */
2509 if (vr0.type == VR_ANTI_RANGE)
2510 {
2511 set_value_range_to_varying (vr);
2512 return;
2513 }
2514
2515 /* For MINUS_EXPR, apply the operation to the opposite ends of
2516 each range. */
2517 min = vrp_int_const_binop (code, vr0.min, vr1.max);
2518 max = vrp_int_const_binop (code, vr0.max, vr1.min);
2519 }
2520 else if (code == BIT_AND_EXPR)
2521 {
2522 bool vr0_int_cst_singleton_p, vr1_int_cst_singleton_p;
2523
2524 vr0_int_cst_singleton_p = range_int_cst_singleton_p (&vr0);
2525 vr1_int_cst_singleton_p = range_int_cst_singleton_p (&vr1);
2526
2527 if (vr0_int_cst_singleton_p && vr1_int_cst_singleton_p)
2528 min = max = int_const_binop (code, vr0.max, vr1.max, 0);
2529 else if (vr0_int_cst_singleton_p
2530 && tree_int_cst_sgn (vr0.max) >= 0)
2531 {
2532 min = build_int_cst (expr_type, 0);
2533 max = vr0.max;
2534 }
2535 else if (vr1_int_cst_singleton_p
2536 && tree_int_cst_sgn (vr1.max) >= 0)
2537 {
2538 type = VR_RANGE;
2539 min = build_int_cst (expr_type, 0);
2540 max = vr1.max;
2541 }
2542 else
2543 {
2544 set_value_range_to_varying (vr);
2545 return;
2546 }
2547 }
2548 else if (code == BIT_IOR_EXPR)
2549 {
2550 if (range_int_cst_p (&vr0)
2551 && range_int_cst_p (&vr1)
2552 && tree_int_cst_sgn (vr0.min) >= 0
2553 && tree_int_cst_sgn (vr1.min) >= 0)
2554 {
2555 double_int vr0_max = tree_to_double_int (vr0.max);
2556 double_int vr1_max = tree_to_double_int (vr1.max);
2557 double_int ior_max;
2558
2559 /* Set all bits to the right of the most significant one to 1.
2560 For example, [0, 4] | [4, 4] = [4, 7]. */
2561 ior_max.low = vr0_max.low | vr1_max.low;
2562 ior_max.high = vr0_max.high | vr1_max.high;
2563 if (ior_max.high != 0)
2564 {
2565 ior_max.low = ~(unsigned HOST_WIDE_INT)0u;
2566 ior_max.high |= ((HOST_WIDE_INT) 1
2567 << floor_log2 (ior_max.high)) - 1;
2568 }
2569 else if (ior_max.low != 0)
2570 ior_max.low |= ((unsigned HOST_WIDE_INT) 1u
2571 << floor_log2 (ior_max.low)) - 1;
2572
2573 /* Both of these endpoints are conservative. */
2574 min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
2575 max = double_int_to_tree (expr_type, ior_max);
2576 }
2577 else
2578 {
2579 set_value_range_to_varying (vr);
2580 return;
2581 }
2582 }
2583 else
2584 gcc_unreachable ();
2585
2586 /* If either MIN or MAX overflowed, then set the resulting range to
2587 VARYING. But we do accept an overflow infinity
2588 representation. */
2589 if (min == NULL_TREE
2590 || !is_gimple_min_invariant (min)
2591 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2592 || max == NULL_TREE
2593 || !is_gimple_min_invariant (max)
2594 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2595 {
2596 set_value_range_to_varying (vr);
2597 return;
2598 }
2599
2600 /* We punt if:
2601 1) [-INF, +INF]
2602 2) [-INF, +-INF(OVF)]
2603 3) [+-INF(OVF), +INF]
2604 4) [+-INF(OVF), +-INF(OVF)]
2605 We learn nothing when we have INF and INF(OVF) on both sides.
2606 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2607 overflow. */
2608 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2609 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2610 {
2611 set_value_range_to_varying (vr);
2612 return;
2613 }
2614
2615 cmp = compare_values (min, max);
2616 if (cmp == -2 || cmp == 1)
2617 {
2618 /* If the new range has its limits swapped around (MIN > MAX),
2619 then the operation caused one of them to wrap around, mark
2620 the new range VARYING. */
2621 set_value_range_to_varying (vr);
2622 }
2623 else
2624 set_value_range (vr, type, min, max, NULL);
2625 }
2626
2627
2628 /* Extract range information from a unary expression EXPR based on
2629 the range of its operand and the expression code. */
2630
2631 static void
2632 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
2633 tree type, tree op0)
2634 {
2635 tree min, max;
2636 int cmp;
2637 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
2638
2639 /* Refuse to operate on certain unary expressions for which we
2640 cannot easily determine a resulting range. */
2641 if (code == FIX_TRUNC_EXPR
2642 || code == FLOAT_EXPR
2643 || code == BIT_NOT_EXPR
2644 || code == CONJ_EXPR)
2645 {
2646 /* We can still do constant propagation here. */
2647 if ((op0 = op_with_constant_singleton_value_range (op0)) != NULL_TREE)
2648 {
2649 tree tem = fold_unary (code, type, op0);
2650 if (tem
2651 && is_gimple_min_invariant (tem)
2652 && !is_overflow_infinity (tem))
2653 {
2654 set_value_range (vr, VR_RANGE, tem, tem, NULL);
2655 return;
2656 }
2657 }
2658 set_value_range_to_varying (vr);
2659 return;
2660 }
2661
2662 /* Get value ranges for the operand. For constant operands, create
2663 a new value range with the operand to simplify processing. */
2664 if (TREE_CODE (op0) == SSA_NAME)
2665 vr0 = *(get_value_range (op0));
2666 else if (is_gimple_min_invariant (op0))
2667 set_value_range_to_value (&vr0, op0, NULL);
2668 else
2669 set_value_range_to_varying (&vr0);
2670
2671 /* If VR0 is UNDEFINED, so is the result. */
2672 if (vr0.type == VR_UNDEFINED)
2673 {
2674 set_value_range_to_undefined (vr);
2675 return;
2676 }
2677
2678 /* Refuse to operate on symbolic ranges, or if the operand is
2679 neither a pointer type nor an integral type. */
2680 if ((!INTEGRAL_TYPE_P (TREE_TYPE (op0))
2681 && !POINTER_TYPE_P (TREE_TYPE (op0)))
2682 || (vr0.type != VR_VARYING
2683 && symbolic_range_p (&vr0)))
2684 {
2685 set_value_range_to_varying (vr);
2686 return;
2687 }
2688
2689 /* If the expression involves pointers, we are only interested in
2690 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
2691 if (POINTER_TYPE_P (type) || POINTER_TYPE_P (TREE_TYPE (op0)))
2692 {
2693 bool sop;
2694
2695 sop = false;
2696 if (range_is_nonnull (&vr0)
2697 || (tree_unary_nonzero_warnv_p (code, type, op0, &sop)
2698 && !sop))
2699 set_value_range_to_nonnull (vr, type);
2700 else if (range_is_null (&vr0))
2701 set_value_range_to_null (vr, type);
2702 else
2703 set_value_range_to_varying (vr);
2704
2705 return;
2706 }
2707
2708 /* Handle unary expressions on integer ranges. */
2709 if (CONVERT_EXPR_CODE_P (code)
2710 && INTEGRAL_TYPE_P (type)
2711 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2712 {
2713 tree inner_type = TREE_TYPE (op0);
2714 tree outer_type = type;
2715
2716 /* If VR0 is varying and we increase the type precision, assume
2717 a full range for the following transformation. */
2718 if (vr0.type == VR_VARYING
2719 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
2720 {
2721 vr0.type = VR_RANGE;
2722 vr0.min = TYPE_MIN_VALUE (inner_type);
2723 vr0.max = TYPE_MAX_VALUE (inner_type);
2724 }
2725
2726 /* If VR0 is a constant range or anti-range and the conversion is
2727 not truncating, we can convert the min and max values and
2728 canonicalize the resulting range. Otherwise we can do the
2729 conversion if the size of the range is less than what the
2730 precision of the target type can represent and the range is
2731 not an anti-range. */
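/* For instance, converting an int range [10, 200] to unsigned char
   is accepted because (200 - 10) >> 8 == 0 and yields [10, 200],
   while an int range [0, 1000] fails this test and the result
   drops to VR_VARYING below. */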
2732 if ((vr0.type == VR_RANGE
2733 || vr0.type == VR_ANTI_RANGE)
2734 && TREE_CODE (vr0.min) == INTEGER_CST
2735 && TREE_CODE (vr0.max) == INTEGER_CST
2736 && (!is_overflow_infinity (vr0.min)
2737 || (vr0.type == VR_RANGE
2738 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2739 && needs_overflow_infinity (outer_type)
2740 && supports_overflow_infinity (outer_type)))
2741 && (!is_overflow_infinity (vr0.max)
2742 || (vr0.type == VR_RANGE
2743 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
2744 && needs_overflow_infinity (outer_type)
2745 && supports_overflow_infinity (outer_type)))
2746 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
2747 || (vr0.type == VR_RANGE
2748 && integer_zerop (int_const_binop (RSHIFT_EXPR,
2749 int_const_binop (MINUS_EXPR, vr0.max, vr0.min, 0),
2750 size_int (TYPE_PRECISION (outer_type)), 0)))))
2751 {
2752 tree new_min, new_max;
2753 new_min = force_fit_type_double (outer_type,
2754 TREE_INT_CST_LOW (vr0.min),
2755 TREE_INT_CST_HIGH (vr0.min), 0, 0);
2756 new_max = force_fit_type_double (outer_type,
2757 TREE_INT_CST_LOW (vr0.max),
2758 TREE_INT_CST_HIGH (vr0.max), 0, 0);
2759 if (is_overflow_infinity (vr0.min))
2760 new_min = negative_overflow_infinity (outer_type);
2761 if (is_overflow_infinity (vr0.max))
2762 new_max = positive_overflow_infinity (outer_type);
2763 set_and_canonicalize_value_range (vr, vr0.type,
2764 new_min, new_max, NULL);
2765 return;
2766 }
2767
2768 set_value_range_to_varying (vr);
2769 return;
2770 }
2771
2772 /* Conversion of a VR_VARYING value to a wider type can result
2773 in a usable range. So wait until after we've handled conversions
2774 before dropping the result to VR_VARYING if we had a source
2775 operand that is VR_VARYING. */
2776 if (vr0.type == VR_VARYING)
2777 {
2778 set_value_range_to_varying (vr);
2779 return;
2780 }
2781
2782 /* Apply the operation to each end of the range and see what we end
2783 up with. */
2784 if (code == NEGATE_EXPR
2785 && !TYPE_UNSIGNED (type))
2786 {
2787 /* NEGATE_EXPR flips the range around. We need to treat
2788 TYPE_MIN_VALUE specially. */
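/* For instance, negating [3, 7] gives [-7, -3]; a bound equal to
   TYPE_MIN_VALUE cannot simply be negated, which is what the
   special cases below handle. */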
2789 if (is_positive_overflow_infinity (vr0.max))
2790 min = negative_overflow_infinity (type);
2791 else if (is_negative_overflow_infinity (vr0.max))
2792 min = positive_overflow_infinity (type);
2793 else if (!vrp_val_is_min (vr0.max))
2794 min = fold_unary_to_constant (code, type, vr0.max);
2795 else if (needs_overflow_infinity (type))
2796 {
2797 if (supports_overflow_infinity (type)
2798 && !is_overflow_infinity (vr0.min)
2799 && !vrp_val_is_min (vr0.min))
2800 min = positive_overflow_infinity (type);
2801 else
2802 {
2803 set_value_range_to_varying (vr);
2804 return;
2805 }
2806 }
2807 else
2808 min = TYPE_MIN_VALUE (type);
2809
2810 if (is_positive_overflow_infinity (vr0.min))
2811 max = negative_overflow_infinity (type);
2812 else if (is_negative_overflow_infinity (vr0.min))
2813 max = positive_overflow_infinity (type);
2814 else if (!vrp_val_is_min (vr0.min))
2815 max = fold_unary_to_constant (code, type, vr0.min);
2816 else if (needs_overflow_infinity (type))
2817 {
2818 if (supports_overflow_infinity (type))
2819 max = positive_overflow_infinity (type);
2820 else
2821 {
2822 set_value_range_to_varying (vr);
2823 return;
2824 }
2825 }
2826 else
2827 max = TYPE_MIN_VALUE (type);
2828 }
2829 else if (code == NEGATE_EXPR
2830 && TYPE_UNSIGNED (type))
2831 {
2832 if (!range_includes_zero_p (&vr0))
2833 {
2834 max = fold_unary_to_constant (code, type, vr0.min);
2835 min = fold_unary_to_constant (code, type, vr0.max);
2836 }
2837 else
2838 {
2839 if (range_is_null (&vr0))
2840 set_value_range_to_null (vr, type);
2841 else
2842 set_value_range_to_varying (vr);
2843 return;
2844 }
2845 }
2846 else if (code == ABS_EXPR
2847 && !TYPE_UNSIGNED (type))
2848 {
2849 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
2850 useful range. */
2851 if (!TYPE_OVERFLOW_UNDEFINED (type)
2852 && ((vr0.type == VR_RANGE
2853 && vrp_val_is_min (vr0.min))
2854 || (vr0.type == VR_ANTI_RANGE
2855 && !vrp_val_is_min (vr0.min)
2856 && !range_includes_zero_p (&vr0))))
2857 {
2858 set_value_range_to_varying (vr);
2859 return;
2860 }
2861
2862 /* ABS_EXPR may flip the range around, if the original range
2863 included negative values. */
2864 if (is_overflow_infinity (vr0.min))
2865 min = positive_overflow_infinity (type);
2866 else if (!vrp_val_is_min (vr0.min))
2867 min = fold_unary_to_constant (code, type, vr0.min);
2868 else if (!needs_overflow_infinity (type))
2869 min = TYPE_MAX_VALUE (type);
2870 else if (supports_overflow_infinity (type))
2871 min = positive_overflow_infinity (type);
2872 else
2873 {
2874 set_value_range_to_varying (vr);
2875 return;
2876 }
2877
2878 if (is_overflow_infinity (vr0.max))
2879 max = positive_overflow_infinity (type);
2880 else if (!vrp_val_is_min (vr0.max))
2881 max = fold_unary_to_constant (code, type, vr0.max);
2882 else if (!needs_overflow_infinity (type))
2883 max = TYPE_MAX_VALUE (type);
2884 else if (supports_overflow_infinity (type)
2885 /* We shouldn't generate [+INF, +INF] as set_value_range
2886 doesn't like this and ICEs. */
2887 && !is_positive_overflow_infinity (min))
2888 max = positive_overflow_infinity (type);
2889 else
2890 {
2891 set_value_range_to_varying (vr);
2892 return;
2893 }
2894
2895 cmp = compare_values (min, max);
2896
2897 /* If a VR_ANTI_RANGE contains zero, then we have
2898 ~[-INF, min(MIN, MAX)]. */
2899 if (vr0.type == VR_ANTI_RANGE)
2900 {
2901 if (range_includes_zero_p (&vr0))
2902 {
2903 /* Take the lower of the two values. */
2904 if (cmp != 1)
2905 max = min;
2906
2907 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
2908 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
2909 flag_wrapv is set and the original anti-range doesn't include
2910 TYPE_MIN_VALUE; remember that -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
2911 if (TYPE_OVERFLOW_WRAPS (type))
2912 {
2913 tree type_min_value = TYPE_MIN_VALUE (type);
2914
2915 min = (vr0.min != type_min_value
2916 ? int_const_binop (PLUS_EXPR, type_min_value,
2917 integer_one_node, 0)
2918 : type_min_value);
2919 }
2920 else
2921 {
2922 if (overflow_infinity_range_p (&vr0))
2923 min = negative_overflow_infinity (type);
2924 else
2925 min = TYPE_MIN_VALUE (type);
2926 }
2927 }
2928 else
2929 {
2930 /* All else has failed, so create the range [0, INF], even for
2931 flag_wrapv since TYPE_MIN_VALUE is in the original
2932 anti-range. */
2933 vr0.type = VR_RANGE;
2934 min = build_int_cst (type, 0);
2935 if (needs_overflow_infinity (type))
2936 {
2937 if (supports_overflow_infinity (type))
2938 max = positive_overflow_infinity (type);
2939 else
2940 {
2941 set_value_range_to_varying (vr);
2942 return;
2943 }
2944 }
2945 else
2946 max = TYPE_MAX_VALUE (type);
2947 }
2948 }
2949
2950 /* If the range contains zero then we know that the minimum value in the
2951 range will be zero. */
2952 else if (range_includes_zero_p (&vr0))
2953 {
2954 if (cmp == 1)
2955 max = min;
2956 min = build_int_cst (type, 0);
2957 }
2958 else
2959 {
2960 /* If the range was reversed, swap MIN and MAX. */
2961 if (cmp == 1)
2962 {
2963 tree t = min;
2964 min = max;
2965 max = t;
2966 }
2967 }
2968 }
2969 else
2970 {
2971 /* Otherwise, operate on each end of the range. */
2972 min = fold_unary_to_constant (code, type, vr0.min);
2973 max = fold_unary_to_constant (code, type, vr0.max);
2974
2975 if (needs_overflow_infinity (type))
2976 {
2977 gcc_assert (code != NEGATE_EXPR && code != ABS_EXPR);
2978
2979 /* If both sides have overflowed, we don't know
2980 anything. */
2981 if ((is_overflow_infinity (vr0.min)
2982 || TREE_OVERFLOW (min))
2983 && (is_overflow_infinity (vr0.max)
2984 || TREE_OVERFLOW (max)))
2985 {
2986 set_value_range_to_varying (vr);
2987 return;
2988 }
2989
2990 if (is_overflow_infinity (vr0.min))
2991 min = vr0.min;
2992 else if (TREE_OVERFLOW (min))
2993 {
2994 if (supports_overflow_infinity (type))
2995 min = (tree_int_cst_sgn (min) >= 0
2996 ? positive_overflow_infinity (TREE_TYPE (min))
2997 : negative_overflow_infinity (TREE_TYPE (min)));
2998 else
2999 {
3000 set_value_range_to_varying (vr);
3001 return;
3002 }
3003 }
3004
3005 if (is_overflow_infinity (vr0.max))
3006 max = vr0.max;
3007 else if (TREE_OVERFLOW (max))
3008 {
3009 if (supports_overflow_infinity (type))
3010 max = (tree_int_cst_sgn (max) >= 0
3011 ? positive_overflow_infinity (TREE_TYPE (max))
3012 : negative_overflow_infinity (TREE_TYPE (max)));
3013 else
3014 {
3015 set_value_range_to_varying (vr);
3016 return;
3017 }
3018 }
3019 }
3020 }
3021
3022 cmp = compare_values (min, max);
3023 if (cmp == -2 || cmp == 1)
3024 {
3025 /* If the new range has its limits swapped around (MIN > MAX),
3026 then the operation caused one of them to wrap around, mark
3027 the new range VARYING. */
3028 set_value_range_to_varying (vr);
3029 }
3030 else
3031 set_value_range (vr, vr0.type, min, max, NULL);
3032 }
3033
3034
3035 /* Extract range information from a conditional expression EXPR based on
3036 the ranges of each of its operands and the expression code. */
3037
3038 static void
3039 extract_range_from_cond_expr (value_range_t *vr, tree expr)
3040 {
3041 tree op0, op1;
3042 value_range_t vr0 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3043 value_range_t vr1 = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
3044
3045 /* Get value ranges for each operand. For constant operands, create
3046 a new value range with the operand to simplify processing. */
3047 op0 = COND_EXPR_THEN (expr);
3048 if (TREE_CODE (op0) == SSA_NAME)
3049 vr0 = *(get_value_range (op0));
3050 else if (is_gimple_min_invariant (op0))
3051 set_value_range_to_value (&vr0, op0, NULL);
3052 else
3053 set_value_range_to_varying (&vr0);
3054
3055 op1 = COND_EXPR_ELSE (expr);
3056 if (TREE_CODE (op1) == SSA_NAME)
3057 vr1 = *(get_value_range (op1));
3058 else if (is_gimple_min_invariant (op1))
3059 set_value_range_to_value (&vr1, op1, NULL);
3060 else
3061 set_value_range_to_varying (&vr1);
3062
3063 /* The resulting value range is the union of the operand ranges. */
3064 vrp_meet (&vr0, &vr1);
3065 copy_value_range (vr, &vr0);
3066 }
3067
3068
3069 /* Extract range information from a comparison expression EXPR based
3070 on the range of its operand and the expression code. */
3071
3072 static void
3073 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3074 tree type, tree op0, tree op1)
3075 {
3076 bool sop = false;
3077 tree val;
3078
3079 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3080 NULL);
3081
3082 /* A disadvantage of using a special infinity as an overflow
3083 representation is that we lose the ability to record overflow
3084 when we don't have an infinity. So we have to ignore a result
3085 which relies on overflow. */
3086
3087 if (val && !is_overflow_infinity (val) && !sop)
3088 {
3089 /* Since this expression was found on the RHS of an assignment,
3090 its type may be different from _Bool. Convert VAL to EXPR's
3091 type. */
3092 val = fold_convert (type, val);
3093 if (is_gimple_min_invariant (val))
3094 set_value_range_to_value (vr, val, vr->equiv);
3095 else
3096 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3097 }
3098 else
3099 /* The result of a comparison is always true or false. */
3100 set_value_range_to_truthvalue (vr, type);
3101 }
3102
3103 /* Try to derive a nonnegative or nonzero range out of STMT relying
3104 primarily on generic routines in fold in conjunction with range data.
3105 Store the result in *VR. */
3106
3107 static void
3108 extract_range_basic (value_range_t *vr, gimple stmt)
3109 {
3110 bool sop = false;
3111 tree type = gimple_expr_type (stmt);
3112
3113 if (INTEGRAL_TYPE_P (type)
3114 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3115 set_value_range_to_nonnegative (vr, type,
3116 sop || stmt_overflow_infinity (stmt));
3117 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3118 && !sop)
3119 set_value_range_to_nonnull (vr, type);
3120 else
3121 set_value_range_to_varying (vr);
3122 }
3123
3124
3125 /* Try to compute a useful range out of assignment STMT and store it
3126 in *VR. */
3127
3128 static void
3129 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3130 {
3131 enum tree_code code = gimple_assign_rhs_code (stmt);
3132
3133 if (code == ASSERT_EXPR)
3134 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3135 else if (code == SSA_NAME)
3136 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3137 else if (TREE_CODE_CLASS (code) == tcc_binary
3138 || code == TRUTH_AND_EXPR
3139 || code == TRUTH_OR_EXPR
3140 || code == TRUTH_XOR_EXPR)
3141 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3142 gimple_expr_type (stmt),
3143 gimple_assign_rhs1 (stmt),
3144 gimple_assign_rhs2 (stmt));
3145 else if (TREE_CODE_CLASS (code) == tcc_unary)
3146 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3147 gimple_expr_type (stmt),
3148 gimple_assign_rhs1 (stmt));
3149 else if (code == COND_EXPR)
3150 extract_range_from_cond_expr (vr, gimple_assign_rhs1 (stmt));
3151 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3152 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3153 gimple_expr_type (stmt),
3154 gimple_assign_rhs1 (stmt),
3155 gimple_assign_rhs2 (stmt));
3156 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3157 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3158 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3159 else
3160 set_value_range_to_varying (vr);
3161
3162 if (vr->type == VR_VARYING)
3163 extract_range_basic (vr, stmt);
3164 }
3165
3166 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3167 would be profitable to adjust VR using scalar evolution information
3168 for VAR. If so, update VR with the new limits. */
3169
3170 static void
3171 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3172 gimple stmt, tree var)
3173 {
3174 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3175 enum ev_direction dir;
3176
3177 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3178 better opportunities than a regular range, but I'm not sure. */
3179 if (vr->type == VR_ANTI_RANGE)
3180 return;
3181
3182 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3183
3184 /* Like in PR19590, scev can return a constant function. */
3185 if (is_gimple_min_invariant (chrec))
3186 {
3187 set_value_range_to_value (vr, chrec, vr->equiv);
3188 return;
3189 }
3190
3191 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3192 return;
3193
3194 init = initial_condition_in_loop_num (chrec, loop->num);
3195 tem = op_with_constant_singleton_value_range (init);
3196 if (tem)
3197 init = tem;
3198 step = evolution_part_in_loop_num (chrec, loop->num);
3199 tem = op_with_constant_singleton_value_range (step);
3200 if (tem)
3201 step = tem;
3202
3203 /* If STEP is symbolic, we can't know whether INIT will be the
3204 minimum or maximum value in the range. Also, unless INIT is
3205 a simple expression, compare_values and possibly other functions
3206 in tree-vrp won't be able to handle it. */
3207 if (step == NULL_TREE
3208 || !is_gimple_min_invariant (step)
3209 || !valid_value_p (init))
3210 return;
3211
3212 dir = scev_direction (chrec);
3213 if (/* Do not adjust ranges if we do not know whether the iv increases
3214 or decreases, ... */
3215 dir == EV_DIR_UNKNOWN
3216 /* ... or if it may wrap. */
3217 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3218 true))
3219 return;
3220
3221 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3222 negative_overflow_infinity and positive_overflow_infinity,
3223 because we have concluded that the loop probably does not
3224 wrap. */
3225
3226 type = TREE_TYPE (var);
3227 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3228 tmin = lower_bound_in_type (type, type);
3229 else
3230 tmin = TYPE_MIN_VALUE (type);
3231 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3232 tmax = upper_bound_in_type (type, type);
3233 else
3234 tmax = TYPE_MAX_VALUE (type);
3235
3236 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3237 {
3238 min = tmin;
3239 max = tmax;
3240
3241 /* For VARYING or UNDEFINED ranges, just about anything we get
3242 from scalar evolutions should be better. */
3243
3244 if (dir == EV_DIR_DECREASES)
3245 max = init;
3246 else
3247 min = init;
3248
3249 /* If we would create an invalid range, then just assume we
3250 know absolutely nothing. This may be over-conservative,
3251 but it's clearly safe, and should happen only in unreachable
3252 parts of code, or for invalid programs. */
3253 if (compare_values (min, max) == 1)
3254 return;
3255
3256 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3257 }
3258 else if (vr->type == VR_RANGE)
3259 {
3260 min = vr->min;
3261 max = vr->max;
3262
3263 if (dir == EV_DIR_DECREASES)
3264 {
3265 /* INIT is the maximum value. If INIT is lower than VR->MAX
3266 but no smaller than VR->MIN, set VR->MAX to INIT. */
3267 if (compare_values (init, max) == -1)
3268 {
3269 max = init;
3270
3271 /* If we just created an invalid range with the minimum
3272 greater than the maximum, we fail conservatively.
3273 This should happen only in unreachable
3274 parts of code, or for invalid programs. */
3275 if (compare_values (min, max) == 1)
3276 return;
3277 }
3278
3279 /* According to the loop information, the variable does not
3280 overflow. If we think it does, probably because of an
3281 overflow due to arithmetic on a different INF value,
3282 reset now. */
3283 if (is_negative_overflow_infinity (min))
3284 min = tmin;
3285 }
3286 else
3287 {
3288 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3289 if (compare_values (init, min) == 1)
3290 {
3291 min = init;
3292
3293 /* Again, avoid creating invalid range by failing. */
3294 if (compare_values (min, max) == 1)
3295 return;
3296 }
3297
3298 if (is_positive_overflow_infinity (max))
3299 max = tmax;
3300 }
3301
3302 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3303 }
3304 }
3305
3306 /* Return true if VAR may overflow at STMT. This checks any available
3307 loop information to see if we can determine that VAR does not
3308 overflow. */
3309
3310 static bool
3311 vrp_var_may_overflow (tree var, gimple stmt)
3312 {
3313 struct loop *l;
3314 tree chrec, init, step;
3315
3316 if (current_loops == NULL)
3317 return true;
3318
3319 l = loop_containing_stmt (stmt);
3320 if (l == NULL
3321 || !loop_outer (l))
3322 return true;
3323
3324 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3325 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3326 return true;
3327
3328 init = initial_condition_in_loop_num (chrec, l->num);
3329 step = evolution_part_in_loop_num (chrec, l->num);
3330
3331 if (step == NULL_TREE
3332 || !is_gimple_min_invariant (step)
3333 || !valid_value_p (init))
3334 return true;
3335
3336 /* If we get here, we know something useful about VAR based on the
3337 loop information. If it wraps, it may overflow. */
3338
3339 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3340 true))
3341 return true;
3342
3343 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
3344 {
3345 print_generic_expr (dump_file, var, 0);
3346 fprintf (dump_file, ": loop information indicates it does not overflow\n");
3347 }
3348
3349 return false;
3350 }
3351
3352
3353 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
3354
3355 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
3356 all the values in the ranges.
3357
3358 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
3359
3360 - Return NULL_TREE if it is not always possible to determine the
3361 value of the comparison.
3362
3363 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
3364 overflow infinity was used in the test. */
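/* For instance, [1, 5] LT_EXPR [6, 10] yields boolean_true_node,
   while [1, 5] LT_EXPR [3, 10] yields NULL_TREE because the two
   ranges overlap. */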
3365
3366
3367 static tree
3368 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
3369 bool *strict_overflow_p)
3370 {
3371 /* VARYING or UNDEFINED ranges cannot be compared. */
3372 if (vr0->type == VR_VARYING
3373 || vr0->type == VR_UNDEFINED
3374 || vr1->type == VR_VARYING
3375 || vr1->type == VR_UNDEFINED)
3376 return NULL_TREE;
3377
3378 /* Anti-ranges need to be handled separately. */
3379 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
3380 {
3381 /* If both are anti-ranges, then we cannot compute any
3382 comparison. */
3383 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
3384 return NULL_TREE;
3385
3386 /* These comparisons are never statically computable. */
3387 if (comp == GT_EXPR
3388 || comp == GE_EXPR
3389 || comp == LT_EXPR
3390 || comp == LE_EXPR)
3391 return NULL_TREE;
3392
3393 /* Equality can be computed only between a range and an
3394 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
3395 if (vr0->type == VR_RANGE)
3396 {
3397 /* To simplify processing, make VR0 the anti-range. */
3398 value_range_t *tmp = vr0;
3399 vr0 = vr1;
3400 vr1 = tmp;
3401 }
3402
3403 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
3404
3405 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
3406 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
3407 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3408
3409 return NULL_TREE;
3410 }
3411
3412 if (!usable_range_p (vr0, strict_overflow_p)
3413 || !usable_range_p (vr1, strict_overflow_p))
3414 return NULL_TREE;
3415
3416 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
3417 operands around and change the comparison code. */
3418 if (comp == GT_EXPR || comp == GE_EXPR)
3419 {
3420 value_range_t *tmp;
3421 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
3422 tmp = vr0;
3423 vr0 = vr1;
3424 vr1 = tmp;
3425 }
3426
3427 if (comp == EQ_EXPR)
3428 {
3429 /* Equality may only be computed if both ranges represent
3430 exactly one value. */
3431 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
3432 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
3433 {
3434 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
3435 strict_overflow_p);
3436 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
3437 strict_overflow_p);
3438 if (cmp_min == 0 && cmp_max == 0)
3439 return boolean_true_node;
3440 else if (cmp_min != -2 && cmp_max != -2)
3441 return boolean_false_node;
3442 }
3443 /* If [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] are disjoint, then V0 != V1. */
3444 else if (compare_values_warnv (vr0->min, vr1->max,
3445 strict_overflow_p) == 1
3446 || compare_values_warnv (vr1->min, vr0->max,
3447 strict_overflow_p) == 1)
3448 return boolean_false_node;
3449
3450 return NULL_TREE;
3451 }
3452 else if (comp == NE_EXPR)
3453 {
3454 int cmp1, cmp2;
3455
3456 /* If VR0 is completely to the left or completely to the right
3457 of VR1, they are always different. Notice that we need to
3458 make sure that both comparisons yield similar results to
3459 avoid comparing values that cannot be compared at
3460 compile-time. */
3461 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3462 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3463 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
3464 return boolean_true_node;
3465
3466 /* If VR0 and VR1 represent a single value and are identical,
3467 return false. */
3468 else if (compare_values_warnv (vr0->min, vr0->max,
3469 strict_overflow_p) == 0
3470 && compare_values_warnv (vr1->min, vr1->max,
3471 strict_overflow_p) == 0
3472 && compare_values_warnv (vr0->min, vr1->min,
3473 strict_overflow_p) == 0
3474 && compare_values_warnv (vr0->max, vr1->max,
3475 strict_overflow_p) == 0)
3476 return boolean_false_node;
3477
3478 /* Otherwise, they may or may not be different. */
3479 else
3480 return NULL_TREE;
3481 }
3482 else if (comp == LT_EXPR || comp == LE_EXPR)
3483 {
3484 int tst;
3485
3486 /* If VR0 is to the left of VR1, return true. */
3487 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
3488 if ((comp == LT_EXPR && tst == -1)
3489 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3490 {
3491 if (overflow_infinity_range_p (vr0)
3492 || overflow_infinity_range_p (vr1))
3493 *strict_overflow_p = true;
3494 return boolean_true_node;
3495 }
3496
3497 /* If VR0 is to the right of VR1, return false. */
3498 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
3499 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3500 || (comp == LE_EXPR && tst == 1))
3501 {
3502 if (overflow_infinity_range_p (vr0)
3503 || overflow_infinity_range_p (vr1))
3504 *strict_overflow_p = true;
3505 return boolean_false_node;
3506 }
3507
3508 /* Otherwise, we don't know. */
3509 return NULL_TREE;
3510 }
3511
3512 gcc_unreachable ();
3513 }
3514
3515
3516 /* Given a value range VR, a value VAL and a comparison code COMP, return
3517 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
3518 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
3519 always returns false. Return NULL_TREE if it is not always
3520 possible to determine the value of the comparison. Also set
3521 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
3522 infinity was used in the test. */
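/* For instance, [5, 10] GT_EXPR 3 yields boolean_true_node,
   ~[0, 0] NE_EXPR 0 yields boolean_true_node, and [5, 10]
   EQ_EXPR 7 yields NULL_TREE. */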
3523
3524 static tree
3525 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
3526 bool *strict_overflow_p)
3527 {
3528 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3529 return NULL_TREE;
3530
3531 /* Anti-ranges need to be handled separately. */
3532 if (vr->type == VR_ANTI_RANGE)
3533 {
3534 /* For anti-ranges, the only predicates that we can compute at
3535 compile time are equality and inequality. */
3536 if (comp == GT_EXPR
3537 || comp == GE_EXPR
3538 || comp == LT_EXPR
3539 || comp == LE_EXPR)
3540 return NULL_TREE;
3541
3542 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
3543 if (value_inside_range (val, vr) == 1)
3544 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
3545
3546 return NULL_TREE;
3547 }
3548
3549 if (!usable_range_p (vr, strict_overflow_p))
3550 return NULL_TREE;
3551
3552 if (comp == EQ_EXPR)
3553 {
3554 /* EQ_EXPR may only be computed if VR represents exactly
3555 one value. */
3556 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
3557 {
3558 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
3559 if (cmp == 0)
3560 return boolean_true_node;
3561 else if (cmp == -1 || cmp == 1 || cmp == 2)
3562 return boolean_false_node;
3563 }
3564 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
3565 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
3566 return boolean_false_node;
3567
3568 return NULL_TREE;
3569 }
3570 else if (comp == NE_EXPR)
3571 {
3572 /* If VAL is not inside VR, then they are always different. */
3573 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
3574 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
3575 return boolean_true_node;
3576
3577 /* If VR represents exactly one value equal to VAL, then return
3578 false. */
3579 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
3580 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
3581 return boolean_false_node;
3582
3583 /* Otherwise, they may or may not be different. */
3584 return NULL_TREE;
3585 }
3586 else if (comp == LT_EXPR || comp == LE_EXPR)
3587 {
3588 int tst;
3589
3590 /* If VR is to the left of VAL, return true. */
3591 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3592 if ((comp == LT_EXPR && tst == -1)
3593 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
3594 {
3595 if (overflow_infinity_range_p (vr))
3596 *strict_overflow_p = true;
3597 return boolean_true_node;
3598 }
3599
3600 /* If VR is to the right of VAL, return false. */
3601 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3602 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
3603 || (comp == LE_EXPR && tst == 1))
3604 {
3605 if (overflow_infinity_range_p (vr))
3606 *strict_overflow_p = true;
3607 return boolean_false_node;
3608 }
3609
3610 /* Otherwise, we don't know. */
3611 return NULL_TREE;
3612 }
3613 else if (comp == GT_EXPR || comp == GE_EXPR)
3614 {
3615 int tst;
3616
3617 /* If VR is to the right of VAL, return true. */
3618 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
3619 if ((comp == GT_EXPR && tst == 1)
3620 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
3621 {
3622 if (overflow_infinity_range_p (vr))
3623 *strict_overflow_p = true;
3624 return boolean_true_node;
3625 }
3626
3627 /* If VR is to the left of VAL, return false. */
3628 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
3629 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
3630 || (comp == GE_EXPR && tst == -1))
3631 {
3632 if (overflow_infinity_range_p (vr))
3633 *strict_overflow_p = true;
3634 return boolean_false_node;
3635 }
3636
3637 /* Otherwise, we don't know. */
3638 return NULL_TREE;
3639 }
3640
3641 gcc_unreachable ();
3642 }
3643
3644
3645 /* Debugging dumps. */
3646
3647 void dump_value_range (FILE *, value_range_t *);
3648 void debug_value_range (value_range_t *);
3649 void dump_all_value_ranges (FILE *);
3650 void debug_all_value_ranges (void);
3651 void dump_vr_equiv (FILE *, bitmap);
3652 void debug_vr_equiv (bitmap);
3653
3654
3655 /* Dump value range VR to FILE. */
3656
3657 void
3658 dump_value_range (FILE *file, value_range_t *vr)
3659 {
3660 if (vr == NULL)
3661 fprintf (file, "[]");
3662 else if (vr->type == VR_UNDEFINED)
3663 fprintf (file, "UNDEFINED");
3664 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
3665 {
3666 tree type = TREE_TYPE (vr->min);
3667
3668 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
3669
3670 if (is_negative_overflow_infinity (vr->min))
3671 fprintf (file, "-INF(OVF)");
3672 else if (INTEGRAL_TYPE_P (type)
3673 && !TYPE_UNSIGNED (type)
3674 && vrp_val_is_min (vr->min))
3675 fprintf (file, "-INF");
3676 else
3677 print_generic_expr (file, vr->min, 0);
3678
3679 fprintf (file, ", ");
3680
3681 if (is_positive_overflow_infinity (vr->max))
3682 fprintf (file, "+INF(OVF)");
3683 else if (INTEGRAL_TYPE_P (type)
3684 && vrp_val_is_max (vr->max))
3685 fprintf (file, "+INF");
3686 else
3687 print_generic_expr (file, vr->max, 0);
3688
3689 fprintf (file, "]");
3690
3691 if (vr->equiv)
3692 {
3693 bitmap_iterator bi;
3694 unsigned i, c = 0;
3695
3696 fprintf (file, " EQUIVALENCES: { ");
3697
3698 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
3699 {
3700 print_generic_expr (file, ssa_name (i), 0);
3701 fprintf (file, " ");
3702 c++;
3703 }
3704
3705 fprintf (file, "} (%u elements)", c);
3706 }
3707 }
3708 else if (vr->type == VR_VARYING)
3709 fprintf (file, "VARYING");
3710 else
3711 fprintf (file, "INVALID RANGE");
3712 }
3713
3714
3715 /* Dump value range VR to stderr. */
3716
3717 void
3718 debug_value_range (value_range_t *vr)
3719 {
3720 dump_value_range (stderr, vr);
3721 fprintf (stderr, "\n");
3722 }
3723
3724
3725 /* Dump value ranges of all SSA_NAMEs to FILE. */
3726
3727 void
3728 dump_all_value_ranges (FILE *file)
3729 {
3730 size_t i;
3731
3732 for (i = 0; i < num_ssa_names; i++)
3733 {
3734 if (vr_value[i])
3735 {
3736 print_generic_expr (file, ssa_name (i), 0);
3737 fprintf (file, ": ");
3738 dump_value_range (file, vr_value[i]);
3739 fprintf (file, "\n");
3740 }
3741 }
3742
3743 fprintf (file, "\n");
3744 }
3745
3746
3747 /* Dump all value ranges to stderr. */
3748
3749 void
3750 debug_all_value_ranges (void)
3751 {
3752 dump_all_value_ranges (stderr);
3753 }
3754
3755
3756 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
3757 create a new SSA name N and return the assertion assignment
3758 'N = ASSERT_EXPR <V, V OP W>'. */
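/* For instance (the SSA names below are hypothetical), for COND 'x_3 > 10'
   this builds and returns

     x_4 = ASSERT_EXPR <x_3, x_3 > 10>;

   where x_4 is a fresh name duplicated from x_3 and registered as its
   replacement for the later SSA update.  */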
3759
3760 static gimple
3761 build_assert_expr_for (tree cond, tree v)
3762 {
3763 tree n;
3764 gimple assertion;
3765
3766 gcc_assert (TREE_CODE (v) == SSA_NAME);
3767 n = duplicate_ssa_name (v, NULL);
3768
3769 if (COMPARISON_CLASS_P (cond))
3770 {
3771 tree a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
3772 assertion = gimple_build_assign (n, a);
3773 }
3774 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
3775 {
3776 /* Given !V, build the assignment N = false. */
3777 tree op0 = TREE_OPERAND (cond, 0);
3778 gcc_assert (op0 == v);
3779 assertion = gimple_build_assign (n, boolean_false_node);
3780 }
3781 else if (TREE_CODE (cond) == SSA_NAME)
3782 {
3783 /* Given V, build the assignment N = true. */
3784 gcc_assert (v == cond);
3785 assertion = gimple_build_assign (n, boolean_true_node);
3786 }
3787 else
3788 gcc_unreachable ();
3789
3790 SSA_NAME_DEF_STMT (n) = assertion;
3791
3792 /* The new ASSERT_EXPR creates a new SSA name that replaces the
3793 operand of the ASSERT_EXPR. Register the new name and the old one
3794 in the replacement table so that we can fix the SSA web after
3795 adding all the ASSERT_EXPRs. */
3796 register_new_name_mapping (n, v);
3797
3798 return assertion;
3799 }
3800
3801
3802 /* Return true if the GIMPLE_COND statement STMT is a predicate
3803 involving floating point values. */
3804
3805 static inline bool
3806 fp_predicate (gimple stmt)
3807 {
3808 GIMPLE_CHECK (stmt, GIMPLE_COND);
3809
3810 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
3811 }
3812
3813
3814 /* If the range of values taken by OP can be inferred after STMT executes,
3815 return the comparison code (COMP_CODE_P) and value (VAL_P) that
3816 describe the inferred range. Return true if a range could be
3817 inferred. */
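/* For instance (hypothetical names), for the statement '*p_2 = 1' and
   OP == p_2, this returns true with *COMP_CODE_P == NE_EXPR and
   *VAL_P == 0 when -fdelete-null-pointer-checks is enabled, i.e. the
   dereference lets us assume p_2 != 0 afterwards.  */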
3818
3819 static bool
3820 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
3821 {
3822 *val_p = NULL_TREE;
3823 *comp_code_p = ERROR_MARK;
3824
3825 /* Do not attempt to infer anything in names that flow through
3826 abnormal edges. */
3827 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
3828 return false;
3829
3830 /* Similarly, don't infer anything from statements that may throw
3831 exceptions. */
3832 if (stmt_could_throw_p (stmt))
3833 return false;
3834
3835 /* If STMT is the last statement of a basic block with no
3836 successors, there is no point inferring anything about any of its
3837 operands. We would not be able to find a proper insertion point
3838 for the assertion, anyway. */
3839 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
3840 return false;
3841
3842 /* We can only assume that a pointer dereference will yield
3843 non-NULL if -fdelete-null-pointer-checks is enabled. */
3844 if (flag_delete_null_pointer_checks
3845 && POINTER_TYPE_P (TREE_TYPE (op))
3846 && gimple_code (stmt) != GIMPLE_ASM)
3847 {
3848 unsigned num_uses, num_loads, num_stores;
3849
3850 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
3851 if (num_loads + num_stores > 0)
3852 {
3853 *val_p = build_int_cst (TREE_TYPE (op), 0);
3854 *comp_code_p = NE_EXPR;
3855 return true;
3856 }
3857 }
3858
3859 return false;
3860 }
3861
3862
3863 void dump_asserts_for (FILE *, tree);
3864 void debug_asserts_for (tree);
3865 void dump_all_asserts (FILE *);
3866 void debug_all_asserts (void);
3867
3868 /* Dump all the registered assertions for NAME to FILE. */
3869
3870 void
3871 dump_asserts_for (FILE *file, tree name)
3872 {
3873 assert_locus_t loc;
3874
3875 fprintf (file, "Assertions to be inserted for ");
3876 print_generic_expr (file, name, 0);
3877 fprintf (file, "\n");
3878
3879 loc = asserts_for[SSA_NAME_VERSION (name)];
3880 while (loc)
3881 {
3882 fprintf (file, "\t");
3883 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
3884 fprintf (file, "\n\tBB #%d", loc->bb->index);
3885 if (loc->e)
3886 {
3887 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
3888 loc->e->dest->index);
3889 dump_edge_info (file, loc->e, 0);
3890 }
3891 fprintf (file, "\n\tPREDICATE: ");
3892 print_generic_expr (file, name, 0);
3893 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
3894 print_generic_expr (file, loc->val, 0);
3895 fprintf (file, "\n\n");
3896 loc = loc->next;
3897 }
3898
3899 fprintf (file, "\n");
3900 }
3901
3902
3903 /* Dump all the registered assertions for NAME to stderr. */
3904
3905 void
3906 debug_asserts_for (tree name)
3907 {
3908 dump_asserts_for (stderr, name);
3909 }
3910
3911
3912 /* Dump all the registered assertions for all the names to FILE. */
3913
3914 void
3915 dump_all_asserts (FILE *file)
3916 {
3917 unsigned i;
3918 bitmap_iterator bi;
3919
3920 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
3921 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
3922 dump_asserts_for (file, ssa_name (i));
3923 fprintf (file, "\n");
3924 }
3925
3926
3927 /* Dump all the registered assertions for all the names to stderr. */
3928
3929 void
3930 debug_all_asserts (void)
3931 {
3932 dump_all_asserts (stderr);
3933 }
3934
3935
3936 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
3937 'EXPR COMP_CODE VAL' at a location that dominates block BB or
3938 E->DEST, then register this location as a possible insertion point
3939 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
3940
3941 BB, E and SI provide the exact insertion point for the new
3942 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
3943 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
3944 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
3945 must not be NULL. */
3946
3947 static void
3948 register_new_assert_for (tree name, tree expr,
3949 enum tree_code comp_code,
3950 tree val,
3951 basic_block bb,
3952 edge e,
3953 gimple_stmt_iterator si)
3954 {
3955 assert_locus_t n, loc, last_loc;
3956 basic_block dest_bb;
3957
3958 #if defined ENABLE_CHECKING
3959 gcc_assert (bb == NULL || e == NULL);
3960
3961 if (e == NULL)
3962 gcc_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
3963 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
3964 #endif
3965
3966 /* Never build an assert comparing against an integer constant with
3967 TREE_OVERFLOW set. This confuses our undefined overflow warning
3968 machinery. */
3969 if (TREE_CODE (val) == INTEGER_CST
3970 && TREE_OVERFLOW (val))
3971 val = build_int_cst_wide (TREE_TYPE (val),
3972 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
3973
3974 /* The new assertion A will be inserted at BB or E. We need to
3975 determine if the new location is dominated by a previously
3976 registered location for A. If we are doing an edge insertion,
3977 assume that A will be inserted at E->DEST. Note that this is not
3978 necessarily true.
3979
3980 If E is a critical edge, it will be split. But even if E is
3981 split, the new block will dominate the same set of blocks that
3982 E->DEST dominates.
3983
3984 The reverse, however, is not true: blocks dominated by E->DEST
3985 will not be dominated by the new block created to split E. So,
3986 if the insertion location is on a critical edge, we will not use
3987 the new location to move another assertion previously registered
3988 at a block dominated by E->DEST. */
3989 dest_bb = (bb) ? bb : e->dest;
3990
3991 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
3992 VAL at a block dominating DEST_BB, then we don't need to insert a new
3993 one. Similarly, if the same assertion already exists at a block
3994 dominated by DEST_BB and the new location is not on a critical
3995 edge, then update the existing location for the assertion (i.e.,
3996 move the assertion up in the dominance tree).
3997
3998 Note, this is implemented as a simple linked list because there
3999 should not be more than a handful of assertions registered per
4000 name. If this becomes a performance problem, a table hashed by
4001 COMP_CODE and VAL could be implemented. */
4002 loc = asserts_for[SSA_NAME_VERSION (name)];
4003 last_loc = loc;
4004 while (loc)
4005 {
4006 if (loc->comp_code == comp_code
4007 && (loc->val == val
4008 || operand_equal_p (loc->val, val, 0))
4009 && (loc->expr == expr
4010 || operand_equal_p (loc->expr, expr, 0)))
4011 {
4012 /* If the assertion NAME COMP_CODE VAL has already been
4013 registered at a basic block that dominates DEST_BB, then
4014 we don't need to insert the same assertion again. Note
4015 that we don't check strict dominance here to avoid
4016 replicating the same assertion inside the same basic
4017 block more than once (e.g., when a pointer is
4018 dereferenced several times inside a block).
4019
4020 An exception to this rule is edge insertions. If the
4021 new assertion is to be inserted on edge E, then it will
4022 dominate all the other insertions that we may want to
4023 insert in DEST_BB. So, if we are doing an edge
4024 insertion, don't do this dominance check. */
4025 if (e == NULL
4026 && dominated_by_p (CDI_DOMINATORS, dest_bb, loc->bb))
4027 return;
4028
4029 /* Otherwise, if E is not a critical edge and DEST_BB
4030 dominates the existing location for the assertion, move
4031 the assertion up in the dominance tree by updating its
4032 location information. */
4033 if ((e == NULL || !EDGE_CRITICAL_P (e))
4034 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4035 {
4036 loc->bb = dest_bb;
4037 loc->e = e;
4038 loc->si = si;
4039 return;
4040 }
4041 }
4042
4043 /* Update the last node of the list and move to the next one. */
4044 last_loc = loc;
4045 loc = loc->next;
4046 }
4047
4048 /* If we didn't find an assertion already registered for
4049 NAME COMP_CODE VAL, add a new one at the end of the list of
4050 assertions associated with NAME. */
4051 n = XNEW (struct assert_locus_d);
4052 n->bb = dest_bb;
4053 n->e = e;
4054 n->si = si;
4055 n->comp_code = comp_code;
4056 n->val = val;
4057 n->expr = expr;
4058 n->next = NULL;
4059
4060 if (last_loc)
4061 last_loc->next = n;
4062 else
4063 asserts_for[SSA_NAME_VERSION (name)] = n;
4064
4065 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4066 }
4067
4068 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4069 Extract a suitable test code and value and store them into *CODE_P and
4070 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4071
4072 If no extraction was possible, return FALSE, otherwise return TRUE.
4073
4074 If INVERT is true, then we invert the result stored into *CODE_P. */
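/* For instance (hypothetical names), with NAME == x_1 and the predicate
   '5 < x_1', the comparison is flipped so that *CODE_P == GT_EXPR and
   *VAL_P == 5; with INVERT true the stored code becomes LE_EXPR.  */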
4075
4076 static bool
4077 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4078 tree cond_op0, tree cond_op1,
4079 bool invert, enum tree_code *code_p,
4080 tree *val_p)
4081 {
4082 enum tree_code comp_code;
4083 tree val;
4084
4085 /* We have a comparison of the form NAME COMP VAL
4086 or VAL COMP NAME. */
4087 if (name == cond_op1)
4088 {
4089 /* If the predicate is of the form VAL COMP NAME, flip
4090 COMP around because we need to register NAME as the
4091 first operand in the predicate. */
4092 comp_code = swap_tree_comparison (cond_code);
4093 val = cond_op0;
4094 }
4095 else
4096 {
4097 /* The comparison is of the form NAME COMP VAL, so the
4098 comparison code remains unchanged. */
4099 comp_code = cond_code;
4100 val = cond_op1;
4101 }
4102
4103 /* Invert the comparison code as necessary. */
4104 if (invert)
4105 comp_code = invert_tree_comparison (comp_code, 0);
4106
4107 /* VRP does not handle float types. */
4108 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4109 return false;
4110
4111 /* Do not register always-false predicates.
4112 FIXME: this works around a limitation in fold() when dealing with
4113 enumerations. Given 'enum { N1, N2 } x;', fold will not
4114 fold 'if (x > N2)' to 'if (0)'. */
4115 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4116 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4117 {
4118 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4119 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4120
4121 if (comp_code == GT_EXPR
4122 && (!max
4123 || compare_values (val, max) == 0))
4124 return false;
4125
4126 if (comp_code == LT_EXPR
4127 && (!min
4128 || compare_values (val, min) == 0))
4129 return false;
4130 }
4131 *code_p = comp_code;
4132 *val_p = val;
4133 return true;
4134 }
4135
4136 /* Try to register an edge assertion for SSA name NAME on edge E for
4137 the condition COND contributing to the conditional jump pointed to by BSI.
4138 Invert the condition COND if INVERT is true.
4139 Return true if an assertion for NAME could be registered. */
4140
4141 static bool
4142 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4143 enum tree_code cond_code,
4144 tree cond_op0, tree cond_op1, bool invert)
4145 {
4146 tree val;
4147 enum tree_code comp_code;
4148 bool retval = false;
4149
4150 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4151 cond_op0,
4152 cond_op1,
4153 invert, &comp_code, &val))
4154 return false;
4155
4156 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4157 reachable from E. */
4158 if (live_on_edge (e, name)
4159 && !has_single_use (name))
4160 {
4161 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4162 retval = true;
4163 }
4164
4165 /* In the case of NAME <= CST and NAME being defined as
4166 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4167 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4168 This catches range and anti-range tests. */
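  /* For instance (hypothetical names, assuming 32-bit unsigned int),
     the classic range-test idiom

       tmp_5 = (unsigned int) i_2 + 4294967286;   that is, i_2 - 10
       if (tmp_5 <= 5)

     lets us assert i_2 >= 10 and i_2 <= 15 on the true edge.  */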
4169 if ((comp_code == LE_EXPR
4170 || comp_code == GT_EXPR)
4171 && TREE_CODE (val) == INTEGER_CST
4172 && TYPE_UNSIGNED (TREE_TYPE (val)))
4173 {
4174 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4175 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4176
4177 /* Extract CST2 from the (optional) addition. */
4178 if (is_gimple_assign (def_stmt)
4179 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4180 {
4181 name2 = gimple_assign_rhs1 (def_stmt);
4182 cst2 = gimple_assign_rhs2 (def_stmt);
4183 if (TREE_CODE (name2) == SSA_NAME
4184 && TREE_CODE (cst2) == INTEGER_CST)
4185 def_stmt = SSA_NAME_DEF_STMT (name2);
4186 }
4187
4188 /* Extract NAME2 from the (optional) sign-changing cast. */
4189 if (gimple_assign_cast_p (def_stmt))
4190 {
4191 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4192 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4193 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4194 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4195 name3 = gimple_assign_rhs1 (def_stmt);
4196 }
4197
4198 /* If name3 is used later, create an ASSERT_EXPR for it. */
4199 if (name3 != NULL_TREE
4200 && TREE_CODE (name3) == SSA_NAME
4201 && (cst2 == NULL_TREE
4202 || TREE_CODE (cst2) == INTEGER_CST)
4203 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4204 && live_on_edge (e, name3)
4205 && !has_single_use (name3))
4206 {
4207 tree tmp;
4208
4209 /* Build an expression for the range test. */
4210 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4211 if (cst2 != NULL_TREE)
4212 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4213
4214 if (dump_file)
4215 {
4216 fprintf (dump_file, "Adding assert for ");
4217 print_generic_expr (dump_file, name3, 0);
4218 fprintf (dump_file, " from ");
4219 print_generic_expr (dump_file, tmp, 0);
4220 fprintf (dump_file, "\n");
4221 }
4222
4223 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4224
4225 retval = true;
4226 }
4227
4228 /* If name2 is used later, create an ASSERT_EXPR for it. */
4229 if (name2 != NULL_TREE
4230 && TREE_CODE (name2) == SSA_NAME
4231 && TREE_CODE (cst2) == INTEGER_CST
4232 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4233 && live_on_edge (e, name2)
4234 && !has_single_use (name2))
4235 {
4236 tree tmp;
4237
4238 /* Build an expression for the range test. */
4239 tmp = name2;
4240 if (TREE_TYPE (name) != TREE_TYPE (name2))
4241 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4242 if (cst2 != NULL_TREE)
4243 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4244
4245 if (dump_file)
4246 {
4247 fprintf (dump_file, "Adding assert for ");
4248 print_generic_expr (dump_file, name2, 0);
4249 fprintf (dump_file, " from ");
4250 print_generic_expr (dump_file, tmp, 0);
4251 fprintf (dump_file, "\n");
4252 }
4253
4254 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4255
4256 retval = true;
4257 }
4258 }
4259
4260 return retval;
4261 }
4262
4263 /* OP is an operand of a truth value expression which is known to have
4264 a particular value. Register any asserts for OP and for any
4265 operands in OP's defining statement.
4266
4267 If CODE is EQ_EXPR, then we want to register OP is zero (false),
4268 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
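/* For instance (hypothetical names), if OP is x_5 defined by
   'x_5 = a_1 > 3' and CODE is NE_EXPR (x_5 is known to be true), we
   recurse into the comparison and also register the assertion a_1 > 3
   on edge E.  */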
4269
4270 static bool
4271 register_edge_assert_for_1 (tree op, enum tree_code code,
4272 edge e, gimple_stmt_iterator bsi)
4273 {
4274 bool retval = false;
4275 gimple op_def;
4276 tree val;
4277 enum tree_code rhs_code;
4278
4279 /* We only care about SSA_NAMEs. */
4280 if (TREE_CODE (op) != SSA_NAME)
4281 return false;
4282
4283 /* We know that OP will have a zero or nonzero value. If OP is used
4284 more than once go ahead and register an assert for OP.
4285
4286 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
4287 it will always be set for OP (because OP is used in a COND_EXPR in
4288 the subgraph). */
4289 if (!has_single_use (op))
4290 {
4291 val = build_int_cst (TREE_TYPE (op), 0);
4292 register_new_assert_for (op, op, code, val, NULL, e, bsi);
4293 retval = true;
4294 }
4295
4296 /* Now look at how OP is set. If it's set from a comparison,
4297 a truth operation or some bit operations, then we may be able
4298 to register information about the operands of that assignment. */
4299 op_def = SSA_NAME_DEF_STMT (op);
4300 if (gimple_code (op_def) != GIMPLE_ASSIGN)
4301 return retval;
4302
4303 rhs_code = gimple_assign_rhs_code (op_def);
4304
4305 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
4306 {
4307 bool invert = (code == EQ_EXPR ? true : false);
4308 tree op0 = gimple_assign_rhs1 (op_def);
4309 tree op1 = gimple_assign_rhs2 (op_def);
4310
4311 if (TREE_CODE (op0) == SSA_NAME)
4312 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
4313 invert);
4314 if (TREE_CODE (op1) == SSA_NAME)
4315 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
4316 invert);
4317 }
4318 else if ((code == NE_EXPR
4319 && (gimple_assign_rhs_code (op_def) == TRUTH_AND_EXPR
4320 || gimple_assign_rhs_code (op_def) == BIT_AND_EXPR))
4321 || (code == EQ_EXPR
4322 && (gimple_assign_rhs_code (op_def) == TRUTH_OR_EXPR
4323 || gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR)))
4324 {
4325 /* Recurse on each operand. */
4326 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4327 code, e, bsi);
4328 retval |= register_edge_assert_for_1 (gimple_assign_rhs2 (op_def),
4329 code, e, bsi);
4330 }
4331 else if (gimple_assign_rhs_code (op_def) == TRUTH_NOT_EXPR)
4332 {
4333 /* Recurse, flipping CODE. */
4334 code = invert_tree_comparison (code, false);
4335 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4336 code, e, bsi);
4337 }
4338 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
4339 {
4340 /* Recurse through the copy. */
4341 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4342 code, e, bsi);
4343 }
4344 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
4345 {
4346 /* Recurse through the type conversion. */
4347 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
4348 code, e, bsi);
4349 }
4350
4351 return retval;
4352 }
4353
4354 /* Try to register an edge assertion for SSA name NAME on edge E for
4355 the condition COND contributing to the conditional jump pointed to by SI.
4356 Return true if an assertion for NAME could be registered. */
4357
4358 static bool
4359 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
4360 enum tree_code cond_code, tree cond_op0,
4361 tree cond_op1)
4362 {
4363 tree val;
4364 enum tree_code comp_code;
4365 bool retval = false;
4366 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
4367
4368 /* Do not attempt to infer anything in names that flow through
4369 abnormal edges. */
4370 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
4371 return false;
4372
4373 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4374 cond_op0, cond_op1,
4375 is_else_edge,
4376 &comp_code, &val))
4377 return false;
4378
4379 /* Register ASSERT_EXPRs for name. */
4380 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
4381 cond_op1, is_else_edge);
4382
4383
4384 /* If COND is effectively an equality test of an SSA_NAME against
4385 the value zero or one, then we may be able to assert values
4386 for SSA_NAMEs which flow into COND. */
4387
4388 /* If the test is NAME == 1 or NAME != 0 and the defining statement of
4389 NAME is a TRUTH_AND_EXPR or BIT_AND_EXPR, we can assert that both of
4390 its operands have a nonzero value. */
4391 if (((comp_code == EQ_EXPR && integer_onep (val))
4392 || (comp_code == NE_EXPR && integer_zerop (val))))
4393 {
4394 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4395
4396 if (is_gimple_assign (def_stmt)
4397 && (gimple_assign_rhs_code (def_stmt) == TRUTH_AND_EXPR
4398 || gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR))
4399 {
4400 tree op0 = gimple_assign_rhs1 (def_stmt);
4401 tree op1 = gimple_assign_rhs2 (def_stmt);
4402 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
4403 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
4404 }
4405 }
4406
4407 /* If the test is NAME == 0 or NAME != 1 and the defining statement of
4408 NAME is a TRUTH_OR_EXPR, we can assert that both of its operands
4409 have a zero value. */
4410 if (((comp_code == EQ_EXPR && integer_zerop (val))
4411 || (comp_code == NE_EXPR && integer_onep (val))))
4412 {
4413 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4414
4415 if (is_gimple_assign (def_stmt)
4416 && (gimple_assign_rhs_code (def_stmt) == TRUTH_OR_EXPR
4417 /* For BIT_IOR_EXPR, both operands are necessarily
4418 zero only when NAME == 0. */
4419 || (comp_code == EQ_EXPR
4420 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR))))
4421 {
4422 tree op0 = gimple_assign_rhs1 (def_stmt);
4423 tree op1 = gimple_assign_rhs2 (def_stmt);
4424 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
4425 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
4426 }
4427 }
4428
4429 return retval;
4430 }
4431
4432
4433 /* Determine whether the outgoing edges of BB should receive an
4434 ASSERT_EXPR for each of the operands of BB's LAST statement.
4435 The last statement of BB must be a COND_EXPR.
4436
4437 If any of the sub-graphs rooted at BB have an interesting use of
4438 the predicate operands, an assert location node is added to the
4439 list of assertions for the corresponding operands. */
4440
4441 static bool
4442 find_conditional_asserts (basic_block bb, gimple last)
4443 {
4444 bool need_assert;
4445 gimple_stmt_iterator bsi;
4446 tree op;
4447 edge_iterator ei;
4448 edge e;
4449 ssa_op_iter iter;
4450
4451 need_assert = false;
4452 bsi = gsi_for_stmt (last);
4453
4454 /* Look for uses of the operands in each of the sub-graphs
4455 rooted at BB. We need to check each of the outgoing edges
4456 separately, so that we know what kind of ASSERT_EXPR to
4457 insert. */
4458 FOR_EACH_EDGE (e, ei, bb->succs)
4459 {
4460 if (e->dest == bb)
4461 continue;
4462
4463 /* Register the necessary assertions for each operand in the
4464 conditional predicate. */
4465 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
4466 {
4467 need_assert |= register_edge_assert_for (op, e, bsi,
4468 gimple_cond_code (last),
4469 gimple_cond_lhs (last),
4470 gimple_cond_rhs (last));
4471 }
4472 }
4473
4474 return need_assert;
4475 }
4476
4477 /* Compare two case labels sorting first by the destination label uid
4478 and then by the case value. */
4479
4480 static int
4481 compare_case_labels (const void *p1, const void *p2)
4482 {
4483 const_tree const case1 = *(const_tree const*)p1;
4484 const_tree const case2 = *(const_tree const*)p2;
4485 unsigned int uid1 = DECL_UID (CASE_LABEL (case1));
4486 unsigned int uid2 = DECL_UID (CASE_LABEL (case2));
4487
4488 if (uid1 < uid2)
4489 return -1;
4490 else if (uid1 == uid2)
4491 {
4492 /* Make sure the default label is first in a group. */
4493 if (!CASE_LOW (case1))
4494 return -1;
4495 else if (!CASE_LOW (case2))
4496 return 1;
4497 else
4498 return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
4499 }
4500 else
4501 return 1;
4502 }
4503
4504 /* Determine whether the outgoing edges of BB should receive an
4505 ASSERT_EXPR for each of the operands of BB's LAST statement.
4506 The last statement of BB must be a SWITCH_EXPR.
4507
4508 If any of the sub-graphs rooted at BB have an interesting use of
4509 the predicate operands, an assert location node is added to the
4510 list of assertions for the corresponding operands. */
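/* For instance (hypothetical names), for 'switch (i_2)' a label
   'case 3 ... 7:' registers the assertions i_2 >= 3 and i_2 <= 7 on the
   edge to its destination (when i_2 is live there), while a plain
   'case 5:' registers i_2 == 5.  */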
4511
4512 static bool
4513 find_switch_asserts (basic_block bb, gimple last)
4514 {
4515 bool need_assert;
4516 gimple_stmt_iterator bsi;
4517 tree op;
4518 edge e;
4519 tree vec2;
4520 size_t n = gimple_switch_num_labels(last);
4521 #if GCC_VERSION >= 4000
4522 unsigned int idx;
4523 #else
4524 /* Work around GCC 3.4 bug (PR 37086). */
4525 volatile unsigned int idx;
4526 #endif
4527
4528 need_assert = false;
4529 bsi = gsi_for_stmt (last);
4530 op = gimple_switch_index (last);
4531 if (TREE_CODE (op) != SSA_NAME)
4532 return false;
4533
4534 /* Build a vector of case labels sorted by destination label. */
4535 vec2 = make_tree_vec (n);
4536 for (idx = 0; idx < n; ++idx)
4537 TREE_VEC_ELT (vec2, idx) = gimple_switch_label (last, idx);
4538 qsort (&TREE_VEC_ELT (vec2, 0), n, sizeof (tree), compare_case_labels);
4539
4540 for (idx = 0; idx < n; ++idx)
4541 {
4542 tree min, max;
4543 tree cl = TREE_VEC_ELT (vec2, idx);
4544
4545 min = CASE_LOW (cl);
4546 max = CASE_HIGH (cl);
4547
4548 /* If there are multiple case labels with the same destination
4549 we need to combine them to a single value range for the edge. */
4550 if (idx + 1 < n
4551 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx + 1)))
4552 {
4553 /* Skip labels until the last of the group. */
4554 do {
4555 ++idx;
4556 } while (idx < n
4557 && CASE_LABEL (cl) == CASE_LABEL (TREE_VEC_ELT (vec2, idx)));
4558 --idx;
4559
4560 /* Pick up the maximum of the case label range. */
4561 if (CASE_HIGH (TREE_VEC_ELT (vec2, idx)))
4562 max = CASE_HIGH (TREE_VEC_ELT (vec2, idx));
4563 else
4564 max = CASE_LOW (TREE_VEC_ELT (vec2, idx));
4565 }
4566
4567 /* Nothing to do if the range includes the default label until we
4568 can register anti-ranges. */
4569 if (min == NULL_TREE)
4570 continue;
4571
4572 /* Find the edge to register the assert expr on. */
4573 e = find_edge (bb, label_to_block (CASE_LABEL (cl)));
4574
4575 /* Register the necessary assertions for the operand in the
4576 SWITCH_EXPR. */
4577 need_assert |= register_edge_assert_for (op, e, bsi,
4578 max ? GE_EXPR : EQ_EXPR,
4579 op,
4580 fold_convert (TREE_TYPE (op),
4581 min));
4582 if (max)
4583 {
4584 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
4585 op,
4586 fold_convert (TREE_TYPE (op),
4587 max));
4588 }
4589 }
4590
4591 return need_assert;
4592 }
4593
4594
4595 /* Traverse all the statements in block BB looking for statements that
4596 may generate useful assertions for the SSA names in their operands.
4597 If a statement produces a useful assertion A for name N_i, then the
4598 list of assertions already generated for N_i is scanned to
4599 determine if A is actually needed.
4600
4601 If N_i already had the assertion A at a location dominating the
4602 current location, then nothing needs to be done. Otherwise, the
4603 new location for A is recorded instead.
4604
4605 1- For every statement S in BB, all the variables used by S are
4606 added to bitmap FOUND_IN_SUBGRAPH.
4607
4608 2- If statement S uses an operand N in a way that exposes a known
4609 value range for N, then if N was not already generated by an
4610 ASSERT_EXPR, create a new assert location for N. For instance,
4611 if N is a pointer and the statement dereferences it, we can
4612 assume that N is not NULL.
4613
4614 3- COND_EXPRs are a special case of #2. We can derive range
4615 information from the predicate but need to insert different
4616 ASSERT_EXPRs for each of the sub-graphs rooted at the
4617 conditional block. If the last statement of BB is a conditional
4618 expression of the form 'X op Y', then
4619
4620 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
4621
4622 b) If the conditional is the only entry point to the sub-graph
4623 corresponding to the THEN_CLAUSE, recurse into it. On
4624 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
4625 an ASSERT_EXPR is added for the corresponding variable.
4626
4627 c) Repeat step (b) on the ELSE_CLAUSE.
4628
4629 d) Mark X and Y in FOUND_IN_SUBGRAPH.
4630
4631 For instance,
4632
4633 if (a == 9)
4634 b = a;
4635 else
4636 b = c + 1;
4637
4638 In this case, an assertion on the THEN clause is useful to
4639 determine that 'a' is always 9 on that edge. However, an assertion
4640 on the ELSE clause would be unnecessary.
4641
4642 4- If BB does not end in a conditional expression, then we recurse
4643 into BB's dominator children.
4644
4645 At the end of the recursive traversal, every SSA name will have a
4646 list of locations where ASSERT_EXPRs should be added. When a new
4647 location for name N is found, it is registered by calling
4648 register_new_assert_for. That function keeps track of all the
4649 registered assertions to prevent adding unnecessary assertions.
4650 For instance, if a pointer P_4 is dereferenced more than once in a
4651 dominator tree, only the location dominating all the dereferences of
4652 P_4 will receive an ASSERT_EXPR.
4653
4654 If this function returns true, then it means that there are names
4655 for which we need to generate ASSERT_EXPRs. Those assertions are
4656 inserted by process_assert_insertions. */
4657
4658 static bool
4659 find_assert_locations_1 (basic_block bb, sbitmap live)
4660 {
4661 gimple_stmt_iterator si;
4662 gimple last;
4663 gimple phi;
4664 bool need_assert;
4665
4666 need_assert = false;
4667 last = last_stmt (bb);
4668
4669 /* If BB's last statement is a conditional statement involving integer
4670 operands, determine if we need to add ASSERT_EXPRs. */
4671 if (last
4672 && gimple_code (last) == GIMPLE_COND
4673 && !fp_predicate (last)
4674 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4675 need_assert |= find_conditional_asserts (bb, last);
4676
4677 /* If BB's last statement is a switch statement involving integer
4678 operands, determine if we need to add ASSERT_EXPRs. */
4679 if (last
4680 && gimple_code (last) == GIMPLE_SWITCH
4681 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
4682 need_assert |= find_switch_asserts (bb, last);
4683
4684 /* Traverse all the statements in BB marking used names and looking
4685 for statements that may infer assertions for their used operands. */
4686 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
4687 {
4688 gimple stmt;
4689 tree op;
4690 ssa_op_iter i;
4691
4692 stmt = gsi_stmt (si);
4693
4694 if (is_gimple_debug (stmt))
4695 continue;
4696
4697 /* See if we can derive an assertion for any of STMT's operands. */
4698 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
4699 {
4700 tree value;
4701 enum tree_code comp_code;
4702
4703 /* Mark OP in our live bitmap. */
4704 SET_BIT (live, SSA_NAME_VERSION (op));
4705
4706 /* If OP is used in such a way that we can infer a value
4707 range for it, and we don't find a previous assertion for
4708 it, create a new assertion location node for OP. */
4709 if (infer_value_range (stmt, op, &comp_code, &value))
4710 {
4711 /* If we are able to infer a nonzero value range for OP,
4712 then walk backwards through the use-def chain to see if OP
4713 was set via a typecast.
4714
4715 If so, then we can also infer a nonzero value range
4716 for the operand of the NOP_EXPR. */
4717 if (comp_code == NE_EXPR && integer_zerop (value))
4718 {
4719 tree t = op;
4720 gimple def_stmt = SSA_NAME_DEF_STMT (t);
4721
4722 while (is_gimple_assign (def_stmt)
4723 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
4724 && TREE_CODE
4725 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
4726 && POINTER_TYPE_P
4727 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
4728 {
4729 t = gimple_assign_rhs1 (def_stmt);
4730 def_stmt = SSA_NAME_DEF_STMT (t);
4731
4732 /* Note we want to register the assert for the
4733 operand of the NOP_EXPR after SI, not after the
4734 conversion. */
4735 if (! has_single_use (t))
4736 {
4737 register_new_assert_for (t, t, comp_code, value,
4738 bb, NULL, si);
4739 need_assert = true;
4740 }
4741 }
4742 }
4743
4744 /* If OP is used only once, namely in this STMT, don't
4745 bother creating an ASSERT_EXPR for it. Such an
4746 ASSERT_EXPR would do nothing but increase compile time. */
4747 if (!has_single_use (op))
4748 {
4749 register_new_assert_for (op, op, comp_code, value,
4750 bb, NULL, si);
4751 need_assert = true;
4752 }
4753 }
4754 }
4755 }
4756
4757 /* Traverse all PHI nodes in BB marking used operands. */
4758 for (si = gsi_start_phis (bb); !gsi_end_p(si); gsi_next (&si))
4759 {
4760 use_operand_p arg_p;
4761 ssa_op_iter i;
4762 phi = gsi_stmt (si);
4763
4764 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
4765 {
4766 tree arg = USE_FROM_PTR (arg_p);
4767 if (TREE_CODE (arg) == SSA_NAME)
4768 SET_BIT (live, SSA_NAME_VERSION (arg));
4769 }
4770 }
4771
4772 return need_assert;
4773 }
4774
4775 /* Do an RPO walk over the function computing SSA name liveness
4776 on-the-fly and deciding on assert expressions to insert.
4777 Returns true if there are assert expressions to be inserted. */
4778
4779 static bool
4780 find_assert_locations (void)
4781 {
4782 int *rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4783 int *bb_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4784 int *last_rpo = XCNEWVEC (int, last_basic_block + NUM_FIXED_BLOCKS);
4785 int rpo_cnt, i;
4786 bool need_asserts;
4787
4788 live = XCNEWVEC (sbitmap, last_basic_block + NUM_FIXED_BLOCKS);
4789 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
4790 for (i = 0; i < rpo_cnt; ++i)
4791 bb_rpo[rpo[i]] = i;
4792
4793 need_asserts = false;
4794 for (i = rpo_cnt-1; i >= 0; --i)
4795 {
4796 basic_block bb = BASIC_BLOCK (rpo[i]);
4797 edge e;
4798 edge_iterator ei;
4799
4800 if (!live[rpo[i]])
4801 {
4802 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
4803 sbitmap_zero (live[rpo[i]]);
4804 }
4805
4806 /* Process BB and update the live information with uses in
4807 this block. */
4808 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
4809
4810 /* Merge liveness into the predecessor blocks and free it. */
4811 if (!sbitmap_empty_p (live[rpo[i]]))
4812 {
4813 int pred_rpo = i;
4814 FOR_EACH_EDGE (e, ei, bb->preds)
4815 {
4816 int pred = e->src->index;
4817 if (e->flags & EDGE_DFS_BACK)
4818 continue;
4819
4820 if (!live[pred])
4821 {
4822 live[pred] = sbitmap_alloc (num_ssa_names);
4823 sbitmap_zero (live[pred]);
4824 }
4825 sbitmap_a_or_b (live[pred], live[pred], live[rpo[i]]);
4826
4827 if (bb_rpo[pred] < pred_rpo)
4828 pred_rpo = bb_rpo[pred];
4829 }
4830
4831 /* Record the RPO number of the last visited block that needs
4832 live information from this block. */
4833 last_rpo[rpo[i]] = pred_rpo;
4834 }
4835 else
4836 {
4837 sbitmap_free (live[rpo[i]]);
4838 live[rpo[i]] = NULL;
4839 }
4840
4841 /* We can free all successors' live bitmaps if all their
4842 predecessors have been visited already. */
4843 FOR_EACH_EDGE (e, ei, bb->succs)
4844 if (last_rpo[e->dest->index] == i
4845 && live[e->dest->index])
4846 {
4847 sbitmap_free (live[e->dest->index]);
4848 live[e->dest->index] = NULL;
4849 }
4850 }
4851
4852 XDELETEVEC (rpo);
4853 XDELETEVEC (bb_rpo);
4854 XDELETEVEC (last_rpo);
4855 for (i = 0; i < last_basic_block + NUM_FIXED_BLOCKS; ++i)
4856 if (live[i])
4857 sbitmap_free (live[i]);
4858 XDELETEVEC (live);
4859
4860 return need_asserts;
4861 }
4862
4863 /* Create an ASSERT_EXPR for NAME and insert it in the location
4864 indicated by LOC. Return true if we made any edge insertions. */
4865
4866 static bool
4867 process_assert_insertions_for (tree name, assert_locus_t loc)
4868 {
4869 /* Build the comparison expression NAME_i COMP_CODE VAL. */
4870 gimple stmt;
4871 tree cond;
4872 gimple assert_stmt;
4873 edge_iterator ei;
4874 edge e;
4875
4876 /* If we have X <=> X do not insert an assert expr for that. */
4877 if (loc->expr == loc->val)
4878 return false;
4879
4880 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
4881 assert_stmt = build_assert_expr_for (cond, name);
4882 if (loc->e)
4883 {
4884 /* We have been asked to insert the assertion on an edge. This
4885 is used only by COND_EXPR and SWITCH_EXPR assertions. */
4886 #if defined ENABLE_CHECKING
4887 gcc_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
4888 || gimple_code (gsi_stmt (loc->si)) == GIMPLE_SWITCH);
4889 #endif
4890
4891 gsi_insert_on_edge (loc->e, assert_stmt);
4892 return true;
4893 }
4894
4895 /* Otherwise, we can insert right after LOC->SI, provided the
4896 statement is not one that must end its basic block. */
4897 stmt = gsi_stmt (loc->si);
4898 if (!stmt_ends_bb_p (stmt))
4899 {
4900 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
4901 return false;
4902 }
4903
4904 /* If STMT must be the last statement in BB, we can only insert new
4905 assertions on the non-abnormal edge out of BB. Note that since
4906 STMT is not control flow, there may only be one non-abnormal edge
4907 out of BB. */
4908 FOR_EACH_EDGE (e, ei, loc->bb->succs)
4909 if (!(e->flags & EDGE_ABNORMAL))
4910 {
4911 gsi_insert_on_edge (e, assert_stmt);
4912 return true;
4913 }
4914
4915 gcc_unreachable ();
4916 }
4917
4918
4919 /* Process all the insertions registered for every name N_i registered
4920 in NEED_ASSERT_FOR. The list of assertions to be inserted are
4921 found in ASSERTS_FOR[i]. */
4922
4923 static void
4924 process_assert_insertions (void)
4925 {
4926 unsigned i;
4927 bitmap_iterator bi;
4928 bool update_edges_p = false;
4929 int num_asserts = 0;
4930
4931 if (dump_file && (dump_flags & TDF_DETAILS))
4932 dump_all_asserts (dump_file);
4933
4934 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4935 {
4936 assert_locus_t loc = asserts_for[i];
4937 gcc_assert (loc);
4938
4939 while (loc)
4940 {
4941 assert_locus_t next = loc->next;
4942 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
4943 free (loc);
4944 loc = next;
4945 num_asserts++;
4946 }
4947 }
4948
4949 if (update_edges_p)
4950 gsi_commit_edge_inserts ();
4951
4952 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
4953 num_asserts);
4954 }
4955
4956
4957 /* Traverse the flowgraph looking for conditional jumps to insert range
4958 expressions. These range expressions are meant to provide information
4959 to optimizations that need to reason in terms of value ranges. They
4960 will not be expanded into RTL. For instance, given:
4961
4962 x = ...
4963 y = ...
4964 if (x < y)
4965 y = x - 2;
4966 else
4967 x = y + 3;
4968
4969 this pass will transform the code into:
4970
4971 x = ...
4972 y = ...
4973 if (x < y)
4974 {
4975 x = ASSERT_EXPR <x, x < y>
4976 y = x - 2
4977 }
4978 else
4979 {
4980 y = ASSERT_EXPR <y, x <= y>
4981 x = y + 3
4982 }
4983
4984 The idea is that once copy and constant propagation have run, other
4985 optimizations will be able to determine what ranges of values can 'x'
4986 take in different paths of the code, simply by checking the reaching
4987 definition of 'x'. */
4988
4989 static void
4990 insert_range_assertions (void)
4991 {
4992 need_assert_for = BITMAP_ALLOC (NULL);
4993 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
4994
4995 calculate_dominance_info (CDI_DOMINATORS);
4996
4997 if (find_assert_locations ())
4998 {
4999 process_assert_insertions ();
5000 update_ssa (TODO_update_ssa_no_phi);
5001 }
5002
5003 if (dump_file && (dump_flags & TDF_DETAILS))
5004 {
5005 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
5006 dump_function_to_file (current_function_decl, dump_file, dump_flags);
5007 }
5008
5009 free (asserts_for);
5010 BITMAP_FREE (need_assert_for);
5011 }
5012
5013 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible arrays
5014 and "struct" hacks. If VRP can determine that the
5015 array subscript is a constant, check if it is outside the valid
5016 range. If the array subscript is a RANGE, warn if it is
5017 non-overlapping with the valid range.
5018 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
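/* For instance (hypothetical code), given 'int a[10];', an access a[i_3]
   where VRP knows i_3 is in [10, 12] triggers the "above array bounds"
   warning, whereas taking the address &a[10] does not, because
   IGNORE_OFF_BY_ONE permits the one-past-the-end subscript.  */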
5019
5020 static void
5021 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
5022 {
5023 value_range_t* vr = NULL;
5024 tree low_sub, up_sub;
5025 tree low_bound, up_bound, up_bound_p1;
5026 tree base;
5027
5028 if (TREE_NO_WARNING (ref))
5029 return;
5030
5031 low_sub = up_sub = TREE_OPERAND (ref, 1);
5032 up_bound = array_ref_up_bound (ref);
5033
5034 /* Cannot check flexible arrays. */
5035 if (!up_bound
5036 || TREE_CODE (up_bound) != INTEGER_CST)
5037 return;
5038
5039 /* Accesses to trailing arrays via pointers may access storage
5040 beyond the type's array bounds. */
5041 base = get_base_address (ref);
5042 if (base
5043 && INDIRECT_REF_P (base))
5044 {
5045 tree cref, next = NULL_TREE;
5046
5047 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
5048 return;
5049
5050 cref = TREE_OPERAND (ref, 0);
5051 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
5052 for (next = TREE_CHAIN (TREE_OPERAND (cref, 1));
5053 next && TREE_CODE (next) != FIELD_DECL;
5054 next = TREE_CHAIN (next))
5055 ;
5056
5057 /* If this is the last field in a struct type or a field in a
5058 union type do not warn. */
5059 if (!next)
5060 return;
5061 }
5062
5063 low_bound = array_ref_low_bound (ref);
5064 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node, 0);
5065
5066 if (TREE_CODE (low_sub) == SSA_NAME)
5067 {
5068 vr = get_value_range (low_sub);
5069 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
5070 {
5071 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
5072 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
5073 }
5074 }
5075
5076 if (vr && vr->type == VR_ANTI_RANGE)
5077 {
5078 if (TREE_CODE (up_sub) == INTEGER_CST
5079 && tree_int_cst_lt (up_bound, up_sub)
5080 && TREE_CODE (low_sub) == INTEGER_CST
5081 && tree_int_cst_lt (low_sub, low_bound))
5082 {
5083 warning_at (location, OPT_Warray_bounds,
5084 "array subscript is outside array bounds");
5085 TREE_NO_WARNING (ref) = 1;
5086 }
5087 }
5088 else if (TREE_CODE (up_sub) == INTEGER_CST
5089 && (ignore_off_by_one
5090 ? (tree_int_cst_lt (up_bound, up_sub)
5091 && !tree_int_cst_equal (up_bound_p1, up_sub))
5092 : (tree_int_cst_lt (up_bound, up_sub)
5093 || tree_int_cst_equal (up_bound_p1, up_sub))))
5094 {
5095 warning_at (location, OPT_Warray_bounds,
5096 "array subscript is above array bounds");
5097 TREE_NO_WARNING (ref) = 1;
5098 }
5099 else if (TREE_CODE (low_sub) == INTEGER_CST
5100 && tree_int_cst_lt (low_sub, low_bound))
5101 {
5102 warning_at (location, OPT_Warray_bounds,
5103 "array subscript is below array bounds");
5104 TREE_NO_WARNING (ref) = 1;
5105 }
5106 }
5107
5108 /* Search whether the expression T, located at LOCATION, computes the
5109 address of an ARRAY_REF, and call check_array_ref on it. */
5110
5111 static void
5112 search_for_addr_array (tree t, location_t location)
5113 {
5114 while (TREE_CODE (t) == SSA_NAME)
5115 {
5116 gimple g = SSA_NAME_DEF_STMT (t);
5117
5118 if (gimple_code (g) != GIMPLE_ASSIGN)
5119 return;
5120
5121 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
5122 != GIMPLE_SINGLE_RHS)
5123 return;
5124
5125 t = gimple_assign_rhs1 (g);
5126 }
5127
5128
5129 /* We are only interested in addresses of ARRAY_REFs. */
5130 if (TREE_CODE (t) != ADDR_EXPR)
5131 return;
5132
5133 /* Check each ARRAY_REF in the reference chain. */
5134 do
5135 {
5136 if (TREE_CODE (t) == ARRAY_REF)
5137 check_array_ref (location, t, true /*ignore_off_by_one*/);
5138
5139 t = TREE_OPERAND (t, 0);
5140 }
5141 while (handled_component_p (t));
5142 }
5143
5144 /* walk_tree() callback that checks if *TP is
5145 an ARRAY_REF inside an ADDR_EXPR (in which an array
5146 subscript one outside the valid range is allowed). Call
5147 check_array_ref for each ARRAY_REF found. The location is
5148 passed in DATA. */
5149
5150 static tree
5151 check_array_bounds (tree *tp, int *walk_subtree, void *data)
5152 {
5153 tree t = *tp;
5154 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5155 location_t location;
5156
5157 if (EXPR_HAS_LOCATION (t))
5158 location = EXPR_LOCATION (t);
5159 else
5160 {
5161 location_t *locp = (location_t *) wi->info;
5162 location = *locp;
5163 }
5164
5165 *walk_subtree = TRUE;
5166
5167 if (TREE_CODE (t) == ARRAY_REF)
5168 check_array_ref (location, t, false /*ignore_off_by_one*/);
5169
5170 if (TREE_CODE (t) == INDIRECT_REF
5171 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
5172 search_for_addr_array (TREE_OPERAND (t, 0), location);
5173
5174 if (TREE_CODE (t) == ADDR_EXPR)
5175 *walk_subtree = FALSE;
5176
5177 return NULL_TREE;
5178 }
5179
5180 /* Walk over all statements of all reachable BBs and call check_array_bounds
5181 on them. */
5182
5183 static void
5184 check_all_array_refs (void)
5185 {
5186 basic_block bb;
5187 gimple_stmt_iterator si;
5188
5189 FOR_EACH_BB (bb)
5190 {
5191 edge_iterator ei;
5192 edge e;
5193 bool executable = false;
5194
5195 /* Skip blocks that were found to be unreachable. */
5196 FOR_EACH_EDGE (e, ei, bb->preds)
5197 executable |= !!(e->flags & EDGE_EXECUTABLE);
5198 if (!executable)
5199 continue;
5200
5201 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5202 {
5203 gimple stmt = gsi_stmt (si);
5204 struct walk_stmt_info wi;
5205 if (!gimple_has_location (stmt))
5206 continue;
5207
5208 if (is_gimple_call (stmt))
5209 {
5210 size_t i;
5211 size_t n = gimple_call_num_args (stmt);
5212 for (i = 0; i < n; i++)
5213 {
5214 tree arg = gimple_call_arg (stmt, i);
5215 search_for_addr_array (arg, gimple_location (stmt));
5216 }
5217 }
5218 else
5219 {
5220 memset (&wi, 0, sizeof (wi));
5221 wi.info = CONST_CAST (void *, (const void *)
5222 gimple_location_ptr (stmt));
5223
5224 walk_gimple_op (gsi_stmt (si),
5225 check_array_bounds,
5226 &wi);
5227 }
5228 }
5229 }
5230 }
5231
5232 /* Convert range assertion expressions into the implied copies and
5233 copy propagate away the copies. Doing the trivial copy propagation
5234 here avoids the need to run the full copy propagation pass after
5235 VRP.
5236
5237 FIXME, this will eventually lead to copy propagation removing the
5238 names that had useful range information attached to them. For
5239 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
5240 then N_i will have the range [4, +INF].
5241
5242 However, by converting the assertion into the implied copy
5243 operation N_i = N_j, we will then copy-propagate N_j into the uses
5244 of N_i and lose the range information. We may want to hold on to
5245 ASSERT_EXPRs a little while longer as the ranges could be used in
5246 things like jump threading.
5247
5248 The problem with keeping ASSERT_EXPRs around is that passes after
5249 VRP need to handle them appropriately.
5250
5251 Another approach would be to make the range information a first
5252 class property of the SSA_NAME so that it can be queried from
5253 any pass. This is made somewhat more complex by the need for
5254 multiple ranges to be associated with one SSA_NAME. */
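/* For instance (hypothetical names), given

     x_4 = ASSERT_EXPR <x_3, x_3 > 10>;

   every use of x_4 is replaced with x_3 and the assertion statement is
   removed, discarding the range information that was attached to x_4.  */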
5255
5256 static void
5257 remove_range_assertions (void)
5258 {
5259 basic_block bb;
5260 gimple_stmt_iterator si;
5261
5262 /* Note that the BSI iterator bump happens at the bottom of the
5263 loop and no bump is necessary if we're removing the statement
5264 referenced by the current BSI. */
5265 FOR_EACH_BB (bb)
5266 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
5267 {
5268 gimple stmt = gsi_stmt (si);
5269 gimple use_stmt;
5270
5271 if (is_gimple_assign (stmt)
5272 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
5273 {
5274 tree rhs = gimple_assign_rhs1 (stmt);
5275 tree var;
5276 tree cond = fold (ASSERT_EXPR_COND (rhs));
5277 use_operand_p use_p;
5278 imm_use_iterator iter;
5279
5280 gcc_assert (cond != boolean_false_node);
5281
5282 /* Propagate the RHS into every use of the LHS. */
5283 var = ASSERT_EXPR_VAR (rhs);
5284 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
5285 gimple_assign_lhs (stmt))
5286 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
5287 {
5288 SET_USE (use_p, var);
5289 gcc_assert (TREE_CODE (var) == SSA_NAME);
5290 }
5291
5292 /* And finally, remove the copy, it is not needed. */
5293 gsi_remove (&si, true);
5294 release_defs (stmt);
5295 }
5296 else
5297 gsi_next (&si);
5298 }
5299 }
5300
5301
5302 /* Return true if STMT is interesting for VRP. */
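/* For instance (hypothetical names), an assignment 'i_3 = j_2 + 1' or a
   PHI node defining an integer or pointer SSA name is interesting, while
   an assignment to a floating point name or a statement with virtual
   operands (other than a builtin call) is not.  */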
5303
5304 static bool
5305 stmt_interesting_for_vrp (gimple stmt)
5306 {
5307 if (gimple_code (stmt) == GIMPLE_PHI
5308 && is_gimple_reg (gimple_phi_result (stmt))
5309 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))
5310 || POINTER_TYPE_P (TREE_TYPE (gimple_phi_result (stmt)))))
5311 return true;
5312 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
5313 {
5314 tree lhs = gimple_get_lhs (stmt);
5315
5316 /* In general, assignments with virtual operands are not useful
5317 for deriving ranges, with the obvious exception of calls to
5318 builtin functions. */
5319 if (lhs && TREE_CODE (lhs) == SSA_NAME
5320 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5321 || POINTER_TYPE_P (TREE_TYPE (lhs)))
5322 && ((is_gimple_call (stmt)
5323 && gimple_call_fndecl (stmt) != NULL_TREE
5324 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
5325 || !gimple_vuse (stmt)))
5326 return true;
5327 }
5328 else if (gimple_code (stmt) == GIMPLE_COND
5329 || gimple_code (stmt) == GIMPLE_SWITCH)
5330 return true;
5331
5332 return false;
5333 }
5334
5335
5336 /* Initialize local data structures for VRP. */
5337
5338 static void
5339 vrp_initialize (void)
5340 {
5341 basic_block bb;
5342
5343 vr_value = XCNEWVEC (value_range_t *, num_ssa_names);
5344 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
5345
5346 FOR_EACH_BB (bb)
5347 {
5348 gimple_stmt_iterator si;
5349
5350 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5351 {
5352 gimple phi = gsi_stmt (si);
5353 if (!stmt_interesting_for_vrp (phi))
5354 {
5355 tree lhs = PHI_RESULT (phi);
5356 set_value_range_to_varying (get_value_range (lhs));
5357 prop_set_simulate_again (phi, false);
5358 }
5359 else
5360 prop_set_simulate_again (phi, true);
5361 }
5362
5363 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
5364 {
5365 gimple stmt = gsi_stmt (si);
5366
5367 /* If the statement is a control statement, we must simulate
5368 it at least once. Failing to do so means that its outgoing
5369 edges will never get added. */
5370 if (stmt_ends_bb_p (stmt))
5371 prop_set_simulate_again (stmt, true);
5372 else if (!stmt_interesting_for_vrp (stmt))
5373 {
5374 ssa_op_iter i;
5375 tree def;
5376 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
5377 set_value_range_to_varying (get_value_range (def));
5378 prop_set_simulate_again (stmt, false);
5379 }
5380 else
5381 prop_set_simulate_again (stmt, true);
5382 }
5383 }
5384 }
5385
5386
5387 /* Visit assignment STMT. If it produces an interesting range, record
5388 the SSA name in *OUTPUT_P. */
5389
5390 static enum ssa_prop_result
5391 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
5392 {
5393 tree def, lhs;
5394 ssa_op_iter iter;
5395 enum gimple_code code = gimple_code (stmt);
5396 lhs = gimple_get_lhs (stmt);
5397
5398 /* We only keep track of ranges in integral and pointer types. */
5399 if (TREE_CODE (lhs) == SSA_NAME
5400 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5401 /* It is valid to have NULL MIN/MAX values on a type. See
5402 build_range_type. */
5403 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
5404 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
5405 || POINTER_TYPE_P (TREE_TYPE (lhs))))
5406 {
5407 value_range_t new_vr = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
5408
5409 if (code == GIMPLE_CALL)
5410 extract_range_basic (&new_vr, stmt);
5411 else
5412 extract_range_from_assignment (&new_vr, stmt);
5413
5414 if (update_value_range (lhs, &new_vr))
5415 {
5416 *output_p = lhs;
5417
5418 if (dump_file && (dump_flags & TDF_DETAILS))
5419 {
5420 fprintf (dump_file, "Found new range for ");
5421 print_generic_expr (dump_file, lhs, 0);
5422 fprintf (dump_file, ": ");
5423 dump_value_range (dump_file, &new_vr);
5424 fprintf (dump_file, "\n\n");
5425 }
5426
5427 if (new_vr.type == VR_VARYING)
5428 return SSA_PROP_VARYING;
5429
5430 return SSA_PROP_INTERESTING;
5431 }
5432
5433 return SSA_PROP_NOT_INTERESTING;
5434 }
5435
5436 /* Every other statement produces no useful ranges. */
5437 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
5438 set_value_range_to_varying (get_value_range (def));
5439
5440 return SSA_PROP_VARYING;
5441 }
5442
5443 /* Helper that gets the value range of the SSA_NAME with version I or,
5444 if that value range is varying or undefined, a symbolic range
5445 containing only the SSA_NAME itself. */
5446
5447 static inline value_range_t
5448 get_vr_for_comparison (int i)
5449 {
5450 value_range_t vr = *(vr_value[i]);
5451
5452 /* If name N_i does not have a valid range, use N_i as its own
5453 range. This allows us to compare against names that may
5454 have N_i in their ranges. */
5455 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
5456 {
5457 vr.type = VR_RANGE;
5458 vr.min = ssa_name (i);
5459 vr.max = ssa_name (i);
5460 }
5461
5462 return vr;
5463 }
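
/* A rough illustration of the fallback above, using a hypothetical SSA
   name x_7: if vr_value[7] is VARYING, the helper returns the symbolic
   range [x_7, x_7], so that a later comparison such as y_9 < x_7 can
   still be decided when y_9's range happens to be, say, [0, x_7 - 1].  */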
5464
5465 /* Compare all the value ranges for names equivalent to VAR with VAL
5466 using comparison code COMP. Return the same value returned by
5467 compare_range_with_value, including the setting of
5468 *STRICT_OVERFLOW_P. */
5469
5470 static tree
5471 compare_name_with_value (enum tree_code comp, tree var, tree val,
5472 bool *strict_overflow_p)
5473 {
5474 bitmap_iterator bi;
5475 unsigned i;
5476 bitmap e;
5477 tree retval, t;
5478 int used_strict_overflow;
5479 bool sop;
5480 value_range_t equiv_vr;
5481
5482 /* Get the set of equivalences for VAR. */
5483 e = get_value_range (var)->equiv;
5484
5485 /* Start at -1. Set it to 0 if we do a comparison without relying
5486 on overflow, or 1 if all comparisons rely on overflow. */
5487 used_strict_overflow = -1;
5488
5489   /* Compare VAR's value range with VAL.  */
5490 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
5491 sop = false;
5492 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
5493 if (retval)
5494 used_strict_overflow = sop ? 1 : 0;
5495
5496 /* If the equiv set is empty we have done all work we need to do. */
5497 if (e == NULL)
5498 {
5499 if (retval
5500 && used_strict_overflow > 0)
5501 *strict_overflow_p = true;
5502 return retval;
5503 }
5504
5505 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
5506 {
5507 equiv_vr = get_vr_for_comparison (i);
5508 sop = false;
5509 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
5510 if (t)
5511 {
5512 /* If we get different answers from different members
5513 of the equivalence set this check must be in a dead
5514 code region. Folding it to a trap representation
5515 would be correct here. For now just return don't-know. */
5516 if (retval != NULL
5517 && t != retval)
5518 {
5519 retval = NULL_TREE;
5520 break;
5521 }
5522 retval = t;
5523
5524 if (!sop)
5525 used_strict_overflow = 0;
5526 else if (used_strict_overflow < 0)
5527 used_strict_overflow = 1;
5528 }
5529 }
5530
5531 if (retval
5532 && used_strict_overflow > 0)
5533 *strict_overflow_p = true;
5534
5535 return retval;
5536 }
5537
5538
5539 /* Given a comparison code COMP and names N1 and N2, compare all the
5540 ranges equivalent to N1 against all the ranges equivalent to N2
5541 to determine the value of N1 COMP N2. Return the same value
5542 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
5543 whether we relied on an overflow infinity in the comparison. */
5544
5545
5546 static tree
5547 compare_names (enum tree_code comp, tree n1, tree n2,
5548 bool *strict_overflow_p)
5549 {
5550 tree t, retval;
5551 bitmap e1, e2;
5552 bitmap_iterator bi1, bi2;
5553 unsigned i1, i2;
5554 int used_strict_overflow;
5555 static bitmap_obstack *s_obstack = NULL;
5556 static bitmap s_e1 = NULL, s_e2 = NULL;
5557
5558 /* Compare the ranges of every name equivalent to N1 against the
5559 ranges of every name equivalent to N2. */
5560 e1 = get_value_range (n1)->equiv;
5561 e2 = get_value_range (n2)->equiv;
5562
5563 /* Use the fake bitmaps if e1 or e2 are not available. */
5564 if (s_obstack == NULL)
5565 {
5566 s_obstack = XNEW (bitmap_obstack);
5567 bitmap_obstack_initialize (s_obstack);
5568 s_e1 = BITMAP_ALLOC (s_obstack);
5569 s_e2 = BITMAP_ALLOC (s_obstack);
5570 }
5571 if (e1 == NULL)
5572 e1 = s_e1;
5573 if (e2 == NULL)
5574 e2 = s_e2;
5575
5576 /* Add N1 and N2 to their own set of equivalences to avoid
5577 duplicating the body of the loop just to check N1 and N2
5578 ranges. */
5579 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
5580 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
5581
5582 /* If the equivalence sets have a common intersection, then the two
5583 names can be compared without checking their ranges. */
5584 if (bitmap_intersect_p (e1, e2))
5585 {
5586 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5587 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5588
5589 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
5590 ? boolean_true_node
5591 : boolean_false_node;
5592 }
5593
5594 /* Start at -1. Set it to 0 if we do a comparison without relying
5595 on overflow, or 1 if all comparisons rely on overflow. */
5596 used_strict_overflow = -1;
5597
5598   /* Otherwise, compare all the equivalent ranges.  N1 and N2 were
5599      already added to their own equivalence sets above, so the loop
5600      below also checks their ranges.  */
5601 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
5602 {
5603 value_range_t vr1 = get_vr_for_comparison (i1);
5604
5605 t = retval = NULL_TREE;
5606 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
5607 {
5608 bool sop = false;
5609
5610 value_range_t vr2 = get_vr_for_comparison (i2);
5611
5612 t = compare_ranges (comp, &vr1, &vr2, &sop);
5613 if (t)
5614 {
5615 /* If we get different answers from different members
5616 of the equivalence set this check must be in a dead
5617 code region. Folding it to a trap representation
5618 would be correct here. For now just return don't-know. */
5619 if (retval != NULL
5620 && t != retval)
5621 {
5622 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5623 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5624 return NULL_TREE;
5625 }
5626 retval = t;
5627
5628 if (!sop)
5629 used_strict_overflow = 0;
5630 else if (used_strict_overflow < 0)
5631 used_strict_overflow = 1;
5632 }
5633 }
5634
5635 if (retval)
5636 {
5637 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5638 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5639 if (used_strict_overflow > 0)
5640 *strict_overflow_p = true;
5641 return retval;
5642 }
5643 }
5644
5645 /* None of the equivalent ranges are useful in computing this
5646 comparison. */
5647 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
5648 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
5649 return NULL_TREE;
5650 }
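
/* A sketch of how the equivalence sets are used, with hypothetical names:
   suppose i_3 and j_5 both carry the equivalence set { i_1 } because both
   were asserted equal to i_1 on this path.  The two sets intersect, so
   i_3 == j_5 folds to true and i_3 < j_5 folds to false without looking
   at any numeric ranges at all.  */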
5651
5652 /* Helper function for vrp_evaluate_conditional_warnv. */
5653
5654 static tree
5655 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
5656 tree op0, tree op1,
5657 bool * strict_overflow_p)
5658 {
5659 value_range_t *vr0, *vr1;
5660
5661 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
5662 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
5663
5664 if (vr0 && vr1)
5665 return compare_ranges (code, vr0, vr1, strict_overflow_p);
5666 else if (vr0 && vr1 == NULL)
5667 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
5668 else if (vr0 == NULL && vr1)
5669 return (compare_range_with_value
5670 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
5671 return NULL;
5672 }
5673
5674 /* Helper function for vrp_evaluate_conditional_warnv. */
5675
5676 static tree
5677 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
5678 tree op1, bool use_equiv_p,
5679 bool *strict_overflow_p, bool *only_ranges)
5680 {
5681 tree ret;
5682 if (only_ranges)
5683 *only_ranges = true;
5684
5685 /* We only deal with integral and pointer types. */
5686 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
5687 && !POINTER_TYPE_P (TREE_TYPE (op0)))
5688 return NULL_TREE;
5689
5690 if (use_equiv_p)
5691 {
5692 if (only_ranges
5693 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
5694 (code, op0, op1, strict_overflow_p)))
5695 return ret;
5696 *only_ranges = false;
5697 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
5698 return compare_names (code, op0, op1, strict_overflow_p);
5699 else if (TREE_CODE (op0) == SSA_NAME)
5700 return compare_name_with_value (code, op0, op1, strict_overflow_p);
5701 else if (TREE_CODE (op1) == SSA_NAME)
5702 return (compare_name_with_value
5703 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
5704 }
5705 else
5706 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
5707 strict_overflow_p);
5708 return NULL_TREE;
5709 }
5710
5711 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
5712    information.  Return NULL if the conditional cannot be evaluated.
5713 The ranges of all the names equivalent with the operands in COND
5714 will be used when trying to compute the value. If the result is
5715 based on undefined signed overflow, issue a warning if
5716 appropriate. */
5717
5718 static tree
5719 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
5720 {
5721 bool sop;
5722 tree ret;
5723 bool only_ranges;
5724
5725 /* Some passes and foldings leak constants with overflow flag set
5726 into the IL. Avoid doing wrong things with these and bail out. */
5727 if ((TREE_CODE (op0) == INTEGER_CST
5728 && TREE_OVERFLOW (op0))
5729 || (TREE_CODE (op1) == INTEGER_CST
5730 && TREE_OVERFLOW (op1)))
5731 return NULL_TREE;
5732
5733 sop = false;
5734 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
5735 &only_ranges);
5736
5737 if (ret && sop)
5738 {
5739 enum warn_strict_overflow_code wc;
5740 const char* warnmsg;
5741
5742 if (is_gimple_min_invariant (ret))
5743 {
5744 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
5745 warnmsg = G_("assuming signed overflow does not occur when "
5746 "simplifying conditional to constant");
5747 }
5748 else
5749 {
5750 wc = WARN_STRICT_OVERFLOW_COMPARISON;
5751 warnmsg = G_("assuming signed overflow does not occur when "
5752 "simplifying conditional");
5753 }
5754
5755 if (issue_strict_overflow_warning (wc))
5756 {
5757 location_t location;
5758
5759 if (!gimple_has_location (stmt))
5760 location = input_location;
5761 else
5762 location = gimple_location (stmt);
5763 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
5764 }
5765 }
5766
5767 if (warn_type_limits
5768 && ret && only_ranges
5769 && TREE_CODE_CLASS (code) == tcc_comparison
5770 && TREE_CODE (op0) == SSA_NAME)
5771 {
5772 /* If the comparison is being folded and the operand on the LHS
5773 is being compared against a constant value that is outside of
5774 the natural range of OP0's type, then the predicate will
5775 always fold regardless of the value of OP0. If -Wtype-limits
5776 was specified, emit a warning. */
5777 tree type = TREE_TYPE (op0);
5778 value_range_t *vr0 = get_value_range (op0);
5779
5780 if (vr0->type != VR_VARYING
5781 && INTEGRAL_TYPE_P (type)
5782 && vrp_val_is_min (vr0->min)
5783 && vrp_val_is_max (vr0->max)
5784 && is_gimple_min_invariant (op1))
5785 {
5786 location_t location;
5787
5788 if (!gimple_has_location (stmt))
5789 location = input_location;
5790 else
5791 location = gimple_location (stmt);
5792
5793 warning_at (location, OPT_Wtype_limits,
5794 integer_zerop (ret)
5795 ? G_("comparison always false "
5796 "due to limited range of data type")
5797 : G_("comparison always true "
5798 "due to limited range of data type"));
5799 }
5800 }
5801
5802 return ret;
5803 }
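
/* For instance (hypothetical fragment), assuming c_2 is an unsigned char
   whose recorded range is the full [0, 255] of its type:

     if (c_2 <= 255)
       ...

   Every value of the type satisfies the predicate, so it folds to true;
   with -Wtype-limits enabled this triggers the "comparison always true
   due to limited range of data type" warning at the conditional's
   location.  */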
5804
5805
5806 /* Visit conditional statement STMT. If we can determine which edge
5807 will be taken out of STMT's basic block, record it in
5808 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
5809 SSA_PROP_VARYING. */
5810
5811 static enum ssa_prop_result
5812 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
5813 {
5814 tree val;
5815 bool sop;
5816
5817 *taken_edge_p = NULL;
5818
5819 if (dump_file && (dump_flags & TDF_DETAILS))
5820 {
5821 tree use;
5822 ssa_op_iter i;
5823
5824 fprintf (dump_file, "\nVisiting conditional with predicate: ");
5825 print_gimple_stmt (dump_file, stmt, 0, 0);
5826 fprintf (dump_file, "\nWith known ranges\n");
5827
5828 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
5829 {
5830 fprintf (dump_file, "\t");
5831 print_generic_expr (dump_file, use, 0);
5832 fprintf (dump_file, ": ");
5833 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
5834 }
5835
5836 fprintf (dump_file, "\n");
5837 }
5838
5839 /* Compute the value of the predicate COND by checking the known
5840 ranges of each of its operands.
5841
5842 Note that we cannot evaluate all the equivalent ranges here
5843 because those ranges may not yet be final and with the current
5844 propagation strategy, we cannot determine when the value ranges
5845 of the names in the equivalence set have changed.
5846
5847 For instance, given the following code fragment
5848
5849 i_5 = PHI <8, i_13>
5850 ...
5851 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
5852 if (i_14 == 1)
5853 ...
5854
5855 Assume that on the first visit to i_14, i_5 has the temporary
5856 range [8, 8] because the second argument to the PHI function is
5857 not yet executable. We derive the range ~[0, 0] for i_14 and the
5858 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
5859 the first time, since i_14 is equivalent to the range [8, 8], we
5860 determine that the predicate is always false.
5861
5862 On the next round of propagation, i_13 is determined to be
5863 VARYING, which causes i_5 to drop down to VARYING. So, another
5864 visit to i_14 is scheduled. In this second visit, we compute the
5865 exact same range and equivalence set for i_14, namely ~[0, 0] and
5866 { i_5 }. But we did not have the previous range for i_5
5867      registered, so vrp_visit_assignment_or_call thinks that the range for
5868 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
5869 is not visited again, which stops propagation from visiting
5870 statements in the THEN clause of that if().
5871
5872 To properly fix this we would need to keep the previous range
5873 value for the names in the equivalence set. This way we would've
5874 discovered that from one visit to the other i_5 changed from
5875 range [8, 8] to VR_VARYING.
5876
5877 However, fixing this apparent limitation may not be worth the
5878 additional checking. Testing on several code bases (GCC, DLV,
5879 MICO, TRAMP3D and SPEC2000) showed that doing this results in
5880 4 more predicates folded in SPEC. */
5881 sop = false;
5882
5883 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
5884 gimple_cond_lhs (stmt),
5885 gimple_cond_rhs (stmt),
5886 false, &sop, NULL);
5887 if (val)
5888 {
5889 if (!sop)
5890 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
5891 else
5892 {
5893 if (dump_file && (dump_flags & TDF_DETAILS))
5894 fprintf (dump_file,
5895 "\nIgnoring predicate evaluation because "
5896 "it assumes that signed overflow is undefined");
5897 val = NULL_TREE;
5898 }
5899 }
5900
5901 if (dump_file && (dump_flags & TDF_DETAILS))
5902 {
5903 fprintf (dump_file, "\nPredicate evaluates to: ");
5904 if (val == NULL_TREE)
5905 fprintf (dump_file, "DON'T KNOW\n");
5906 else
5907 print_generic_stmt (dump_file, val, 0);
5908 }
5909
5910 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
5911 }
5912
5913 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
5914 that includes the value VAL. The search is restricted to the range
5915 [START_IDX, n - 1] where n is the size of VEC.
5916
5917 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
5918 returned.
5919
5920 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
5921 it is placed in IDX and false is returned.
5922
5923    If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
5924 returned. */
5925
5926 static bool
5927 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
5928 {
5929 size_t n = gimple_switch_num_labels (stmt);
5930 size_t low, high;
5931
5932   /* Binary search for the case label containing VAL, or the next larger
5933      one.  At each iteration we are searching in [low, high - 1]. */
5934
5935 for (low = start_idx, high = n; high != low; )
5936 {
5937 tree t;
5938 int cmp;
5939 /* Note that i != high, so we never ask for n. */
5940 size_t i = (high + low) / 2;
5941 t = gimple_switch_label (stmt, i);
5942
5943 /* Cache the result of comparing CASE_LOW and val. */
5944 cmp = tree_int_cst_compare (CASE_LOW (t), val);
5945
5946 if (cmp == 0)
5947 {
5948 /* Ranges cannot be empty. */
5949 *idx = i;
5950 return true;
5951 }
5952 else if (cmp > 0)
5953 high = i;
5954 else
5955 {
5956 low = i + 1;
5957 if (CASE_HIGH (t) != NULL
5958 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
5959 {
5960 *idx = i;
5961 return true;
5962 }
5963 }
5964 }
5965
5966 *idx = high;
5967 return false;
5968 }
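
/* Example of the search above, for a hypothetical switch whose labels at
   indices 1..4 (index 0 being the default) cover 1, 5...7, 9 and 20:

     VAL = 6    ->  *IDX = 2, returns true   (6 lies inside the 5...7 label)
     VAL = 8    ->  *IDX = 3, returns false  (next larger label starts at 9)
     VAL = 99   ->  *IDX = 5 (== n), returns false.  */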
5969
5970 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
5971 for values between MIN and MAX. The first index is placed in MIN_IDX. The
5972 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
5973 then MAX_IDX < MIN_IDX.
5974 Returns true if the default label is not needed. */
5975
5976 static bool
5977 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
5978 size_t *max_idx)
5979 {
5980 size_t i, j;
5981 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
5982 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
5983
5984 if (i == j
5985 && min_take_default
5986 && max_take_default)
5987 {
5988 /* Only the default case label reached.
5989 Return an empty range. */
5990 *min_idx = 1;
5991 *max_idx = 0;
5992 return false;
5993 }
5994 else
5995 {
5996 bool take_default = min_take_default || max_take_default;
5997 tree low, high;
5998 size_t k;
5999
6000 if (max_take_default)
6001 j--;
6002
6003       /* If the case label range is contiguous, we do not need
6004 	 the default case label.  Verify that.  */
6005 high = CASE_LOW (gimple_switch_label (stmt, i));
6006 if (CASE_HIGH (gimple_switch_label (stmt, i)))
6007 high = CASE_HIGH (gimple_switch_label (stmt, i));
6008 for (k = i + 1; k <= j; ++k)
6009 {
6010 low = CASE_LOW (gimple_switch_label (stmt, k));
6011 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high, 0)))
6012 {
6013 take_default = true;
6014 break;
6015 }
6016 high = low;
6017 if (CASE_HIGH (gimple_switch_label (stmt, k)))
6018 high = CASE_HIGH (gimple_switch_label (stmt, k));
6019 }
6020
6021 *min_idx = i;
6022 *max_idx = j;
6023 return !take_default;
6024 }
6025 }
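
/* Continuing the hypothetical switch above (labels 1, 5...7, 9 and 20 at
   indices 1..4): for MIN = 5 and MAX = 9 this returns *MIN_IDX = 2 and
   *MAX_IDX = 3, but the gap between 7 and 9 forces the default label, so
   the function returns false.  For MIN = 5 and MAX = 7 it returns
   *MIN_IDX = *MAX_IDX = 2 and true, since that single label covers the
   whole range.  */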
6026
6027 /* Visit switch statement STMT. If we can determine which edge
6028 will be taken out of STMT's basic block, record it in
6029 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
6030 SSA_PROP_VARYING. */
6031
6032 static enum ssa_prop_result
6033 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
6034 {
6035 tree op, val;
6036 value_range_t *vr;
6037 size_t i = 0, j = 0;
6038 bool take_default;
6039
6040 *taken_edge_p = NULL;
6041 op = gimple_switch_index (stmt);
6042 if (TREE_CODE (op) != SSA_NAME)
6043 return SSA_PROP_VARYING;
6044
6045 vr = get_value_range (op);
6046 if (dump_file && (dump_flags & TDF_DETAILS))
6047 {
6048 fprintf (dump_file, "\nVisiting switch expression with operand ");
6049 print_generic_expr (dump_file, op, 0);
6050 fprintf (dump_file, " with known range ");
6051 dump_value_range (dump_file, vr);
6052 fprintf (dump_file, "\n");
6053 }
6054
6055 if (vr->type != VR_RANGE
6056 || symbolic_range_p (vr))
6057 return SSA_PROP_VARYING;
6058
6059 /* Find the single edge that is taken from the switch expression. */
6060 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6061
6062   /* Check if the range spans no CASE_LABEL.  If so, we can only reach
6063      the default label.  */
6064 if (j < i)
6065 {
6066 gcc_assert (take_default);
6067 val = gimple_switch_default_label (stmt);
6068 }
6069 else
6070 {
6071       /* Check if the labels with index i to j, and possibly the default
6072 	 label, all reach the same destination label.  */
6073
6074 val = gimple_switch_label (stmt, i);
6075 if (take_default
6076 && CASE_LABEL (gimple_switch_default_label (stmt))
6077 != CASE_LABEL (val))
6078 {
6079 if (dump_file && (dump_flags & TDF_DETAILS))
6080 fprintf (dump_file, " not a single destination for this "
6081 "range\n");
6082 return SSA_PROP_VARYING;
6083 }
6084 for (++i; i <= j; ++i)
6085 {
6086 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
6087 {
6088 if (dump_file && (dump_flags & TDF_DETAILS))
6089 fprintf (dump_file, " not a single destination for this "
6090 "range\n");
6091 return SSA_PROP_VARYING;
6092 }
6093 }
6094 }
6095
6096 *taken_edge_p = find_edge (gimple_bb (stmt),
6097 label_to_block (CASE_LABEL (val)));
6098
6099 if (dump_file && (dump_flags & TDF_DETAILS))
6100 {
6101 fprintf (dump_file, " will take edge to ");
6102 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
6103 }
6104
6105 return SSA_PROP_INTERESTING;
6106 }
6107
6108
6109 /* Evaluate statement STMT. If the statement produces a useful range,
6110 return SSA_PROP_INTERESTING and record the SSA name with the
6111 interesting range into *OUTPUT_P.
6112
6113 If STMT is a conditional branch and we can determine its truth
6114 value, the taken edge is recorded in *TAKEN_EDGE_P.
6115
6116 If STMT produces a varying value, return SSA_PROP_VARYING. */
6117
6118 static enum ssa_prop_result
6119 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
6120 {
6121 tree def;
6122 ssa_op_iter iter;
6123
6124 if (dump_file && (dump_flags & TDF_DETAILS))
6125 {
6126 fprintf (dump_file, "\nVisiting statement:\n");
6127 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
6128 fprintf (dump_file, "\n");
6129 }
6130
6131 if (!stmt_interesting_for_vrp (stmt))
6132 gcc_assert (stmt_ends_bb_p (stmt));
6133 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6134 {
6135 /* In general, assignments with virtual operands are not useful
6136 for deriving ranges, with the obvious exception of calls to
6137 builtin functions. */
6138
6139 if ((is_gimple_call (stmt)
6140 && gimple_call_fndecl (stmt) != NULL_TREE
6141 && DECL_IS_BUILTIN (gimple_call_fndecl (stmt)))
6142 || !gimple_vuse (stmt))
6143 return vrp_visit_assignment_or_call (stmt, output_p);
6144 }
6145 else if (gimple_code (stmt) == GIMPLE_COND)
6146 return vrp_visit_cond_stmt (stmt, taken_edge_p);
6147 else if (gimple_code (stmt) == GIMPLE_SWITCH)
6148 return vrp_visit_switch_stmt (stmt, taken_edge_p);
6149
6150 /* All other statements produce nothing of interest for VRP, so mark
6151 their outputs varying and prevent further simulation. */
6152 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6153 set_value_range_to_varying (get_value_range (def));
6154
6155 return SSA_PROP_VARYING;
6156 }
6157
6158
6159 /* Meet operation for value ranges. Given two value ranges VR0 and
6160 VR1, store in VR0 a range that contains both VR0 and VR1. This
6161 may not be the smallest possible such range. */
6162
6163 static void
6164 vrp_meet (value_range_t *vr0, value_range_t *vr1)
6165 {
6166 if (vr0->type == VR_UNDEFINED)
6167 {
6168 copy_value_range (vr0, vr1);
6169 return;
6170 }
6171
6172 if (vr1->type == VR_UNDEFINED)
6173 {
6174 /* Nothing to do. VR0 already has the resulting range. */
6175 return;
6176 }
6177
6178 if (vr0->type == VR_VARYING)
6179 {
6180 /* Nothing to do. VR0 already has the resulting range. */
6181 return;
6182 }
6183
6184 if (vr1->type == VR_VARYING)
6185 {
6186 set_value_range_to_varying (vr0);
6187 return;
6188 }
6189
6190 if (vr0->type == VR_RANGE && vr1->type == VR_RANGE)
6191 {
6192 int cmp;
6193 tree min, max;
6194
6195 /* Compute the convex hull of the ranges. The lower limit of
6196 the new range is the minimum of the two ranges. If they
6197 cannot be compared, then give up. */
6198 cmp = compare_values (vr0->min, vr1->min);
6199 if (cmp == 0 || cmp == 1)
6200 min = vr1->min;
6201 else if (cmp == -1)
6202 min = vr0->min;
6203 else
6204 goto give_up;
6205
6206 /* Similarly, the upper limit of the new range is the maximum
6207 of the two ranges. If they cannot be compared, then
6208 give up. */
6209 cmp = compare_values (vr0->max, vr1->max);
6210 if (cmp == 0 || cmp == -1)
6211 max = vr1->max;
6212 else if (cmp == 1)
6213 max = vr0->max;
6214 else
6215 goto give_up;
6216
6217 /* Check for useless ranges. */
6218 if (INTEGRAL_TYPE_P (TREE_TYPE (min))
6219 && ((vrp_val_is_min (min) || is_overflow_infinity (min))
6220 && (vrp_val_is_max (max) || is_overflow_infinity (max))))
6221 goto give_up;
6222
6223 /* The resulting set of equivalences is the intersection of
6224 the two sets. */
6225 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6226 bitmap_and_into (vr0->equiv, vr1->equiv);
6227 else if (vr0->equiv && !vr1->equiv)
6228 bitmap_clear (vr0->equiv);
6229
6230 set_value_range (vr0, vr0->type, min, max, vr0->equiv);
6231 }
6232 else if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
6233 {
6234 /* Two anti-ranges meet only if their complements intersect.
6235 Only handle the case of identical ranges. */
6236 if (compare_values (vr0->min, vr1->min) == 0
6237 && compare_values (vr0->max, vr1->max) == 0
6238 && compare_values (vr0->min, vr0->max) == 0)
6239 {
6240 /* The resulting set of equivalences is the intersection of
6241 the two sets. */
6242 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6243 bitmap_and_into (vr0->equiv, vr1->equiv);
6244 else if (vr0->equiv && !vr1->equiv)
6245 bitmap_clear (vr0->equiv);
6246 }
6247 else
6248 goto give_up;
6249 }
6250 else if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
6251 {
6252 /* For a numeric range [VAL1, VAL2] and an anti-range ~[VAL3, VAL4],
6253 only handle the case where the ranges have an empty intersection.
6254 The result of the meet operation is the anti-range. */
6255 if (!symbolic_range_p (vr0)
6256 && !symbolic_range_p (vr1)
6257 && !value_ranges_intersect_p (vr0, vr1))
6258 {
6259 /* Copy most of VR1 into VR0. Don't copy VR1's equivalence
6260 set. We need to compute the intersection of the two
6261 equivalence sets. */
6262 if (vr1->type == VR_ANTI_RANGE)
6263 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr0->equiv);
6264
6265 /* The resulting set of equivalences is the intersection of
6266 the two sets. */
6267 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
6268 bitmap_and_into (vr0->equiv, vr1->equiv);
6269 else if (vr0->equiv && !vr1->equiv)
6270 bitmap_clear (vr0->equiv);
6271 }
6272 else
6273 goto give_up;
6274 }
6275 else
6276 gcc_unreachable ();
6277
6278 return;
6279
6280 give_up:
6281 /* Failed to find an efficient meet. Before giving up and setting
6282 the result to VARYING, see if we can at least derive a useful
6283 anti-range. FIXME, all this nonsense about distinguishing
6284 anti-ranges from ranges is necessary because of the odd
6285 semantics of range_includes_zero_p and friends. */
6286 if (!symbolic_range_p (vr0)
6287 && ((vr0->type == VR_RANGE && !range_includes_zero_p (vr0))
6288 || (vr0->type == VR_ANTI_RANGE && range_includes_zero_p (vr0)))
6289 && !symbolic_range_p (vr1)
6290 && ((vr1->type == VR_RANGE && !range_includes_zero_p (vr1))
6291 || (vr1->type == VR_ANTI_RANGE && range_includes_zero_p (vr1))))
6292 {
6293 set_value_range_to_nonnull (vr0, TREE_TYPE (vr0->min));
6294
6295 /* Since this meet operation did not result from the meeting of
6296 two equivalent names, VR0 cannot have any equivalences. */
6297 if (vr0->equiv)
6298 bitmap_clear (vr0->equiv);
6299 }
6300 else
6301 set_value_range_to_varying (vr0);
6302 }
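
/* A few worked examples of the meet operation, with made-up ranges:

     [1, 5]  meet  [3, 10]  ->  [1, 10]       (convex hull)
     [1, 5]  meet  ~[0, 0]  ->  ~[0, 0]       (range and anti-range with an
                                               empty intersection)
     [0, 5]  meet  ~[0, 0]  ->  VR_VARYING    (they intersect and [0, 5] may
                                               be zero, so not even a
                                               nonnull anti-range helps)

   Whenever a range results, its equivalence set is the intersection of
   the two input sets.  */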
6303
6304
6305 /* Visit all arguments for PHI node PHI that flow through executable
6306 edges. If a valid value range can be derived from all the incoming
6307 value ranges, set a new range for the LHS of PHI. */
6308
6309 static enum ssa_prop_result
6310 vrp_visit_phi_node (gimple phi)
6311 {
6312 size_t i;
6313 tree lhs = PHI_RESULT (phi);
6314 value_range_t *lhs_vr = get_value_range (lhs);
6315 value_range_t vr_result = { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL };
6316 int edges, old_edges;
6317 struct loop *l;
6318
6319 copy_value_range (&vr_result, lhs_vr);
6320
6321 if (dump_file && (dump_flags & TDF_DETAILS))
6322 {
6323 fprintf (dump_file, "\nVisiting PHI node: ");
6324 print_gimple_stmt (dump_file, phi, 0, dump_flags);
6325 }
6326
6327 edges = 0;
6328 for (i = 0; i < gimple_phi_num_args (phi); i++)
6329 {
6330 edge e = gimple_phi_arg_edge (phi, i);
6331
6332 if (dump_file && (dump_flags & TDF_DETAILS))
6333 {
6334 fprintf (dump_file,
6335 "\n Argument #%d (%d -> %d %sexecutable)\n",
6336 (int) i, e->src->index, e->dest->index,
6337 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
6338 }
6339
6340 if (e->flags & EDGE_EXECUTABLE)
6341 {
6342 tree arg = PHI_ARG_DEF (phi, i);
6343 value_range_t vr_arg;
6344
6345 ++edges;
6346
6347 if (TREE_CODE (arg) == SSA_NAME)
6348 {
6349 vr_arg = *(get_value_range (arg));
6350 }
6351 else
6352 {
6353 if (is_overflow_infinity (arg))
6354 {
6355 arg = copy_node (arg);
6356 TREE_OVERFLOW (arg) = 0;
6357 }
6358
6359 vr_arg.type = VR_RANGE;
6360 vr_arg.min = arg;
6361 vr_arg.max = arg;
6362 vr_arg.equiv = NULL;
6363 }
6364
6365 if (dump_file && (dump_flags & TDF_DETAILS))
6366 {
6367 fprintf (dump_file, "\t");
6368 print_generic_expr (dump_file, arg, dump_flags);
6369 fprintf (dump_file, "\n\tValue: ");
6370 dump_value_range (dump_file, &vr_arg);
6371 fprintf (dump_file, "\n");
6372 }
6373
6374 vrp_meet (&vr_result, &vr_arg);
6375
6376 if (vr_result.type == VR_VARYING)
6377 break;
6378 }
6379 }
6380
6381   /* If this is a loop PHI node, SCEV may know more about its
6382      value range.  */
6383 if (current_loops
6384 && (l = loop_containing_stmt (phi))
6385 && l->header == gimple_bb (phi))
6386 adjust_range_with_scev (&vr_result, l, phi, lhs);
6387
6388 if (vr_result.type == VR_VARYING)
6389 goto varying;
6390
6391 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
6392 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
6393
6394 /* To prevent infinite iterations in the algorithm, derive ranges
6395 when the new value is slightly bigger or smaller than the
6396 previous one. We don't do this if we have seen a new executable
6397 edge; this helps us avoid an overflow infinity for conditionals
6398 which are not in a loop. */
6399 if (lhs_vr->type == VR_RANGE && vr_result.type == VR_RANGE
6400 && edges <= old_edges)
6401 {
6402 if (!POINTER_TYPE_P (TREE_TYPE (lhs)))
6403 {
6404 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
6405 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
6406
6407 /* If the new minimum is smaller or larger than the previous
6408 one, go all the way to -INF. In the first case, to avoid
6409 iterating millions of times to reach -INF, and in the
6410 other case to avoid infinite bouncing between different
6411 minimums. */
6412 if (cmp_min > 0 || cmp_min < 0)
6413 {
6414 /* If we will end up with a (-INF, +INF) range, set it to
6415 VARYING. Same if the previous max value was invalid for
6416 the type and we'd end up with vr_result.min > vr_result.max. */
6417 if (vrp_val_is_max (vr_result.max)
6418 || compare_values (TYPE_MIN_VALUE (TREE_TYPE (vr_result.min)),
6419 vr_result.max) > 0)
6420 goto varying;
6421
6422 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
6423 || !vrp_var_may_overflow (lhs, phi))
6424 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
6425 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
6426 vr_result.min =
6427 negative_overflow_infinity (TREE_TYPE (vr_result.min));
6428 else
6429 goto varying;
6430 }
6431
6432 /* Similarly, if the new maximum is smaller or larger than
6433 the previous one, go all the way to +INF. */
6434 if (cmp_max < 0 || cmp_max > 0)
6435 {
6436 /* If we will end up with a (-INF, +INF) range, set it to
6437 VARYING. Same if the previous min value was invalid for
6438 the type and we'd end up with vr_result.max < vr_result.min. */
6439 if (vrp_val_is_min (vr_result.min)
6440 || compare_values (TYPE_MAX_VALUE (TREE_TYPE (vr_result.max)),
6441 vr_result.min) < 0)
6442 goto varying;
6443
6444 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
6445 || !vrp_var_may_overflow (lhs, phi))
6446 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
6447 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
6448 vr_result.max =
6449 positive_overflow_infinity (TREE_TYPE (vr_result.max));
6450 else
6451 goto varying;
6452 }
6453 }
6454 }
6455
6456   /* If the new range is different from the previous one, keep
6457      iterating.  */
6458 if (update_value_range (lhs, &vr_result))
6459 {
6460 if (dump_file && (dump_flags & TDF_DETAILS))
6461 {
6462 fprintf (dump_file, "Found new range for ");
6463 print_generic_expr (dump_file, lhs, 0);
6464 fprintf (dump_file, ": ");
6465 dump_value_range (dump_file, &vr_result);
6466 fprintf (dump_file, "\n\n");
6467 }
6468
6469 return SSA_PROP_INTERESTING;
6470 }
6471
6472 /* Nothing changed, don't add outgoing edges. */
6473 return SSA_PROP_NOT_INTERESTING;
6474
6475 /* No match found. Set the LHS to VARYING. */
6476 varying:
6477 set_value_range_to_varying (lhs_vr);
6478 return SSA_PROP_VARYING;
6479 }
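
/* A sketch of the widening above on a hypothetical counting loop:

     i_1 = PHI <0(entry), i_7(latch)>
     ...
     i_7 = i_1 + 1;

   Left alone, i_1's range would grow [0, 0], [0, 1], [0, 2], ... once per
   propagation step.  As soon as the number of executable incoming edges
   stops increasing, a changing maximum is pushed straight to +INF (or to
   a positive overflow infinity if the variable may wrap), giving
   [0, +INF] in a single step.  */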
6480
6481 /* Simplify boolean operations if the source is known
6482 to be already a boolean. */
6483 static bool
6484 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
6485 {
6486 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6487 tree val = NULL;
6488 tree op0, op1;
6489 value_range_t *vr;
6490 bool sop = false;
6491 bool need_conversion;
6492
6493 op0 = gimple_assign_rhs1 (stmt);
6494 if (TYPE_PRECISION (TREE_TYPE (op0)) != 1)
6495 {
6496 if (TREE_CODE (op0) != SSA_NAME)
6497 return false;
6498 vr = get_value_range (op0);
6499
6500 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6501 if (!val || !integer_onep (val))
6502 return false;
6503
6504 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6505 if (!val || !integer_onep (val))
6506 return false;
6507 }
6508
6509 if (rhs_code == TRUTH_NOT_EXPR)
6510 {
6511 rhs_code = NE_EXPR;
6512 op1 = build_int_cst (TREE_TYPE (op0), 1);
6513 }
6514 else
6515 {
6516 op1 = gimple_assign_rhs2 (stmt);
6517
6518 /* Reduce number of cases to handle. */
6519 if (is_gimple_min_invariant (op1))
6520 {
6521 /* Exclude anything that should have been already folded. */
6522 if (rhs_code != EQ_EXPR
6523 && rhs_code != NE_EXPR
6524 && rhs_code != TRUTH_XOR_EXPR)
6525 return false;
6526
6527 if (!integer_zerop (op1)
6528 && !integer_onep (op1)
6529 && !integer_all_onesp (op1))
6530 return false;
6531
6532 /* Limit the number of cases we have to consider. */
6533 if (rhs_code == EQ_EXPR)
6534 {
6535 rhs_code = NE_EXPR;
6536 op1 = fold_unary (TRUTH_NOT_EXPR, TREE_TYPE (op1), op1);
6537 }
6538 }
6539 else
6540 {
6541 /* Punt on A == B as there is no BIT_XNOR_EXPR. */
6542 if (rhs_code == EQ_EXPR)
6543 return false;
6544
6545 if (TYPE_PRECISION (TREE_TYPE (op1)) != 1)
6546 {
6547 vr = get_value_range (op1);
6548 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6549 if (!val || !integer_onep (val))
6550 return false;
6551
6552 val = compare_range_with_value (LE_EXPR, vr, integer_one_node, &sop);
6553 if (!val || !integer_onep (val))
6554 return false;
6555 }
6556 }
6557 }
6558
6559 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6560 {
6561 location_t location;
6562
6563 if (!gimple_has_location (stmt))
6564 location = input_location;
6565 else
6566 location = gimple_location (stmt);
6567
6568 if (rhs_code == TRUTH_AND_EXPR || rhs_code == TRUTH_OR_EXPR)
6569 warning_at (location, OPT_Wstrict_overflow,
6570 _("assuming signed overflow does not occur when "
6571 "simplifying && or || to & or |"));
6572 else
6573 warning_at (location, OPT_Wstrict_overflow,
6574 _("assuming signed overflow does not occur when "
6575 "simplifying ==, != or ! to identity or ^"));
6576 }
6577
6578 need_conversion =
6579 !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
6580 TREE_TYPE (op0));
6581
6582 /* Make sure to not sign-extend -1 as a boolean value. */
6583 if (need_conversion
6584 && !TYPE_UNSIGNED (TREE_TYPE (op0))
6585 && TYPE_PRECISION (TREE_TYPE (op0)) == 1)
6586 return false;
6587
6588 switch (rhs_code)
6589 {
6590 case TRUTH_AND_EXPR:
6591 rhs_code = BIT_AND_EXPR;
6592 break;
6593 case TRUTH_OR_EXPR:
6594 rhs_code = BIT_IOR_EXPR;
6595 break;
6596 case TRUTH_XOR_EXPR:
6597 case NE_EXPR:
6598 if (integer_zerop (op1))
6599 {
6600 gimple_assign_set_rhs_with_ops (gsi,
6601 need_conversion ? NOP_EXPR : SSA_NAME,
6602 op0, NULL);
6603 update_stmt (gsi_stmt (*gsi));
6604 return true;
6605 }
6606
6607 rhs_code = BIT_XOR_EXPR;
6608 break;
6609 default:
6610 gcc_unreachable ();
6611 }
6612
6613 if (need_conversion)
6614 return false;
6615
6616 gimple_assign_set_rhs_with_ops (gsi, rhs_code, op0, op1);
6617 update_stmt (gsi_stmt (*gsi));
6618 return true;
6619 }
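
/* For instance, assuming hypothetical SSA names a_2 and b_3 whose ranges
   are known to be [0, 1]:

     c_5 = a_2 && b_3;     becomes   c_5 = a_2 & b_3;
     d_6 = a_2 != 0;       becomes   d_6 = a_2;
     e_7 = a_2 != b_3;     becomes   e_7 = a_2 ^ b_3;

   If either operand might lie outside [0, 1], or the rewrite would
   require sign-extending a one-bit -1, the statement is left alone.  */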
6620
6621 /* Simplify a division or modulo operator to a right shift or
6622 bitwise and if the first operand is unsigned or is greater
6623 than zero and the second operand is an exact power of two. */
6624
6625 static bool
6626 simplify_div_or_mod_using_ranges (gimple stmt)
6627 {
6628 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
6629 tree val = NULL;
6630 tree op0 = gimple_assign_rhs1 (stmt);
6631 tree op1 = gimple_assign_rhs2 (stmt);
6632 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
6633
6634 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
6635 {
6636 val = integer_one_node;
6637 }
6638 else
6639 {
6640 bool sop = false;
6641
6642 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
6643
6644 if (val
6645 && sop
6646 && integer_onep (val)
6647 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6648 {
6649 location_t location;
6650
6651 if (!gimple_has_location (stmt))
6652 location = input_location;
6653 else
6654 location = gimple_location (stmt);
6655 warning_at (location, OPT_Wstrict_overflow,
6656 "assuming signed overflow does not occur when "
6657 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
6658 }
6659 }
6660
6661 if (val && integer_onep (val))
6662 {
6663 tree t;
6664
6665 if (rhs_code == TRUNC_DIV_EXPR)
6666 {
6667 t = build_int_cst (NULL_TREE, tree_log2 (op1));
6668 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
6669 gimple_assign_set_rhs1 (stmt, op0);
6670 gimple_assign_set_rhs2 (stmt, t);
6671 }
6672 else
6673 {
6674 t = build_int_cst (TREE_TYPE (op1), 1);
6675 t = int_const_binop (MINUS_EXPR, op1, t, 0);
6676 t = fold_convert (TREE_TYPE (op0), t);
6677
6678 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
6679 gimple_assign_set_rhs1 (stmt, op0);
6680 gimple_assign_set_rhs2 (stmt, t);
6681 }
6682
6683 update_stmt (stmt);
6684 return true;
6685 }
6686
6687 return false;
6688 }
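
/* For example (hypothetical fragment), when x_1 is unsigned or its range
   shows it can never be negative:

     y_2 = x_1 / 16;     becomes   y_2 = x_1 >> 4;
     z_3 = x_1 % 16;     becomes   z_3 = x_1 & 15;

   A possibly negative x_1 is left untouched, since the shift and mask
   forms would not round toward zero.  */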
6689
6690 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
6691 ABS_EXPR. If the operand is <= 0, then simplify the
6692 ABS_EXPR into a NEGATE_EXPR. */
6693
6694 static bool
6695 simplify_abs_using_ranges (gimple stmt)
6696 {
6697 tree val = NULL;
6698 tree op = gimple_assign_rhs1 (stmt);
6699 tree type = TREE_TYPE (op);
6700 value_range_t *vr = get_value_range (op);
6701
6702 if (TYPE_UNSIGNED (type))
6703 {
6704 val = integer_zero_node;
6705 }
6706 else if (vr)
6707 {
6708 bool sop = false;
6709
6710 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
6711 if (!val)
6712 {
6713 sop = false;
6714 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
6715 &sop);
6716
6717 if (val)
6718 {
6719 if (integer_zerop (val))
6720 val = integer_one_node;
6721 else if (integer_onep (val))
6722 val = integer_zero_node;
6723 }
6724 }
6725
6726 if (val
6727 && (integer_onep (val) || integer_zerop (val)))
6728 {
6729 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
6730 {
6731 location_t location;
6732
6733 if (!gimple_has_location (stmt))
6734 location = input_location;
6735 else
6736 location = gimple_location (stmt);
6737 warning_at (location, OPT_Wstrict_overflow,
6738 "assuming signed overflow does not occur when "
6739 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
6740 }
6741
6742 gimple_assign_set_rhs1 (stmt, op);
6743 if (integer_onep (val))
6744 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
6745 else
6746 gimple_assign_set_rhs_code (stmt, SSA_NAME);
6747 update_stmt (stmt);
6748 return true;
6749 }
6750 }
6751
6752 return false;
6753 }
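
/* For example (hypothetical ranges):

     x_1 in [-10, -1]:   y_2 = ABS_EXPR <x_1>;   becomes   y_2 = -x_1;
     x_1 in [3, 7]:      y_2 = ABS_EXPR <x_1>;   becomes   y_2 = x_1;

   If the range straddles zero, neither comparison is decidable and the
   statement is left unchanged.  */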
6754
6755 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
6756 a known value range VR.
6757
6758 If there is one and only one value which will satisfy the
6759 conditional, then return that value. Else return NULL. */
6760
6761 static tree
6762 test_for_singularity (enum tree_code cond_code, tree op0,
6763 tree op1, value_range_t *vr)
6764 {
6765 tree min = NULL;
6766 tree max = NULL;
6767
6768   /* Extract the minimum and maximum values which satisfy the
6769      conditional as it was written.  */
6770 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
6771 {
6772 /* This should not be negative infinity; there is no overflow
6773 here. */
6774 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
6775
6776 max = op1;
6777 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
6778 {
6779 tree one = build_int_cst (TREE_TYPE (op0), 1);
6780 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
6781 if (EXPR_P (max))
6782 TREE_NO_WARNING (max) = 1;
6783 }
6784 }
6785 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
6786 {
6787 /* This should not be positive infinity; there is no overflow
6788 here. */
6789 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
6790
6791 min = op1;
6792 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
6793 {
6794 tree one = build_int_cst (TREE_TYPE (op0), 1);
6795 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
6796 if (EXPR_P (min))
6797 TREE_NO_WARNING (min) = 1;
6798 }
6799 }
6800
6801 /* Now refine the minimum and maximum values using any
6802 value range information we have for op0. */
6803 if (min && max)
6804 {
6805 if (compare_values (vr->min, min) == 1)
6806 min = vr->min;
6807 if (compare_values (vr->max, max) == -1)
6808 max = vr->max;
6809
6810 /* If the new min/max values have converged to a single value,
6811 then there is only one value which can satisfy the condition,
6812 return that value. */
6813 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
6814 return min;
6815 }
6816 return NULL;
6817 }
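
/* A small worked example with a hypothetical range: if x_1 is known to be
   in [0, 10], then for the condition x_1 < 1 we get max = 1 - 1 = 0 and
   min refined from the type minimum up to the range minimum 0, so
   min == max and the single value 0 is returned; the caller can then
   rewrite the test as x_1 == 0.  Likewise x_1 > 9 yields the single
   value 10.  */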
6818
6819 /* Simplify a conditional using a relational operator to an equality
6820 test if the range information indicates only one value can satisfy
6821 the original conditional. */
6822
6823 static bool
6824 simplify_cond_using_ranges (gimple stmt)
6825 {
6826 tree op0 = gimple_cond_lhs (stmt);
6827 tree op1 = gimple_cond_rhs (stmt);
6828 enum tree_code cond_code = gimple_cond_code (stmt);
6829
6830 if (cond_code != NE_EXPR
6831 && cond_code != EQ_EXPR
6832 && TREE_CODE (op0) == SSA_NAME
6833 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
6834 && is_gimple_min_invariant (op1))
6835 {
6836 value_range_t *vr = get_value_range (op0);
6837
6838 /* If we have range information for OP0, then we might be
6839 able to simplify this conditional. */
6840 if (vr->type == VR_RANGE)
6841 {
6842 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
6843
6844 if (new_tree)
6845 {
6846 if (dump_file)
6847 {
6848 fprintf (dump_file, "Simplified relational ");
6849 print_gimple_stmt (dump_file, stmt, 0, 0);
6850 fprintf (dump_file, " into ");
6851 }
6852
6853 gimple_cond_set_code (stmt, EQ_EXPR);
6854 gimple_cond_set_lhs (stmt, op0);
6855 gimple_cond_set_rhs (stmt, new_tree);
6856
6857 update_stmt (stmt);
6858
6859 if (dump_file)
6860 {
6861 print_gimple_stmt (dump_file, stmt, 0, 0);
6862 fprintf (dump_file, "\n");
6863 }
6864
6865 return true;
6866 }
6867
6868 /* Try again after inverting the condition. We only deal
6869 with integral types here, so no need to worry about
6870 issues with inverting FP comparisons. */
6871 cond_code = invert_tree_comparison (cond_code, false);
6872 new_tree = test_for_singularity (cond_code, op0, op1, vr);
6873
6874 if (new_tree)
6875 {
6876 if (dump_file)
6877 {
6878 fprintf (dump_file, "Simplified relational ");
6879 print_gimple_stmt (dump_file, stmt, 0, 0);
6880 fprintf (dump_file, " into ");
6881 }
6882
6883 gimple_cond_set_code (stmt, NE_EXPR);
6884 gimple_cond_set_lhs (stmt, op0);
6885 gimple_cond_set_rhs (stmt, new_tree);
6886
6887 update_stmt (stmt);
6888
6889 if (dump_file)
6890 {
6891 print_gimple_stmt (dump_file, stmt, 0, 0);
6892 fprintf (dump_file, "\n");
6893 }
6894
6895 return true;
6896 }
6897 }
6898 }
6899
6900 return false;
6901 }
6902
6903 /* Simplify a switch statement using the value range of the switch
6904 argument. */
6905
6906 static bool
6907 simplify_switch_using_ranges (gimple stmt)
6908 {
6909 tree op = gimple_switch_index (stmt);
6910 value_range_t *vr;
6911 bool take_default;
6912 edge e;
6913 edge_iterator ei;
6914 size_t i = 0, j = 0, n, n2;
6915 tree vec2;
6916 switch_update su;
6917
6918 if (TREE_CODE (op) == SSA_NAME)
6919 {
6920 vr = get_value_range (op);
6921
6922 /* We can only handle integer ranges. */
6923 if (vr->type != VR_RANGE
6924 || symbolic_range_p (vr))
6925 return false;
6926
6927 /* Find case label for min/max of the value range. */
6928 take_default = !find_case_label_range (stmt, vr->min, vr->max, &i, &j);
6929 }
6930 else if (TREE_CODE (op) == INTEGER_CST)
6931 {
6932 take_default = !find_case_label_index (stmt, 1, op, &i);
6933 if (take_default)
6934 {
6935 i = 1;
6936 j = 0;
6937 }
6938 else
6939 {
6940 j = i;
6941 }
6942 }
6943 else
6944 return false;
6945
6946 n = gimple_switch_num_labels (stmt);
6947
6948   /* Bail out if the range covers every case label and the default,
6949      i.e. all edges remain taken.  */
6949 if (i == 1
6950 && j == n - 1
6951 && take_default)
6952 return false;
6953
6954 /* Build a new vector of taken case labels. */
6955 vec2 = make_tree_vec (j - i + 1 + (int)take_default);
6956 n2 = 0;
6957
6958 /* Add the default edge, if necessary. */
6959 if (take_default)
6960 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
6961
6962 for (; i <= j; ++i, ++n2)
6963 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
6964
6965 /* Mark needed edges. */
6966 for (i = 0; i < n2; ++i)
6967 {
6968 e = find_edge (gimple_bb (stmt),
6969 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
6970 e->aux = (void *)-1;
6971 }
6972
6973   /* Queue edges that are no longer needed for later removal.  */
6974 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
6975 {
6976 if (e->aux == (void *)-1)
6977 {
6978 e->aux = NULL;
6979 continue;
6980 }
6981
6982 if (dump_file && (dump_flags & TDF_DETAILS))
6983 {
6984 fprintf (dump_file, "removing unreachable case label\n");
6985 }
6986 VEC_safe_push (edge, heap, to_remove_edges, e);
6987 e->flags &= ~EDGE_EXECUTABLE;
6988 }
6989
6990 /* And queue an update for the stmt. */
6991 su.stmt = stmt;
6992 su.vec = vec2;
6993 VEC_safe_push (switch_update, heap, to_update_switch_stmts, &su);
6994 return false;
6995 }
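
/* For instance (hypothetical switch): if the index has the range [3, 5]
   and the labels are

     default: case 0: case 3: case 4: case 5: case 9:

   only the labels 3, 4 and 5 are reachable and they form a contiguous
   block, so the default, 0 and 9 labels are dropped: their edges are
   queued in to_remove_edges and the shrunken label vector is queued in
   to_update_switch_stmts, both processed after propagation finishes.  */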
6996
6997 /* Simplify STMT using ranges if possible. */
6998
6999 static bool
7000 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
7001 {
7002 gimple stmt = gsi_stmt (*gsi);
7003 if (is_gimple_assign (stmt))
7004 {
7005 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
7006
7007 switch (rhs_code)
7008 {
7009 case EQ_EXPR:
7010 case NE_EXPR:
7011 case TRUTH_NOT_EXPR:
7012 case TRUTH_AND_EXPR:
7013 case TRUTH_OR_EXPR:
7014 case TRUTH_XOR_EXPR:
7015 /* Transform EQ_EXPR, NE_EXPR, TRUTH_NOT_EXPR into BIT_XOR_EXPR
7016 	     or identity if the RHS is zero or one, and the LHS is known
7017 	     to be a boolean value.  Transform all TRUTH_*_EXPR into
7018 BIT_*_EXPR if both arguments are known to be boolean values. */
7019 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7020 return simplify_truth_ops_using_ranges (gsi, stmt);
7021 break;
7022
7023 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
7024 and BIT_AND_EXPR respectively if the first operand is greater
7025 than zero and the second operand is an exact power of two. */
7026 case TRUNC_DIV_EXPR:
7027 case TRUNC_MOD_EXPR:
7028 if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))
7029 && integer_pow2p (gimple_assign_rhs2 (stmt)))
7030 return simplify_div_or_mod_using_ranges (stmt);
7031 break;
7032
7033 /* Transform ABS (X) into X or -X as appropriate. */
7034 case ABS_EXPR:
7035 if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
7036 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
7037 return simplify_abs_using_ranges (stmt);
7038 break;
7039
7040 default:
7041 break;
7042 }
7043 }
7044 else if (gimple_code (stmt) == GIMPLE_COND)
7045 return simplify_cond_using_ranges (stmt);
7046 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7047 return simplify_switch_using_ranges (stmt);
7048
7049 return false;
7050 }
7051
7052 /* If the statement pointed to by SI has a predicate whose value can be
7053 computed using the value range information computed by VRP, compute
7054 its value and return true. Otherwise, return false. */
7055
7056 static bool
7057 fold_predicate_in (gimple_stmt_iterator *si)
7058 {
7059 bool assignment_p = false;
7060 tree val;
7061 gimple stmt = gsi_stmt (*si);
7062
7063 if (is_gimple_assign (stmt)
7064 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
7065 {
7066 assignment_p = true;
7067 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
7068 gimple_assign_rhs1 (stmt),
7069 gimple_assign_rhs2 (stmt),
7070 stmt);
7071 }
7072 else if (gimple_code (stmt) == GIMPLE_COND)
7073 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
7074 gimple_cond_lhs (stmt),
7075 gimple_cond_rhs (stmt),
7076 stmt);
7077 else
7078 return false;
7079
7080 if (val)
7081 {
7082 if (assignment_p)
7083 val = fold_convert (gimple_expr_type (stmt), val);
7084
7085 if (dump_file)
7086 {
7087 fprintf (dump_file, "Folding predicate ");
7088 print_gimple_expr (dump_file, stmt, 0, 0);
7089 fprintf (dump_file, " to ");
7090 print_generic_expr (dump_file, val, 0);
7091 fprintf (dump_file, "\n");
7092 }
7093
7094 if (is_gimple_assign (stmt))
7095 gimple_assign_set_rhs_from_tree (si, val);
7096 else
7097 {
7098 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
7099 if (integer_zerop (val))
7100 gimple_cond_make_false (stmt);
7101 else if (integer_onep (val))
7102 gimple_cond_make_true (stmt);
7103 else
7104 gcc_unreachable ();
7105 }
7106
7107 return true;
7108 }
7109
7110 return false;
7111 }
7112
7113 /* Callback for substitute_and_fold folding the stmt at *SI. */
7114
7115 static bool
7116 vrp_fold_stmt (gimple_stmt_iterator *si)
7117 {
7118 if (fold_predicate_in (si))
7119 return true;
7120
7121 return simplify_stmt_using_ranges (si);
7122 }
7123
7124 /* Stack of dest,src equivalency pairs that need to be restored after
7125 each attempt to thread a block's incoming edge to an outgoing edge.
7126
7127 A NULL entry is used to mark the end of pairs which need to be
7128 restored. */
7129 static VEC(tree,heap) *stack;
7130
7131 /* A trivial wrapper so that we can present the generic jump threading
7132 code with a simple API for simplifying statements. STMT is the
7133 statement we want to simplify, WITHIN_STMT provides the location
7134 for any overflow warnings. */
7135
7136 static tree
7137 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
7138 {
7139 /* We only use VRP information to simplify conditionals. This is
7140 overly conservative, but it's unclear if doing more would be
7141 worth the compile time cost. */
7142 if (gimple_code (stmt) != GIMPLE_COND)
7143 return NULL;
7144
7145 return vrp_evaluate_conditional (gimple_cond_code (stmt),
7146 gimple_cond_lhs (stmt),
7147 gimple_cond_rhs (stmt), within_stmt);
7148 }
7149
7150 /* Blocks which have more than one predecessor and more than
7151 one successor present jump threading opportunities, i.e.,
7152 when the block is reached from a specific predecessor, we
7153 may be able to determine which of the outgoing edges will
7154 be traversed. When this optimization applies, we are able
7155 to avoid conditionals at runtime and we may expose secondary
7156 optimization opportunities.
7157
7158 This routine is effectively a driver for the generic jump
7159 threading code. It basically just presents the generic code
7160 with edges that may be suitable for jump threading.
7161
7162 Unlike DOM, we do not iterate VRP if jump threading was successful.
7163 While iterating may expose new opportunities for VRP, it is expected
7164 those opportunities would be very limited and the compile time cost
7165 to expose those opportunities would be significant.
7166
7167 As jump threading opportunities are discovered, they are registered
7168 for later realization. */
7169
7170 static void
7171 identify_jump_threads (void)
7172 {
7173 basic_block bb;
7174 gimple dummy;
7175 int i;
7176 edge e;
7177
7178 /* Ugh. When substituting values earlier in this pass we can
7179 wipe the dominance information. So rebuild the dominator
7180 information as we need it within the jump threading code. */
7181 calculate_dominance_info (CDI_DOMINATORS);
7182
7183 /* We do not allow VRP information to be used for jump threading
7184 across a back edge in the CFG. Otherwise it becomes too
7185 difficult to avoid eliminating loop exit tests. Of course
7186 EDGE_DFS_BACK is not accurate at this time so we have to
7187 recompute it. */
7188 mark_dfs_back_edges ();
7189
7190 /* Do not thread across edges we are about to remove. Just marking
7191 them as EDGE_DFS_BACK will do. */
7192 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7193 e->flags |= EDGE_DFS_BACK;
7194
7195 /* Allocate our unwinder stack to unwind any temporary equivalences
7196 that might be recorded. */
7197 stack = VEC_alloc (tree, heap, 20);
7198
7199 /* To avoid lots of silly node creation, we create a single
7200 conditional and just modify it in-place when attempting to
7201 thread jumps. */
7202 dummy = gimple_build_cond (EQ_EXPR,
7203 integer_zero_node, integer_zero_node,
7204 NULL, NULL);
7205
7206 /* Walk through all the blocks finding those which present a
7207 potential jump threading opportunity. We could set this up
7208 as a dominator walker and record data during the walk, but
7209 I doubt it's worth the effort for the classes of jump
7210 threading opportunities we are trying to identify at this
7211 point in compilation. */
7212 FOR_EACH_BB (bb)
7213 {
7214 gimple last;
7215
7216 /* If the generic jump threading code does not find this block
7217 interesting, then there is nothing to do. */
7218 if (! potentially_threadable_block (bb))
7219 continue;
7220
7221 /* We only care about blocks ending in a COND_EXPR. While there
7222 may be some value in handling SWITCH_EXPR here, I doubt it's
7223 terribly important. */
7224 last = gsi_stmt (gsi_last_bb (bb));
7225 if (gimple_code (last) != GIMPLE_COND)
7226 continue;
7227
7228 /* We're basically looking for any kind of conditional with
7229 integral type arguments. */
7230 if (TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
7231 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
7232 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
7233 || is_gimple_min_invariant (gimple_cond_rhs (last)))
7234 && INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_rhs (last))))
7235 {
7236 edge_iterator ei;
7237
7238 /* We've got a block with multiple predecessors and multiple
7239 successors which also ends in a suitable conditional. For
7240 each predecessor, see if we can thread it to a specific
7241 successor. */
7242 FOR_EACH_EDGE (e, ei, bb->preds)
7243 {
7244 /* Do not thread across back edges or abnormal edges
7245 in the CFG. */
7246 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
7247 continue;
7248
7249 thread_across_edge (dummy, e, true, &stack,
7250 simplify_stmt_for_jump_threading);
7251 }
7252 }
7253 }
7254
7255 /* We do not actually update the CFG or SSA graphs at this point as
7256 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
7257 handle ASSERT_EXPRs gracefully. */
7258 }
7259
7260 /* We identified all the jump threading opportunities earlier, but could
7261 not transform the CFG at that time. This routine transforms the
7262 CFG and arranges for the dominator tree to be rebuilt if necessary.
7263
7264 Note the SSA graph update will occur during the normal TODO
7265 processing by the pass manager. */
7266 static void
7267 finalize_jump_threads (void)
7268 {
7269 thread_through_all_blocks (false);
7270 VEC_free (tree, heap, stack);
7271 }
7272
7273
7274 /* Traverse all the blocks folding conditionals with known ranges. */
7275
7276 static void
7277 vrp_finalize (void)
7278 {
7279 size_t i;
7280 prop_value_t *single_val_range;
7281 bool do_value_subst_p;
7282
7283 if (dump_file)
7284 {
7285 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
7286 dump_all_value_ranges (dump_file);
7287 fprintf (dump_file, "\n");
7288 }
7289
7290   /* We may have ended up with ranges that contain exactly one value.
7291      Those values can be substituted like any other constant propagated
7292      value using substitute_and_fold.  */
7293 single_val_range = XCNEWVEC (prop_value_t, num_ssa_names);
7294
7295 do_value_subst_p = false;
7296 for (i = 0; i < num_ssa_names; i++)
7297 if (vr_value[i]
7298 && vr_value[i]->type == VR_RANGE
7299 && vr_value[i]->min == vr_value[i]->max
7300 && is_gimple_min_invariant (vr_value[i]->min))
7301 {
7302 single_val_range[i].value = vr_value[i]->min;
7303 do_value_subst_p = true;
7304 }
7305
7306 if (!do_value_subst_p)
7307 {
7308 /* We found no single-valued ranges, so don't waste time trying to
7309 do single-value substitution in substitute_and_fold. */
7310 free (single_val_range);
7311 single_val_range = NULL;
7312 }
7313
7314 substitute_and_fold (single_val_range, vrp_fold_stmt);
7315
7316 if (warn_array_bounds)
7317 check_all_array_refs ();
7318
7319 /* We must identify jump threading opportunities before we release
7320 the data structures built by VRP. */
7321 identify_jump_threads ();
7322
7323 /* Free allocated memory. */
7324 for (i = 0; i < num_ssa_names; i++)
7325 if (vr_value[i])
7326 {
7327 BITMAP_FREE (vr_value[i]->equiv);
7328 free (vr_value[i]);
7329 }
7330
7331 free (single_val_range);
7332 free (vr_value);
7333 free (vr_phi_edge_counts);
7334
7335 /* Clear the pointers so that we can distinguish between VRP data
7336 being available and not available. */
7337 vr_value = NULL;
7338 vr_phi_edge_counts = NULL;
7339 }
7340
7341
7342 /* Main entry point to VRP (Value Range Propagation). This pass is
7343 loosely based on J. R. C. Patterson, ``Accurate Static Branch
7344 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
7345 Programming Language Design and Implementation, pp. 67-78, 1995.
7346 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
7347
7348 This is essentially an SSA-CCP pass modified to deal with ranges
7349 instead of constants.
7350
7351 While propagating ranges, we may find that two or more SSA names
7352 have equivalent, though distinct, ranges. For instance,
7353
7354 1 x_9 = p_3->a;
7355 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
7356 3 if (p_4 == q_2)
7357 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
7358 5 endif
7359 6 if (q_2)
7360
7361 In the code above, pointer p_5 has range [q_2, q_2], but from the
7362 code we can also determine that p_5 cannot be NULL and, if q_2 had
7363 a non-varying range, p_5's range should also be compatible with it.
7364
7365 These equivalences are created by two expressions: ASSERT_EXPR and
7366 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
7367 result of another assertion, then we can use the fact that p_5 and
7368 p_4 are equivalent when evaluating p_5's range.
7369
7370 Together with value ranges, we also propagate these equivalences
7371 between names so that we can take advantage of information from
7372 multiple ranges when doing final replacement. Note that this
7373 equivalence relation is transitive but not symmetric.
7374
7375 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
7376 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
7377 in contexts where that assertion does not hold (e.g., in line 6).
7378
7379 TODO: the main difference between this pass and Patterson's is that
7380 we do not propagate edge probabilities. We only compute whether
7381 edges can be taken or not. That is, instead of having a spectrum
7382 of jump probabilities between 0 and 1, we only deal with 0, 1 and
7383 DON'T KNOW. In the future, it may be worthwhile to propagate
7384 probabilities to aid branch prediction. */
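/* As a simple illustration of what the pass can deduce, consider
   hypothetical user code such as

     int f (int x)
     {
       if (x > 10)
         {
           if (x > 5)
             return 1;
           return 0;
         }
       return -1;
     }

   Inside the outer "then" block the range of x is [11, +INF], so the
   inner conditional is known to be true and is folded away, reducing
   the block to a plain "return 1".  */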
7385
7386 static unsigned int
7387 execute_vrp (void)
7388 {
7389 int i;
7390 edge e;
7391 switch_update *su;
7392
7393 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
7394 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
7395 scev_initialize ();
7396
7397 insert_range_assertions ();
7398
7399 to_remove_edges = VEC_alloc (edge, heap, 10);
7400 to_update_switch_stmts = VEC_alloc (switch_update, heap, 5);
7401 threadedge_initialize_values ();
7402
7403 vrp_initialize ();
7404 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
7405 vrp_finalize ();
7406
7407 /* ASSERT_EXPRs must be removed before finalizing jump threads, because
7408 finalizing them calls the CFG cleanup code, which does not properly
7409 handle ASSERT_EXPRs. */
7410 remove_range_assertions ();
7411
7412 /* If we exposed any new variables, go ahead and put them into
7413 SSA form now, before we handle jump threading. This simplifies
7414 interactions between rewriting of _DECL nodes into SSA form
7415 and rewriting SSA_NAME nodes into SSA form after block
7416 duplication and CFG manipulation. */
7417 update_ssa (TODO_update_ssa);
7418
7419 finalize_jump_threads ();
7420
7421 /* Remove dead edges left over from the SWITCH_EXPR optimization. This
7422 leaves the CFG in a broken state and requires a cfg_cleanup run. */
7423 for (i = 0; VEC_iterate (edge, to_remove_edges, i, e); ++i)
7424 remove_edge (e);
7425 /* Update SWITCH_EXPR case label vector. */
7426 for (i = 0; VEC_iterate (switch_update, to_update_switch_stmts, i, su); ++i)
7427 {
7428 size_t j;
7429 size_t n = TREE_VEC_LENGTH (su->vec);
7430 tree label;
7431 gimple_switch_set_num_labels (su->stmt, n);
7432 for (j = 0; j < n; j++)
7433 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
7434 /* As we may have replaced the default label with a regular one,
7435 make sure to make it a real default label again. This ensures
7436 optimal expansion. */
7437 label = gimple_switch_default_label (su->stmt);
7438 CASE_LOW (label) = NULL_TREE;
7439 CASE_HIGH (label) = NULL_TREE;
7440 }
7441
7442 if (VEC_length (edge, to_remove_edges) > 0)
7443 free_dominance_info (CDI_DOMINATORS);
7444
7445 VEC_free (edge, heap, to_remove_edges);
7446 VEC_free (switch_update, heap, to_update_switch_stmts);
7447 threadedge_finalize_values ();
7448
7449 scev_finalize ();
7450 loop_optimizer_finalize ();
7451 return 0;
7452 }
7453
7454 static bool
7455 gate_vrp (void)
7456 {
7457 return flag_tree_vrp != 0;
7458 }
7459
7460 struct gimple_opt_pass pass_vrp =
7461 {
7462 {
7463 GIMPLE_PASS,
7464 "vrp", /* name */
7465 gate_vrp, /* gate */
7466 execute_vrp, /* execute */
7467 NULL, /* sub */
7468 NULL, /* next */
7469 0, /* static_pass_number */
7470 TV_TREE_VRP, /* tv_id */
7471 PROP_ssa, /* properties_required */
7472 0, /* properties_provided */
7473 0, /* properties_destroyed */
7474 0, /* todo_flags_start */
7475 TODO_cleanup_cfg
7476 | TODO_ggc_collect
7477 | TODO_verify_ssa
7478 | TODO_dump_func
7479 | TODO_update_ssa /* todo_flags_finish */
7480 }
7481 };