Re-factor tree.h - Part 1.
[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "gimple.h"
30 #include "gimple-ssa.h"
31 #include "tree-cfg.h"
32 #include "tree-phinodes.h"
33 #include "ssa-iterators.h"
34 #include "tree-ssanames.h"
35 #include "tree-ssa-loop-manip.h"
36 #include "tree-ssa-loop-niter.h"
37 #include "tree-ssa-loop.h"
38 #include "tree-into-ssa.h"
39 #include "tree-ssa.h"
40 #include "tree-pass.h"
41 #include "tree-dump.h"
42 #include "gimple-pretty-print.h"
43 #include "diagnostic-core.h"
44 #include "intl.h"
45 #include "cfgloop.h"
46 #include "tree-scalar-evolution.h"
47 #include "tree-ssa-propagate.h"
48 #include "tree-chrec.h"
49 #include "tree-ssa-threadupdate.h"
50 #include "expr.h"
51 #include "optabs.h"
52 #include "tree-ssa-threadedge.h"
53
54
55
56 /* Range of values that can be associated with an SSA_NAME after VRP
57 has executed. */
/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range_d
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
	  be NULL.

	- If TYPE == VR_RANGE then MIN holds the minimum value and
	  MAX holds the maximum value of the range [MIN, MAX].

	- If TYPE == ANTI_RANGE the variable is known to NOT
	  take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
  bitmap equiv;
};

typedef struct value_range_d value_range_t;

/* Aggregate initializer giving the lattice bottom: VR_UNDEFINED with
   no endpoints and no equivalence set.  */
#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
85
/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  Indexed by basic-block index; a NULL
   entry means no liveness information for that block.  */
static sbitmap *live;
89
90 /* Return true if the SSA name NAME is live on the edge E. */
91
92 static bool
93 live_on_edge (edge e, tree name)
94 {
95 return (live[e->dest->index]
96 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
97 }
98
/* Local functions.  Forward declarations for routines defined later
   in this file.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static void vrp_meet (value_range_t *, value_range_t *);
static void vrp_intersect_ranges (value_range_t *, value_range_t *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
						     tree, tree, bool, bool *,
						     bool *);
107
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus_d
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  struct assert_locus_d *next;
};

typedef struct assert_locus_d *assert_locus_t;
139
/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus_t *asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  */
static unsigned num_vr_values;
static value_range_t **vr_value;
/* Set once propagation has finished; get_value_range then stops
   allocating new value-range objects.  */
static bool values_propagated;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   node.  */
static int *vr_phi_edge_counts;

/* A switch statement together with the replacement case-label vector
   to install for it after propagation.  */
typedef struct {
  gimple stmt;
  tree vec;
} switch_update;

/* Work lists filled during propagation; presumably processed when the
   pass finishes (edges to delete, switches to rewrite) — the consuming
   code is outside this chunk.  */
static vec<edge> to_remove_edges;
static vec<switch_update> to_update_switch_stmts;
167
168
169 /* Return the maximum value for TYPE. */
170
171 static inline tree
172 vrp_val_max (const_tree type)
173 {
174 if (!INTEGRAL_TYPE_P (type))
175 return NULL_TREE;
176
177 return TYPE_MAX_VALUE (type);
178 }
179
180 /* Return the minimum value for TYPE. */
181
182 static inline tree
183 vrp_val_min (const_tree type)
184 {
185 if (!INTEGRAL_TYPE_P (type))
186 return NULL_TREE;
187
188 return TYPE_MIN_VALUE (type);
189 }
190
191 /* Return whether VAL is equal to the maximum value of its type. This
192 will be true for a positive overflow infinity. We can't do a
193 simple equality comparison with TYPE_MAX_VALUE because C typedefs
194 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
195 to the integer constant with the same value in the type. */
196
197 static inline bool
198 vrp_val_is_max (const_tree val)
199 {
200 tree type_max = vrp_val_max (TREE_TYPE (val));
201 return (val == type_max
202 || (type_max != NULL_TREE
203 && operand_equal_p (val, type_max, 0)));
204 }
205
206 /* Return whether VAL is equal to the minimum value of its type. This
207 will be true for a negative overflow infinity. */
208
209 static inline bool
210 vrp_val_is_min (const_tree val)
211 {
212 tree type_min = vrp_val_min (TREE_TYPE (val));
213 return (val == type_min
214 || (type_min != NULL_TREE
215 && operand_equal_p (val, type_min, 0)));
216 }
217
218
219 /* Return whether TYPE should use an overflow infinity distinct from
220 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
221 represent a signed overflow during VRP computations. An infinity
222 is distinct from a half-range, which will go from some number to
223 TYPE_{MIN,MAX}_VALUE. */
224
225 static inline bool
226 needs_overflow_infinity (const_tree type)
227 {
228 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
229 }
230
231 /* Return whether TYPE can support our overflow infinity
232 representation: we use the TREE_OVERFLOW flag, which only exists
233 for constants. If TYPE doesn't support this, we don't optimize
234 cases which would require signed overflow--we drop them to
235 VARYING. */
236
237 static inline bool
238 supports_overflow_infinity (const_tree type)
239 {
240 tree min = vrp_val_min (type), max = vrp_val_max (type);
241 #ifdef ENABLE_CHECKING
242 gcc_assert (needs_overflow_infinity (type));
243 #endif
244 return (min != NULL_TREE
245 && CONSTANT_CLASS_P (min)
246 && max != NULL_TREE
247 && CONSTANT_CLASS_P (max));
248 }
249
250 /* VAL is the maximum or minimum value of a type. Return a
251 corresponding overflow infinity. */
252
253 static inline tree
254 make_overflow_infinity (tree val)
255 {
256 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
257 val = copy_node (val);
258 TREE_OVERFLOW (val) = 1;
259 return val;
260 }
261
262 /* Return a negative overflow infinity for TYPE. */
263
264 static inline tree
265 negative_overflow_infinity (tree type)
266 {
267 gcc_checking_assert (supports_overflow_infinity (type));
268 return make_overflow_infinity (vrp_val_min (type));
269 }
270
271 /* Return a positive overflow infinity for TYPE. */
272
273 static inline tree
274 positive_overflow_infinity (tree type)
275 {
276 gcc_checking_assert (supports_overflow_infinity (type));
277 return make_overflow_infinity (vrp_val_max (type));
278 }
279
280 /* Return whether VAL is a negative overflow infinity. */
281
282 static inline bool
283 is_negative_overflow_infinity (const_tree val)
284 {
285 return (needs_overflow_infinity (TREE_TYPE (val))
286 && CONSTANT_CLASS_P (val)
287 && TREE_OVERFLOW (val)
288 && vrp_val_is_min (val));
289 }
290
291 /* Return whether VAL is a positive overflow infinity. */
292
293 static inline bool
294 is_positive_overflow_infinity (const_tree val)
295 {
296 return (needs_overflow_infinity (TREE_TYPE (val))
297 && CONSTANT_CLASS_P (val)
298 && TREE_OVERFLOW (val)
299 && vrp_val_is_max (val));
300 }
301
302 /* Return whether VAL is a positive or negative overflow infinity. */
303
304 static inline bool
305 is_overflow_infinity (const_tree val)
306 {
307 return (needs_overflow_infinity (TREE_TYPE (val))
308 && CONSTANT_CLASS_P (val)
309 && TREE_OVERFLOW (val)
310 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
311 }
312
313 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
314
315 static inline bool
316 stmt_overflow_infinity (gimple stmt)
317 {
318 if (is_gimple_assign (stmt)
319 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
320 GIMPLE_SINGLE_RHS)
321 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
322 return false;
323 }
324
325 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
326 the same value with TREE_OVERFLOW clear. This can be used to avoid
327 confusing a regular value with an overflow value. */
328
329 static inline tree
330 avoid_overflow_infinity (tree val)
331 {
332 if (!is_overflow_infinity (val))
333 return val;
334
335 if (vrp_val_is_max (val))
336 return vrp_val_max (TREE_TYPE (val));
337 else
338 {
339 gcc_checking_assert (vrp_val_is_min (val));
340 return vrp_val_min (TREE_TYPE (val));
341 }
342 }
343
344
345 /* Return true if ARG is marked with the nonnull attribute in the
346 current function signature. */
347
static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  fntype = TREE_TYPE (current_function_decl);
  /* Walk the attribute chain; the loop body advances ATTRS past each
     "nonnull" occurrence found, so every instance of the attribute on
     the function type is examined in turn.  */
  for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
    {
      attrs = lookup_attribute ("nonnull", attrs);

      /* If "nonnull" wasn't specified, we know nothing about the argument.  */
      if (attrs == NULL_TREE)
	return false;

      /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
      if (TREE_VALUE (attrs) == NULL_TREE)
	return true;

      /* Get the position number for ARG in the function signature
	 (1-based, as in the attribute syntax).  */
      for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
	   t;
	   t = DECL_CHAIN (t), arg_num++)
	{
	  if (t == arg)
	    break;
	}

      gcc_assert (t == arg);

      /* Now see if ARG_NUM is mentioned in the nonnull list.  */
      for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
	{
	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	    return true;
	}
    }

  return false;
}
394
395
396 /* Set value range VR to VR_UNDEFINED. */
397
398 static inline void
399 set_value_range_to_undefined (value_range_t *vr)
400 {
401 vr->type = VR_UNDEFINED;
402 vr->min = vr->max = NULL_TREE;
403 if (vr->equiv)
404 bitmap_clear (vr->equiv);
405 }
406
407
408 /* Set value range VR to VR_VARYING. */
409
410 static inline void
411 set_value_range_to_varying (value_range_t *vr)
412 {
413 vr->type = VR_VARYING;
414 vr->min = vr->max = NULL_TREE;
415 if (vr->equiv)
416 bitmap_clear (vr->equiv);
417 }
418
419
420 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
421
/* Set value range VR to {T, MIN, MAX, EQUIV}.  EQUIV may alias
   VR->equiv; the copy below is guarded against that.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      /* An integral anti-range must not cover the whole type.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      /* MIN must not compare greater than MAX (-2 means
	 incomparable, which is acceptable for symbolic bounds).  */
      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* At most one endpoint may be an overflow infinity.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
470
471
472 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
473 This means adjusting T, MIN and MAX representing the case of a
474 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
475 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
476 In corner cases where MAX+1 or MIN-1 wraps this will fall back
477 to varying.
478 This routine exists to ease canonicalization in the case where we
479 extract ranges from var + CST op limit. */
480
static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Swapped [MIN, MAX] becomes [MAX + 1, MIN - 1] with the
	 opposite range kind (set below).  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, MAX]  -->  [MAX + 1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[MIN, TYPE_MAX]  -->  [TYPE_MIN, MIN - 1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
591
592 /* Copy value range FROM into value range TO. */
593
594 static inline void
595 copy_value_range (value_range_t *to, value_range_t *from)
596 {
597 set_value_range (to, from->type, from->min, from->max, from->equiv);
598 }
599
600 /* Set value range VR to a single value. This function is only called
601 with values we get from statements, and exists to clear the
602 TREE_OVERFLOW flag so that we don't think we have an overflow
603 infinity when we shouldn't. */
604
605 static inline void
606 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
607 {
608 gcc_assert (is_gimple_min_invariant (val));
609 val = avoid_overflow_infinity (val);
610 set_value_range (vr, VR_RANGE, val, val, equiv);
611 }
612
613 /* Set value range VR to a non-negative range of type TYPE.
614 OVERFLOW_INFINITY indicates whether to use an overflow infinity
615 rather than TYPE_MAX_VALUE; this should be true if we determine
616 that the range is nonnegative based on the assumption that signed
617 overflow does not occur. */
618
619 static inline void
620 set_value_range_to_nonnegative (value_range_t *vr, tree type,
621 bool overflow_infinity)
622 {
623 tree zero;
624
625 if (overflow_infinity && !supports_overflow_infinity (type))
626 {
627 set_value_range_to_varying (vr);
628 return;
629 }
630
631 zero = build_int_cst (type, 0);
632 set_value_range (vr, VR_RANGE, zero,
633 (overflow_infinity
634 ? positive_overflow_infinity (type)
635 : TYPE_MAX_VALUE (type)),
636 vr->equiv);
637 }
638
639 /* Set value range VR to a non-NULL range of type TYPE. */
640
641 static inline void
642 set_value_range_to_nonnull (value_range_t *vr, tree type)
643 {
644 tree zero = build_int_cst (type, 0);
645 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
646 }
647
648
649 /* Set value range VR to a NULL range of type TYPE. */
650
651 static inline void
652 set_value_range_to_null (value_range_t *vr, tree type)
653 {
654 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
655 }
656
657
658 /* Set value range VR to a range of a truthvalue of type TYPE. */
659
660 static inline void
661 set_value_range_to_truthvalue (value_range_t *vr, tree type)
662 {
663 if (TYPE_PRECISION (type) == 1)
664 set_value_range_to_varying (vr);
665 else
666 set_value_range (vr, VR_RANGE,
667 build_int_cst (type, 0), build_int_cst (type, 1),
668 vr->equiv);
669 }
670
671
672 /* If abs (min) < abs (max), set VR to [-max, max], if
673 abs (min) >= abs (max), set VR to [-min, min]. */
674
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  Both bounds must be
   integer constants of a signed integral type; if taking the absolute
   value overflows (e.g. for TYPE_MIN), VR drops to varying.  */

static void
abs_extent_range (value_range_t *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  /* ABS of the most negative value overflows; we cannot represent
     that, so go to varying.  */
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    /* abs(min) < abs(max): result is [-max, max].  */
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      /* abs(min) >= abs(max): result is [-min, min].  */
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      /* Incomparable bounds; give up.  */
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
706
707
708 /* Return value range information for VAR.
709
710 If we have no values ranges recorded (ie, VRP is not running), then
711 return NULL. Otherwise create an empty range if none existed for VAR. */
712
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range_t *
get_value_range (const_tree var)
{
  /* Shared read-only VARYING range returned when no per-name range
     may be created; callers must not modify it.  */
  static const struct value_range_d vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range_t *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  /* Create a default value range (zero-initialized, i.e.
     VR_UNDEFINED).  */
  vr_value[ver] = vr = XCNEW (value_range_t);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && nonnull_arg_p (sym))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else
	    set_value_range_to_varying (vr);
	}
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	/* The pointer backing a by-reference result is never null.  */
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}
769
770 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
771
772 static inline bool
773 vrp_operand_equal_p (const_tree val1, const_tree val2)
774 {
775 if (val1 == val2)
776 return true;
777 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
778 return false;
779 if (is_overflow_infinity (val1))
780 return is_overflow_infinity (val2);
781 return true;
782 }
783
784 /* Return true, if the bitmaps B1 and B2 are equal. */
785
786 static inline bool
787 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
788 {
789 return (b1 == b2
790 || ((!b1 || bitmap_empty_p (b1))
791 && (!b2 || bitmap_empty_p (b2)))
792 || (b1 && b2
793 && bitmap_equal_p (b1, b2)));
794 }
795
796 /* Update the value range and equivalence set for variable VAR to
797 NEW_VR. Return true if NEW_VR is different from VAR's previous
798 value.
799
800 NOTE: This function assumes that NEW_VR is a temporary value range
801 object created for the sole purpose of updating VAR's range. The
802 storage used by the equivalence set from NEW_VR will be freed by
803 this function. Do not call update_value_range when NEW_VR
804 is the range object associated with another SSA name. */
805
static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED or from VARYING.  */
      if (new_vr->type == VR_UNDEFINED
	  || old_vr->type == VR_VARYING)
	set_value_range_to_varying (old_vr);
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  /* NEW_VR is a temporary (see the function comment); release its
     equivalence bitmap here on behalf of the caller.  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
838
839
840 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
841 point where equivalence processing can be turned on/off. */
842
843 static void
844 add_equivalence (bitmap *equiv, const_tree var)
845 {
846 unsigned ver = SSA_NAME_VERSION (var);
847 value_range_t *vr = vr_value[ver];
848
849 if (*equiv == NULL)
850 *equiv = BITMAP_ALLOC (NULL);
851 bitmap_set_bit (*equiv, ver);
852 if (vr && vr->equiv)
853 bitmap_ior_into (*equiv, vr->equiv);
854 }
855
856
857 /* Return true if VR is ~[0, 0]. */
858
859 static inline bool
860 range_is_nonnull (value_range_t *vr)
861 {
862 return vr->type == VR_ANTI_RANGE
863 && integer_zerop (vr->min)
864 && integer_zerop (vr->max);
865 }
866
867
868 /* Return true if VR is [0, 0]. */
869
870 static inline bool
871 range_is_null (value_range_t *vr)
872 {
873 return vr->type == VR_RANGE
874 && integer_zerop (vr->min)
875 && integer_zerop (vr->max);
876 }
877
878 /* Return true if max and min of VR are INTEGER_CST. It is not
879 necessarily a singleton. */
880
881 static inline bool
882 range_int_cst_p (value_range_t *vr)
883 {
884 return (vr->type == VR_RANGE
885 && TREE_CODE (vr->max) == INTEGER_CST
886 && TREE_CODE (vr->min) == INTEGER_CST);
887 }
888
889 /* Return true if VR is a INTEGER_CST singleton. */
890
891 static inline bool
892 range_int_cst_singleton_p (value_range_t *vr)
893 {
894 return (range_int_cst_p (vr)
895 && !is_overflow_infinity (vr->min)
896 && !is_overflow_infinity (vr->max)
897 && tree_int_cst_equal (vr->min, vr->max));
898 }
899
900 /* Return true if value range VR involves at least one symbol. */
901
902 static inline bool
903 symbolic_range_p (value_range_t *vr)
904 {
905 return (!is_gimple_min_invariant (vr->min)
906 || !is_gimple_min_invariant (vr->max));
907 }
908
909 /* Return true if value range VR uses an overflow infinity. */
910
911 static inline bool
912 overflow_infinity_range_p (value_range_t *vr)
913 {
914 return (vr->type == VR_RANGE
915 && (is_overflow_infinity (vr->min)
916 || is_overflow_infinity (vr->max)));
917 }
918
919 /* Return false if we can not make a valid comparison based on VR;
920 this will be the case if it uses an overflow infinity and overflow
921 is not undefined (i.e., -fno-strict-overflow is in effect).
922 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
923 uses an overflow infinity. */
924
925 static bool
926 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
927 {
928 gcc_assert (vr->type == VR_RANGE);
929 if (is_overflow_infinity (vr->min))
930 {
931 *strict_overflow_p = true;
932 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
933 return false;
934 }
935 if (is_overflow_infinity (vr->max))
936 {
937 *strict_overflow_p = true;
938 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
939 return false;
940 }
941 return true;
942 }
943
944
945 /* Return true if the result of assignment STMT is known to be non-negative.
946 If the return value is based on the assumption that signed overflow is
947 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
948 *STRICT_OVERFLOW_P.*/
949
static bool
gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the shape of the RHS to the matching fold-const
     helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					     gimple_expr_type (stmt),
					     gimple_assign_rhs1 (stmt),
					     strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
					      gimple_expr_type (stmt),
					      gimple_assign_rhs1 (stmt),
					      gimple_assign_rhs2 (stmt),
					      strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No helper for ternary RHSs; conservatively answer "maybe
	 negative".  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
					      strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
978
979 /* Return true if return value of call STMT is known to be non-negative.
980 If the return value is based on the assumption that signed overflow is
981 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
982 *STRICT_OVERFLOW_P.*/
983
984 static bool
985 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
986 {
987 tree arg0 = gimple_call_num_args (stmt) > 0 ?
988 gimple_call_arg (stmt, 0) : NULL_TREE;
989 tree arg1 = gimple_call_num_args (stmt) > 1 ?
990 gimple_call_arg (stmt, 1) : NULL_TREE;
991
992 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
993 gimple_call_fndecl (stmt),
994 arg0,
995 arg1,
996 strict_overflow_p);
997 }
998
999 /* Return true if STMT is known to compute a non-negative value.
1000 If the return value is based on the assumption that signed overflow is
1001 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1002 *STRICT_OVERFLOW_P.*/
1003
1004 static bool
1005 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
1006 {
1007 switch (gimple_code (stmt))
1008 {
1009 case GIMPLE_ASSIGN:
1010 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1011 case GIMPLE_CALL:
1012 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1013 default:
1014 gcc_unreachable ();
1015 }
1016 }
1017
1018 /* Return true if the result of assignment STMT is known to be non-zero.
1019 If the return value is based on the assumption that signed overflow is
1020 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1021 *STRICT_OVERFLOW_P.*/
1022
static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  /* Dispatch on the shape of the RHS to the matching fold-const
     helper.  */
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No helper for ternary RHSs; conservatively answer "maybe
	 zero".  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
1051
1052 /* Return true if STMT is known to compute a non-zero value.
1053 If the return value is based on the assumption that signed overflow is
1054 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1055 *STRICT_OVERFLOW_P.*/
1056
static bool
gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	if (!fndecl) return false;
	/* A throwing operator new never returns NULL, provided null
	   pointer checks may be deleted and -fcheck-new is not in
	   effect.  */
	if (flag_delete_null_pointer_checks && !flag_check_new
	    && DECL_IS_OPERATOR_NEW (fndecl)
	    && !TREE_NOTHROW (fndecl))
	  return true;
	/* Likewise calls declared with the returns_nonnull attribute.
	   NOTE(review): this dereferences gimple_call_fntype without a
	   NULL check — presumably always non-NULL when FNDECL is
	   non-NULL; confirm.  */
	if (flag_delete_null_pointer_checks &&
	    lookup_attribute ("returns_nonnull",
			      TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
	  return true;
	/* alloca always returns a non-null pointer.  */
	return gimple_alloca_call_p (stmt);
      }
    default:
      gcc_unreachable ();
    }
}
1082
1083 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1084 obtained so far. */
1085
1086 static bool
1087 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1088 {
1089 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1090 return true;
1091
1092 /* If we have an expression of the form &X->a, then the expression
1093 is nonnull if X is nonnull. */
1094 if (is_gimple_assign (stmt)
1095 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1096 {
1097 tree expr = gimple_assign_rhs1 (stmt);
1098 tree base = get_base_address (TREE_OPERAND (expr, 0));
1099
1100 if (base != NULL_TREE
1101 && TREE_CODE (base) == MEM_REF
1102 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1103 {
1104 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1105 if (range_is_nonnull (vr))
1106 return true;
1107 }
1108 }
1109
1110 return false;
1111 }
1112
1113 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1114 a gimple invariant, or SSA_NAME +- CST. */
1115
1116 static bool
1117 valid_value_p (tree expr)
1118 {
1119 if (TREE_CODE (expr) == SSA_NAME)
1120 return true;
1121
1122 if (TREE_CODE (expr) == PLUS_EXPR
1123 || TREE_CODE (expr) == MINUS_EXPR)
1124 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1125 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1126
1127 return is_gimple_min_invariant (expr);
1128 }
1129
/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
	return INT_CST_LT_UNSIGNED (val, val2);
      else
	{
	  if (INT_CST_LT (val, val2))
	    return 1;
	  /* Otherwise fall through to the overflow-infinity handling
	     below; VAL >= VAL2 as plain constants.  */
	}
    }
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      /* Folding could not decide the comparison.  */
      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  A negative
     overflow infinity is less than everything except another negative
     overflow infinity; symmetrically for positive infinity on VAL2.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1174
/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  /* Symbolic comparison: both operands are NAME, NAME + CST or
     NAME - CST.  */
  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Canonicalize a negative constant by negating it and
	     flipping PLUS <-> MINUS, so C1 is always nonnegative.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  /* Same canonicalization for the second operand.  */
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME
	  && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* From here on the answer relies on undefined signed overflow;
	 record that, unless the operand was marked no-warning.  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1374
1375 /* Compare values like compare_values_warnv, but treat comparisons of
1376 nonconstants which rely on undefined overflow as incomparable. */
1377
1378 static int
1379 compare_values (tree val1, tree val2)
1380 {
1381 bool sop;
1382 int ret;
1383
1384 sop = false;
1385 ret = compare_values_warnv (val1, val2, &sop);
1386 if (sop
1387 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1388 ret = -2;
1389 return ret;
1390 }
1391
1392
1393 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1394 0 if VAL is not inside [MIN, MAX],
1395 -2 if we cannot tell either way.
1396
1397 Benchmark compile/20001226-1.c compilation time after changing this
1398 function. */
1399
1400 static inline int
1401 value_inside_range (tree val, tree min, tree max)
1402 {
1403 int cmp1, cmp2;
1404
1405 cmp1 = operand_less_p (val, min);
1406 if (cmp1 == -2)
1407 return -2;
1408 if (cmp1 == 1)
1409 return 0;
1410
1411 cmp2 = operand_less_p (max, val);
1412 if (cmp2 == -2)
1413 return -2;
1414
1415 return !cmp2;
1416 }
1417
1418
1419 /* Return true if value ranges VR0 and VR1 have a non-empty
1420 intersection.
1421
1422 Benchmark compile/20001226-1.c compilation time after changing this
1423 function.
1424 */
1425
1426 static inline bool
1427 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1428 {
1429 /* The value ranges do not intersect if the maximum of the first range is
1430 less than the minimum of the second range or vice versa.
1431 When those relations are unknown, we can't do any better. */
1432 if (operand_less_p (vr0->max, vr1->min) != 0)
1433 return false;
1434 if (operand_less_p (vr1->max, vr0->min) != 0)
1435 return false;
1436 return true;
1437 }
1438
1439
1440 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1441 include the value zero, -2 if we cannot tell. */
1442
1443 static inline int
1444 range_includes_zero_p (tree min, tree max)
1445 {
1446 tree zero = build_int_cst (TREE_TYPE (min), 0);
1447 return value_inside_range (zero, min, max);
1448 }
1449
1450 /* Return true if *VR is know to only contain nonnegative values. */
1451
1452 static inline bool
1453 value_range_nonnegative_p (value_range_t *vr)
1454 {
1455 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1456 which would return a useful value should be encoded as a
1457 VR_RANGE. */
1458 if (vr->type == VR_RANGE)
1459 {
1460 int result = compare_values (vr->min, integer_zero_node);
1461 return (result == 0 || result == 1);
1462 }
1463
1464 return false;
1465 }
1466
1467 /* If *VR has a value rante that is a single constant value return that,
1468 otherwise return NULL_TREE. */
1469
1470 static tree
1471 value_range_constant_singleton (value_range_t *vr)
1472 {
1473 if (vr->type == VR_RANGE
1474 && operand_equal_p (vr->min, vr->max, 0)
1475 && is_gimple_min_invariant (vr->min))
1476 return vr->min;
1477
1478 return NULL_TREE;
1479 }
1480
1481 /* If OP has a value range with a single constant value return that,
1482 otherwise return NULL_TREE. This returns OP itself if OP is a
1483 constant. */
1484
1485 static tree
1486 op_with_constant_singleton_value_range (tree op)
1487 {
1488 if (is_gimple_min_invariant (op))
1489 return op;
1490
1491 if (TREE_CODE (op) != SSA_NAME)
1492 return NULL_TREE;
1493
1494 return value_range_constant_singleton (get_value_range (op));
1495 }
1496
1497 /* Return true if op is in a boolean [0, 1] value-range. */
1498
1499 static bool
1500 op_with_boolean_value_range_p (tree op)
1501 {
1502 value_range_t *vr;
1503
1504 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1505 return true;
1506
1507 if (integer_zerop (op)
1508 || integer_onep (op))
1509 return true;
1510
1511 if (TREE_CODE (op) != SSA_NAME)
1512 return false;
1513
1514 vr = get_value_range (op);
1515 return (vr->type == VR_RANGE
1516 && integer_zerop (vr->min)
1517 && integer_onep (vr->max));
1518 }
1519
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  VR_P must come in with an empty equivalence set; the
   result is the asserted range intersected with whatever is already
   known about the asserted variable.  */

static void
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
	 take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
	 to flip around the comparison code to create the proper range
	 for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily. */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
	  || limit_vr->type == VR_VARYING
	  || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
	{
	  /* (unsigned)var + CST <= limit is equivalent to
	     -CST <= var <= limit - CST in the unsigned domain.  */
	  min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
			     TREE_OPERAND (cond, 1));
	  max = int_const_binop (PLUS_EXPR, limit, min);
	  cond = TREE_OPERAND (cond, 0);
	}
      else
	{
	  min = build_int_cst (TREE_TYPE (var), 0);
	  max = limit;
	}

      /* Make sure to not set TREE_OVERFLOW on the final type
	 conversion.  We are willingly interpreting large positive
	 unsigned values as negative signed values here.  */
      min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
				   0, false);
      max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
				   0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
	 this for us.  */
      if (cond_code == LE_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_RANGE,
					  min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					  min, max, vr_p->equiv);
      else
	gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      /* Use LIMIT's own range if known, otherwise [LIMIT, LIMIT].  */
      if (limit_vr)
	{
	  range_type = limit_vr->type;
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  range_type = VR_RANGE;
	  min = limit;
	  max = limit;
	}

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
	 SSA name, the new range will also inherit the equivalence set
	 from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
	add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
	 this assertion is an inequality (NE_EXPR), then we cannot
	 derive anything from the anti-range.  For instance, if
	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
	 not imply that VAR's range is [0, 0].  So, in the case of
	 anti-ranges, we just assert the inequality using LIMIT and
	 not its anti-range.

	 If LIMIT_VR is a range, we can only use it to build a new
	 anti-range if LIMIT_VR is a single-valued range.  For
	 instance, if LIMIT_VR is [0, 1], the predicate
	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
	 Rather, it means that for value 0 VAR should be ~[0, 0]
	 and for value 1, VAR should be ~[1, 1].  We cannot
	 represent these ranges.

	 The only situation in which we can build a valid
	 anti-range is when LIMIT_VR is a single-valued range
	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
	  && limit_vr->type == VR_RANGE
	  && compare_values (limit_vr->min, limit_vr->max) == 0)
	{
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  /* In any other case, we cannot use LIMIT's range to build a
	     valid anti-range.  */
	  min = max = limit;
	}

      /* If MIN and MAX cover the whole range for their type, then
	 just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (min)
	  && vrp_val_is_max (max))
	min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	max = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
	     LT_EXPR.  */
	  max = limit_vr->max;
	}

      /* If the maximum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
	   && compare_values (max, min) == 0)
	  || is_overflow_infinity (max))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
	  if (cond_code == LT_EXPR)
	    {
	      /* In a 1-bit signed type MAX - 1 would overflow, so
		 decrement by adding -1 instead.  */
	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), -1));
	      else
		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), 1));
	      if (EXPR_P (max))
		TREE_NO_WARNING (max) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	min = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
	     GT_EXPR.  */
	  min = limit_vr->min;
	}

      /* If the minimum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
	   && compare_values (min, max) == 0)
	  || is_overflow_infinity (min))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
	  if (cond_code == GT_EXPR)
	    {
	      /* In a 1-bit signed type MIN + 1 would overflow, so
		 increment by subtracting -1 instead.  */
	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), -1));
	      else
		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), 1));
	      if (EXPR_P (min))
		TREE_NO_WARNING (min) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}
1793
1794
1795 /* Extract range information from SSA name VAR and store it in VR. If
1796 VAR has an interesting range, use it. Otherwise, create the
1797 range [VAR, VAR] and return it. This is useful in situations where
1798 we may have conditionals testing values of VARYING names. For
1799 instance,
1800
1801 x_3 = y_5;
1802 if (x_3 > y_5)
1803 ...
1804
1805 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1806 always false. */
1807
1808 static void
1809 extract_range_from_ssa_name (value_range_t *vr, tree var)
1810 {
1811 value_range_t *var_vr = get_value_range (var);
1812
1813 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1814 copy_value_range (vr, var_vr);
1815 else
1816 set_value_range (vr, VR_RANGE, var, var, NULL);
1817
1818 add_equivalence (&vr->equiv, var);
1819 }
1820
1821
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
	   && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
	      && !(checkz == 0 || checkz == -1)))
	{
	  overflow = true;
	}
      /* Checking for multiplication overflow is done by dividing the
	 output of the multiplication by the first input of the
	 multiplication.  If the result of that division operation is
	 not equal to the second input of the multiplication, then the
	 multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
	{
	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
				      res,
				      val1);
	  int check = compare_values (tmp, val2);

	  if (check != 0)
	    overflow = true;
	}

      /* Flag unsigned overflow by marking the result tree, which
	 callers interpret as an overflow infinity.  */
      if (overflow)
	{
	  res = copy_node (res);
	  TREE_OVERFLOW (res) = 1;
	}

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  else if ((TREE_OVERFLOW (res)
	    && !TREE_OVERFLOW (val1)
	    && !TREE_OVERFLOW (val2))
	   || is_overflow_infinity (val1)
	   || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
	 overflown, return -INF or +INF depending on the operation
	 and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
	  && !supports_overflow_infinity (TREE_TYPE (res)))
	return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
	 since we can't tell what the sign of the result should be.
	 Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
	   || (code == MINUS_EXPR && sgn1 == sgn2))
	  && is_overflow_infinity (val1)
	  && is_overflow_infinity (val2))
	return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR
	   || code == RSHIFT_EXPR)
	  && (is_overflow_infinity (val1)
	      || is_overflow_infinity (val2)))
	return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  For
	     infinite operands X + -INF is negative, not positive.  */
	  || (code == PLUS_EXPR
	      && (sgn1 >= 0
		  ? !is_negative_overflow_infinity (val2)
		  : is_positive_overflow_infinity (val2)))
	  /* For subtraction, non-infinite operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  For infinite operands 0
	     - INF is negative, not positive.  */
	  || (code == MINUS_EXPR
	      && (sgn1 >= 0
		  ? !is_positive_overflow_infinity (val2)
		  : is_negative_overflow_infinity (val2)))
	  /* We only get in here with positive shift count, so the
	     overflow direction is the same as the sign of val1.
	     Actually rshift does not overflow at all, but we only
	     handle the case of shifting overflowed -INF and +INF.  */
	  || (code == RSHIFT_EXPR
	      && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	return (needs_overflow_infinity (TREE_TYPE (res))
		? positive_overflow_infinity (TREE_TYPE (res))
		: TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
	return (needs_overflow_infinity (TREE_TYPE (res))
		? negative_overflow_infinity (TREE_TYPE (res))
		: TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
1967
1968
/* For range VR compute two double_int bitmasks.  In *MAY_BE_NONZERO
   bitmask if some bit is unset, it means for all numbers in the range
   the bit is 0, otherwise it might be 0 or 1.  In *MUST_BE_NONZERO
   bitmask if some bit is set, it means for all numbers in the range
   the bit is 1, otherwise it might be 0 or 1.  Returns false (with
   fully conservative masks) when VR is not a constant range free of
   overflow infinities.  */

static bool
zero_nonzero_bits_from_vr (value_range_t *vr,
			   double_int *may_be_nonzero,
			   double_int *must_be_nonzero)
{
  /* Start from the fully conservative answer: every bit may be
     nonzero, no bit must be.  */
  *may_be_nonzero = double_int_minus_one;
  *must_be_nonzero = double_int_zero;
  if (!range_int_cst_p (vr)
      || is_overflow_infinity (vr->min)
      || is_overflow_infinity (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      /* A singleton range pins every bit exactly.  */
      *may_be_nonzero = tree_to_double_int (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      /* Both bounds have the same sign, so the common high bits of
	 MIN and MAX are shared by every value in between; everything
	 at and below the highest differing bit is unknown.  */
      double_int dmin = tree_to_double_int (vr->min);
      double_int dmax = tree_to_double_int (vr->max);
      double_int xor_mask = dmin ^ dmax;
      *may_be_nonzero = dmin | dmax;
      *must_be_nonzero = dmin & dmax;
      if (xor_mask.high != 0)
	{
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1
	       << floor_log2 (xor_mask.high)) - 1;
	  may_be_nonzero->low = ALL_ONES;
	  may_be_nonzero->high |= mask;
	  must_be_nonzero->low = 0;
	  must_be_nonzero->high &= ~mask;
	}
      else if (xor_mask.low != 0)
	{
	  unsigned HOST_WIDE_INT mask
	    = ((unsigned HOST_WIDE_INT) 1
	       << floor_log2 (xor_mask.low)) - 1;
	  may_be_nonzero->low |= mask;
	  must_be_nonzero->low &= ~mask;
	}
    }

  return true;
}
2022
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (value_range_t *ar,
			value_range_t *vr0, value_range_t *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* Only a constant anti-range in a type with known extremes can be
     split.  */
  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  /* The piece below the excluded interval: [TYPE_MIN, AR->MIN - 1].  */
  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max
	= double_int_to_tree (type,
			      tree_to_double_int (ar->min) - double_int_one);
    }
  /* The piece above the excluded interval: [AR->MAX + 1, TYPE_MAX].  */
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min
	= double_int_to_tree (type,
			      tree_to_double_int (ar->max) + double_int_one);
      vr1->max = vrp_val_max (type);
    }
  /* Normalize so that a single surviving range always lives in *VR0.  */
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
2068
/* Helper to extract a value-range *VR for a multiplicative operation
   *VR0 CODE *VR1.  CODE must be one of the multiply, divide or shift
   codes asserted below; both input ranges must have the same kind.
   The result is either a VR_RANGE (or VR_ANTI_RANGE kind inherited
   from VR0) or VR_VARYING when nothing useful can be derived.  */

static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
					enum tree_code code,
					value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
     MAX1) and then figure the smallest and largest values to form
     the new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  type = vr0->type;

  /* Compute the 4 cross operations.  A NULL_TREE result from
     vrp_int_const_binop makes us give up and return VARYING below.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  /* When a range is a singleton, some of the four cross products are
     redundant; skip them by leaving the slot NULL.  */
  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      /* Stop early once MIN or MAX is no longer a usable constant
	 (non-invariant or a real, non-infinity overflow); the check
	 after the loop will then drop the result to VARYING.  */
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
2222
2223 /* Some quadruple precision helpers. */
2224 static int
2225 quad_int_cmp (double_int l0, double_int h0,
2226 double_int l1, double_int h1, bool uns)
2227 {
2228 int c = h0.cmp (h1, uns);
2229 if (c != 0) return c;
2230 return l0.ucmp (l1);
2231 }
2232
2233 static void
2234 quad_int_pair_sort (double_int *l0, double_int *h0,
2235 double_int *l1, double_int *h1, bool uns)
2236 {
2237 if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2238 {
2239 double_int tmp;
2240 tmp = *l0; *l0 = *l1; *l1 = tmp;
2241 tmp = *h0; *h0 = *h1; *h1 = tmp;
2242 }
2243 }
2244
/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands, *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

static void
extract_range_from_binary_expr_1 (value_range_t *vr,
				  enum tree_code code, tree expr_type,
				  value_range_t *vr0_, value_range_t *vr1_)
{
  /* Work on local copies so the UNDEFINED/anti-range canonicalization
     below can rewrite them without touching the caller's ranges.  */
  value_range_t vr0 = *vr0_, vr1 = *vr1_;
  /* Scratch ranges for the anti-range decomposition.  */
  value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  /* Only integral and pointer types are handled.  */
  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  Note the
     recursion is bounded: the decomposed pieces are plain ranges, so
     the recursive calls cannot take this path again.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_AND_EXPR
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions.  TODO, we may be able to derive anti-ranges in
     some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null.  Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* If we have a PLUS_EXPR with two VR_RANGE integer constant
         ranges compute the precise range for such case if possible.  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  /* We need as many bits as the possibly unsigned inputs.  */
	  && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
	{
	  double_int min0 = tree_to_double_int (vr0.min);
	  double_int max0 = tree_to_double_int (vr0.max);
	  double_int min1 = tree_to_double_int (vr1.min);
	  double_int max1 = tree_to_double_int (vr1.max);
	  bool uns = TYPE_UNSIGNED (expr_type);
	  double_int type_min
	    = double_int::min_value (TYPE_PRECISION (expr_type), uns);
	  double_int type_max
	    = double_int::max_value (TYPE_PRECISION (expr_type), uns);
	  double_int dmin, dmax;
	  /* -1: underflow below TYPE_MIN, +1: overflow above TYPE_MAX,
	     0: in range.  */
	  int min_ovf = 0;
	  int max_ovf = 0;

	  if (code == PLUS_EXPR)
	    {
	      dmin = min0 + min1;
	      dmax = max0 + max1;

	      /* Check for overflow in double_int.  */
	      if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
		min_ovf = min0.cmp (dmin, uns);
	      if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
		max_ovf = max0.cmp (dmax, uns);
	    }
	  else /* if (code == MINUS_EXPR) */
	    {
	      /* The new minimum subtracts the largest and the new
		 maximum subtracts the smallest of the other range.  */
	      dmin = min0 - max1;
	      dmax = max0 - min1;

	      if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
		min_ovf = min0.cmp (max1, uns);
	      if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
		max_ovf = max0.cmp (min1, uns);
	    }

	  /* For non-wrapping arithmetic look at possibly smaller
	     value-ranges of the type.  */
	  if (!TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      if (vrp_val_min (expr_type))
		type_min = tree_to_double_int (vrp_val_min (expr_type));
	      if (vrp_val_max (expr_type))
		type_max = tree_to_double_int (vrp_val_max (expr_type));
	    }

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (dmin.cmp (type_min, uns) == -1)
		min_ovf = -1;
	      else if (dmin.cmp (type_max, uns) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (dmax.cmp (type_min, uns) == -1)
		max_ovf = -1;
	      else if (dmax.cmp (type_max, uns) == 1)
		max_ovf = 1;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      double_int tmin
		= dmin.ext (TYPE_PRECISION (expr_type), uns);
	      double_int tmax
		= dmax.ext (TYPE_PRECISION (expr_type), uns);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = double_int_to_tree (expr_type, tmin);
		  max = double_int_to_tree (expr_type, tmax);
		}
	      else if (min_ovf == -1
		       && max_ovf == 1)
		{
		  /* Underflow and overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	      else
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  The excluded interval
		     is [TMAX + 1, TMIN - 1] of the wrapped bounds.  */
		  bool covers = false;
		  double_int tem = tmin;
		  gcc_assert ((min_ovf == -1 && max_ovf == 0)
			      || (max_ovf == 1 && min_ovf == 0));
		  type = VR_ANTI_RANGE;
		  tmin = tmax + double_int_one;
		  if (tmin.cmp (tmax, uns) < 0)
		    covers = true;
		  tmax = tem + double_int_minus_one;
		  if (tmax.cmp (tem, uns) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || tmin.cmp (tmax, uns) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = double_int_to_tree (expr_type, tmin);
		  max = double_int_to_tree (expr_type, tmax);
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types min/max
	         value.  */
	      if (min_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = negative_overflow_infinity (expr_type);
		  else
		    min = double_int_to_tree (expr_type, type_min);
		}
	      else if (min_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = positive_overflow_infinity (expr_type);
		  else
		    min = double_int_to_tree (expr_type, type_max);
		}
	      else
		min = double_int_to_tree (expr_type, dmin);

	      if (max_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = negative_overflow_infinity (expr_type);
		  else
		    max = double_int_to_tree (expr_type, type_min);
		}
	      else if (max_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = positive_overflow_infinity (expr_type);
		  else
		    max = double_int_to_tree (expr_type, type_max);
		}
	      else
		max = double_int_to_tree (expr_type, dmax);
	    }
	  /* Propagate overflow infinities already present in the input
	     bounds to the result bounds.  */
	  if (needs_overflow_infinity (expr_type)
	      && supports_overflow_infinity (expr_type))
	    {
	      if (is_negative_overflow_infinity (vr0.min)
		  || (code == PLUS_EXPR
		      ? is_negative_overflow_infinity (vr1.min)
		      : is_positive_overflow_infinity (vr1.max)))
		min = negative_overflow_infinity (expr_type);
	      if (is_positive_overflow_infinity (vr0.max)
		  || (code == PLUS_EXPR
		      ? is_positive_overflow_infinity (vr1.max)
		      : is_negative_overflow_infinity (vr1.min)))
		max = positive_overflow_infinity (expr_type);
	    }
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this.  */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation to
		 the same end of each range.  */
	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
	    }
	  /* Only one operand has a usable range: the result is still
	     bounded on one side by that operand's bound.  */
	  else if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  && TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  /* The four products are computed in 2 x double_int quad
	     precision (low word : high word pairs) so that wrapping
	     multiplies can still be analyzed exactly.  */
	  double_int min0, max0, min1, max1, sizem1, size;
	  double_int prod0l, prod0h, prod1l, prod1h,
		     prod2l, prod2h, prod3l, prod3h;
	  bool uns0, uns1, uns;

	  sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
	  size = sizem1 + double_int_one;

	  min0 = tree_to_double_int (vr0.min);
	  max0 = tree_to_double_int (vr0.max);
	  min1 = tree_to_double_int (vr1.min);
	  max1 = tree_to_double_int (vr1.max);

	  uns0 = TYPE_UNSIGNED (expr_type);
	  uns1 = uns0;

	  /* Canonicalize the intervals.  An unsigned range whose
	     "negative" image is narrower is re-interpreted as a signed
	     interval around zero.  */
	  if (TYPE_UNSIGNED (expr_type))
	    {
	      double_int min2 = size - min0;
	      if (!min2.is_zero () && min2.cmp (max0, true) < 0)
		{
		  min0 = -min2;
		  max0 -= size;
		  uns0 = false;
		}

	      min2 = size - min1;
	      if (!min2.is_zero () && min2.cmp (max1, true) < 0)
		{
		  min1 = -min2;
		  max1 -= size;
		  uns1 = false;
		}
	    }
	  uns = uns0 & uns1;

	  /* The OVERFLOW flag from the widening multiply is unused;
	     the full product is carried in the high:low word pairs,
	     with the high word sign-adjusted by hand below.  */
	  bool overflow;
	  prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
	  if (!uns0 && min0.is_negative ())
	    prod0h -= min1;
	  if (!uns1 && min1.is_negative ())
	    prod0h -= min0;

	  prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
	  if (!uns0 && min0.is_negative ())
	    prod1h -= max1;
	  if (!uns1 && max1.is_negative ())
	    prod1h -= min0;

	  prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
	  if (!uns0 && max0.is_negative ())
	    prod2h -= min1;
	  if (!uns1 && min1.is_negative ())
	    prod2h -= max0;

	  prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
	  if (!uns0 && max0.is_negative ())
	    prod3h -= max1;
	  if (!uns1 && max1.is_negative ())
	    prod3h -= max0;

	  /* Sort the 4 products so that prod0 is the smallest and
	     prod3 the largest.  */
	  quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
	  quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
	  quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
	  quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);

	  /* Max - min: first negate prod0 into prod1 (two's complement
	     over the quad), then add prod3 with carry into prod2.  */
	  if (prod0l.is_zero ())
	    {
	      prod1l = double_int_zero;
	      prod1h = -prod0h;
	    }
	  else
	    {
	      prod1l = -prod0l;
	      prod1h = ~prod0h;
	    }
	  prod2l = prod3l + prod1l;
	  prod2h = prod3h + prod1h;
	  if (prod2l.ult (prod3l))
	    prod2h += double_int_one; /* carry */

	  if (!prod2h.is_zero ()
	      || prod2l.cmp (sizem1, true) >= 0)
	    {
	      /* the range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = double_int_to_tree (expr_type, prod0l);
	  max = double_int_to_tree (expr_type, prod3l);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.  It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling.  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range_t vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      /* Build the singleton range [1 << shift, 1 << shift].  */
	      vr1p.min
		= double_int_to_tree (expr_type,
				      double_int_one
				      .llshift (TREE_INT_CST_LOW (vr1.min),
						TYPE_PRECISION (expr_type)));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.  */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      double_int bound, complement, low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      /* For signed types the sign bit must also survive the
		 shift.  */
	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      bound = double_int_one.llshift (bound_shift, prec);
	      complement = ~(bound - double_int_one);

	      if (uns)
		{
		  low_bound = bound.zext (prec);
		  high_bound = complement.zext (prec);
		  if (tree_to_double_int (vr0.max).ult (low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (high_bound.ult (tree_to_double_int (vr0.min)))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
		         == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement.sext (prec);
		  high_bound = bound;
		  if (tree_to_double_int (vr0.max).slt (high_bound)
		      && low_bound.slt (tree_to_double_int (vr0.min)))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only ones, the
			 value decreases monotomically.  */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  /* Intentionally shadows the function-level CMP; used only in
	     this clause.  */
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		max = zero;
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		min = zero;
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min .. -min
		 depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      /* Give up unless the divisor range is a constant range that
	 excludes zero; also reject TYPE_MIN as vr1.min since its
	 negation/ABS below would overflow.  */
      if (vr1.type != VR_RANGE
	  || range_includes_zero_p (vr1.min, vr1.max) != 0
	  || vrp_val_is_min (vr1.min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      type = VR_RANGE;
      /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
      max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
      if (tree_int_cst_lt (max, vr1.max))
	max = vr1.max;
      /* NOTE(review): INTEGER_ONE built via integer_one_node may have a
	 different type than MAX here — confirm int_const_binop accepts
	 the mix, or build the constant in TREE_TYPE (max).  */
      max = int_const_binop (MINUS_EXPR, max, integer_one_node);
      /* If the dividend is non-negative the modulus will be
	 non-negative as well.  */
      if (TYPE_UNSIGNED (expr_type)
	  || value_range_nonnegative_p (&vr0))
	min = build_int_cst (TREE_TYPE (max), 0);
      else
	min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      bool int_cst_range0, int_cst_range1;
      double_int may_be_nonzero0, may_be_nonzero1;
      double_int must_be_nonzero0, must_be_nonzero1;

      /* Derive known-zero/known-one bit masks for both operands.  */
      int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
						  &must_be_nonzero0);
      int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
						  &must_be_nonzero1);

      type = VR_RANGE;
      if (code == BIT_AND_EXPR)
	{
	  double_int dmax;
	  min = double_int_to_tree (expr_type,
				    must_be_nonzero0 & must_be_nonzero1);
	  dmax = may_be_nonzero0 & may_be_nonzero1;
	  /* If both input ranges contain only negative values we can
	     truncate the result range maximum to the minimum of the
	     input range maxima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.max) < 0
	      && tree_int_cst_sgn (vr1.max) < 0)
	    {
	      dmax = dmax.min (tree_to_double_int (vr0.max),
			       TYPE_UNSIGNED (expr_type));
	      dmax = dmax.min (tree_to_double_int (vr1.max),
			       TYPE_UNSIGNED (expr_type));
	    }
	  /* If either input range contains only non-negative values
	     we can truncate the result range maximum to the respective
	     maximum of the input range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
	    dmax = dmax.min (tree_to_double_int (vr0.max),
			     TYPE_UNSIGNED (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
	    dmax = dmax.min (tree_to_double_int (vr1.max),
			     TYPE_UNSIGNED (expr_type));
	  max = double_int_to_tree (expr_type, dmax);
	}
      else if (code == BIT_IOR_EXPR)
	{
	  double_int dmin;
	  max = double_int_to_tree (expr_type,
				    may_be_nonzero0 | may_be_nonzero1);
	  dmin = must_be_nonzero0 | must_be_nonzero1;
	  /* If the input ranges contain only positive values we can
	     truncate the minimum of the result range to the maximum
	     of the input range minima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.min) >= 0
	      && tree_int_cst_sgn (vr1.min) >= 0)
	    {
	      dmin = dmin.max (tree_to_double_int (vr0.min),
			       TYPE_UNSIGNED (expr_type));
	      dmin = dmin.max (tree_to_double_int (vr1.min),
			       TYPE_UNSIGNED (expr_type));
	    }
	  /* If either input range contains only negative values
	     we can truncate the minimum of the result range to the
	     respective minimum range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
	    dmin = dmin.max (tree_to_double_int (vr0.min),
			     TYPE_UNSIGNED (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
	    dmin = dmin.max (tree_to_double_int (vr1.min),
			     TYPE_UNSIGNED (expr_type));
	  min = double_int_to_tree (expr_type, dmin);
	}
      else if (code == BIT_XOR_EXPR)
	{
	  double_int result_zero_bits, result_one_bits;
	  /* Bits known zero in the result: known equal in both inputs;
	     bits known one: known one in exactly one input and known
	     zero in the other.  */
	  result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
			     | ~(may_be_nonzero0 | may_be_nonzero1);
	  result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
			    | must_be_nonzero1.and_not (may_be_nonzero0);
	  max = double_int_to_tree (expr_type, ~result_zero_bits);
	  min = double_int_to_tree (expr_type, result_one_bits);
	  /* If the range has all positive or all negative values the
	     result is better than VARYING.  */
	  if (tree_int_cst_sgn (min) < 0
	      || tree_int_cst_sgn (max) >= 0)
	    ;
	  else
	    max = min = NULL_TREE;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
3130
3131 /* Extract range information from a binary expression OP0 CODE OP1 based on
3132 the ranges of each of its operands with resulting type EXPR_TYPE.
3133 The resulting range is stored in *VR. */
3134
3135 static void
3136 extract_range_from_binary_expr (value_range_t *vr,
3137 enum tree_code code,
3138 tree expr_type, tree op0, tree op1)
3139 {
3140 value_range_t vr0 = VR_INITIALIZER;
3141 value_range_t vr1 = VR_INITIALIZER;
3142
3143 /* Get value ranges for each operand. For constant operands, create
3144 a new value range with the operand to simplify processing. */
3145 if (TREE_CODE (op0) == SSA_NAME)
3146 vr0 = *(get_value_range (op0));
3147 else if (is_gimple_min_invariant (op0))
3148 set_value_range_to_value (&vr0, op0, NULL);
3149 else
3150 set_value_range_to_varying (&vr0);
3151
3152 if (TREE_CODE (op1) == SSA_NAME)
3153 vr1 = *(get_value_range (op1));
3154 else if (is_gimple_min_invariant (op1))
3155 set_value_range_to_value (&vr1, op1, NULL);
3156 else
3157 set_value_range_to_varying (&vr1);
3158
3159 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3160 }
3161
3162 /* Extract range information from a unary operation CODE based on
3163 the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
3164 The resulting range is stored in *VR. */
3165
3166 static void
3167 extract_range_from_unary_expr_1 (value_range_t *vr,
3168 enum tree_code code, tree type,
3169 value_range_t *vr0_, tree op0_type)
3170 {
/* Work on a local copy of the input range; *VR0_ itself is never modified.  */
3171 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3172
3173 /* VRP only operates on integral and pointer types. */
3174 if (!(INTEGRAL_TYPE_P (op0_type)
3175 || POINTER_TYPE_P (op0_type))
3176 || !(INTEGRAL_TYPE_P (type)
3177 || POINTER_TYPE_P (type)))
3178 {
3179 set_value_range_to_varying (vr);
3180 return;
3181 }
3182
3183 /* If VR0 is UNDEFINED, so is the result. */
3184 if (vr0.type == VR_UNDEFINED)
3185 {
3186 set_value_range_to_undefined (vr);
3187 return;
3188 }
3189
3190 /* Handle operations that we express in terms of others. */
3191 if (code == PAREN_EXPR)
3192 {
3193 /* PAREN_EXPR is a simple copy. */
3194 copy_value_range (vr, &vr0);
3195 return;
3196 }
3197 else if (code == NEGATE_EXPR)
3198 {
3199 /* -X is simply 0 - X, so re-use existing code that also handles
3200 anti-ranges fine. */
3201 value_range_t zero = VR_INITIALIZER;
3202 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3203 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3204 return;
3205 }
3206 else if (code == BIT_NOT_EXPR)
3207 {
3208 /* ~X is simply -1 - X, so re-use existing code that also handles
3209 anti-ranges fine. */
3210 value_range_t minusone = VR_INITIALIZER;
3211 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3212 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3213 type, &minusone, &vr0);
3214 return;
3215 }
3216
3217 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3218 and express op ~[] as (op []') U (op []'').
NOTE(review): the recursion below is assumed to terminate because the
pieces produced by ranges_from_anti_range are plain ranges, not
anti-ranges — confirm against that helper's contract.  */
3219 if (vr0.type == VR_ANTI_RANGE
3220 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3221 {
3222 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3223 if (vrtem1.type != VR_UNDEFINED)
3224 {
3225 value_range_t vrres = VR_INITIALIZER;
3226 extract_range_from_unary_expr_1 (&vrres, code, type,
3227 &vrtem1, op0_type);
3228 vrp_meet (vr, &vrres);
3229 }
3230 return;
3231 }
3232
/* Conversions (NOP_EXPR / CONVERT_EXPR and friends).  */
3233 if (CONVERT_EXPR_CODE_P (code))
3234 {
3235 tree inner_type = op0_type;
3236 tree outer_type = type;
3237
3238 /* If the expression evaluates to a pointer, we are only interested in
3239 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3240 if (POINTER_TYPE_P (type))
3241 {
3242 if (range_is_nonnull (&vr0))
3243 set_value_range_to_nonnull (vr, type);
3244 else if (range_is_null (&vr0))
3245 set_value_range_to_null (vr, type);
3246 else
3247 set_value_range_to_varying (vr);
3248 return;
3249 }
3250
3251 /* If VR0 is varying and we increase the type precision, assume
3252 a full range for the following transformation. */
3253 if (vr0.type == VR_VARYING
3254 && INTEGRAL_TYPE_P (inner_type)
3255 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3256 {
3257 vr0.type = VR_RANGE;
3258 vr0.min = TYPE_MIN_VALUE (inner_type);
3259 vr0.max = TYPE_MAX_VALUE (inner_type);
3260 }
3261
3262 /* If VR0 is a constant range or anti-range and the conversion is
3263 not truncating we can convert the min and max values and
3264 canonicalize the resulting range. Otherwise we can do the
3265 conversion if the size of the range is less than what the
3266 precision of the target type can represent and the range is
3267 not an anti-range. */
3268 if ((vr0.type == VR_RANGE
3269 || vr0.type == VR_ANTI_RANGE)
3270 && TREE_CODE (vr0.min) == INTEGER_CST
3271 && TREE_CODE (vr0.max) == INTEGER_CST
3272 && (!is_overflow_infinity (vr0.min)
3273 || (vr0.type == VR_RANGE
3274 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3275 && needs_overflow_infinity (outer_type)
3276 && supports_overflow_infinity (outer_type)))
3277 && (!is_overflow_infinity (vr0.max)
3278 || (vr0.type == VR_RANGE
3279 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3280 && needs_overflow_infinity (outer_type)
3281 && supports_overflow_infinity (outer_type)))
3282 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3283 || (vr0.type == VR_RANGE
3284 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3285 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3286 size_int (TYPE_PRECISION (outer_type)))))))
3287 {
3288 tree new_min, new_max;
3289 if (is_overflow_infinity (vr0.min))
3290 new_min = negative_overflow_infinity (outer_type);
3291 else
3292 new_min = force_fit_type_double (outer_type,
3293 tree_to_double_int (vr0.min),
3294 0, false);
3295 if (is_overflow_infinity (vr0.max))
3296 new_max = positive_overflow_infinity (outer_type);
3297 else
3298 new_max = force_fit_type_double (outer_type,
3299 tree_to_double_int (vr0.max),
3300 0, false);
3301 set_and_canonicalize_value_range (vr, vr0.type,
3302 new_min, new_max, NULL);
3303 return;
3304 }
3305
3306 set_value_range_to_varying (vr);
3307 return;
3308 }
3309 else if (code == ABS_EXPR)
3310 {
3311 tree min, max;
3312 int cmp;
3313
3314 /* Pass through vr0 in the easy cases. */
3315 if (TYPE_UNSIGNED (type)
3316 || value_range_nonnegative_p (&vr0))
3317 {
3318 copy_value_range (vr, &vr0);
3319 return;
3320 }
3321
3322 /* For the remaining varying or symbolic ranges we can't do anything
3323 useful. */
3324 if (vr0.type == VR_VARYING
3325 || symbolic_range_p (&vr0))
3326 {
3327 set_value_range_to_varying (vr);
3328 return;
3329 }
3330
3331 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3332 useful range. */
3333 if (!TYPE_OVERFLOW_UNDEFINED (type)
3334 && ((vr0.type == VR_RANGE
3335 && vrp_val_is_min (vr0.min))
3336 || (vr0.type == VR_ANTI_RANGE
3337 && !vrp_val_is_min (vr0.min))))
3338 {
3339 set_value_range_to_varying (vr);
3340 return;
3341 }
3342
3343 /* ABS_EXPR may flip the range around, if the original range
3344 included negative values. */
3345 if (is_overflow_infinity (vr0.min))
3346 min = positive_overflow_infinity (type);
3347 else if (!vrp_val_is_min (vr0.min))
3348 min = fold_unary_to_constant (code, type, vr0.min);
3349 else if (!needs_overflow_infinity (type))
3350 min = TYPE_MAX_VALUE (type);
3351 else if (supports_overflow_infinity (type))
3352 min = positive_overflow_infinity (type);
3353 else
3354 {
3355 set_value_range_to_varying (vr);
3356 return;
3357 }
3358
3359 if (is_overflow_infinity (vr0.max))
3360 max = positive_overflow_infinity (type);
3361 else if (!vrp_val_is_min (vr0.max))
3362 max = fold_unary_to_constant (code, type, vr0.max);
3363 else if (!needs_overflow_infinity (type))
3364 max = TYPE_MAX_VALUE (type);
3365 else if (supports_overflow_infinity (type)
3366 /* We shouldn't generate [+INF, +INF] as set_value_range
3367 doesn't like this and ICEs. */
3368 && !is_positive_overflow_infinity (min))
3369 max = positive_overflow_infinity (type);
3370 else
3371 {
3372 set_value_range_to_varying (vr);
3373 return;
3374 }
3375
3376 cmp = compare_values (min, max);
3377
3378 /* If a VR_ANTI_RANGEs contains zero, then we have
3379 ~[-INF, min(MIN, MAX)]. */
3380 if (vr0.type == VR_ANTI_RANGE)
3381 {
3382 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3383 {
3384 /* Take the lower of the two values. */
3385 if (cmp != 1)
3386 max = min;
3387
3388 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3389 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3390 flag_wrapv is set and the original anti-range doesn't include
3391 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3392 if (TYPE_OVERFLOW_WRAPS (type))
3393 {
3394 tree type_min_value = TYPE_MIN_VALUE (type);
3395
3396 min = (vr0.min != type_min_value
3397 ? int_const_binop (PLUS_EXPR, type_min_value,
3398 integer_one_node)
3399 : type_min_value);
3400 }
3401 else
3402 {
3403 if (overflow_infinity_range_p (&vr0))
3404 min = negative_overflow_infinity (type);
3405 else
3406 min = TYPE_MIN_VALUE (type);
3407 }
3408 }
3409 else
3410 {
3411 /* All else has failed, so create the range [0, INF], even for
3412 flag_wrapv since TYPE_MIN_VALUE is in the original
3413 anti-range. */
3414 vr0.type = VR_RANGE;
3415 min = build_int_cst (type, 0);
3416 if (needs_overflow_infinity (type))
3417 {
3418 if (supports_overflow_infinity (type))
3419 max = positive_overflow_infinity (type);
3420 else
3421 {
3422 set_value_range_to_varying (vr);
3423 return;
3424 }
3425 }
3426 else
3427 max = TYPE_MAX_VALUE (type);
3428 }
3429 }
3430
3431 /* If the range contains zero then we know that the minimum value in the
3432 range will be zero. */
3433 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3434 {
3435 if (cmp == 1)
3436 max = min;
3437 min = build_int_cst (type, 0);
3438 }
3439 else
3440 {
3441 /* If the range was reversed, swap MIN and MAX. */
3442 if (cmp == 1)
3443 {
3444 tree t = min;
3445 min = max;
3446 max = t;
3447 }
3448 }
3449
/* Re-validate the final bounds; invalid orderings degrade to VARYING.  */
3450 cmp = compare_values (min, max);
3451 if (cmp == -2 || cmp == 1)
3452 {
3453 /* If the new range has its limits swapped around (MIN > MAX),
3454 then the operation caused one of them to wrap around, mark
3455 the new range VARYING. */
3456 set_value_range_to_varying (vr);
3457 }
3458 else
3459 set_value_range (vr, vr0.type, min, max, NULL);
3460 return;
3461 }
3462
3463 /* For unhandled operations fall back to varying. */
3464 set_value_range_to_varying (vr);
3465 return;
3466 }
3467
3468
3469 /* Extract range information from a unary expression CODE OP0 based on
3470 the range of its operand with resulting type TYPE.
3471 The resulting range is stored in *VR. */
3472
3473 static void
3474 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3475 tree type, tree op0)
3476 {
3477 value_range_t vr0 = VR_INITIALIZER;
3478
3479 /* Get value ranges for the operand. For constant operands, create
3480 a new value range with the operand to simplify processing. */
3481 if (TREE_CODE (op0) == SSA_NAME)
3482 vr0 = *(get_value_range (op0));
3483 else if (is_gimple_min_invariant (op0))
3484 set_value_range_to_value (&vr0, op0, NULL);
3485 else
3486 set_value_range_to_varying (&vr0);
3487
3488 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3489 }
3490
3491
3492 /* Extract range information from a conditional expression STMT based on
3493 the ranges of each of its operands and the expression code. */
3494
3495 static void
3496 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3497 {
3498 tree op0, op1;
3499 value_range_t vr0 = VR_INITIALIZER;
3500 value_range_t vr1 = VR_INITIALIZER;
3501
3502 /* Get value ranges for each operand. For constant operands, create
3503 a new value range with the operand to simplify processing. */
3504 op0 = gimple_assign_rhs2 (stmt);
3505 if (TREE_CODE (op0) == SSA_NAME)
3506 vr0 = *(get_value_range (op0));
3507 else if (is_gimple_min_invariant (op0))
3508 set_value_range_to_value (&vr0, op0, NULL);
3509 else
3510 set_value_range_to_varying (&vr0);
3511
3512 op1 = gimple_assign_rhs3 (stmt);
3513 if (TREE_CODE (op1) == SSA_NAME)
3514 vr1 = *(get_value_range (op1));
3515 else if (is_gimple_min_invariant (op1))
3516 set_value_range_to_value (&vr1, op1, NULL);
3517 else
3518 set_value_range_to_varying (&vr1);
3519
3520 /* The resulting value range is the union of the operand ranges */
3521 copy_value_range (vr, &vr0);
3522 vrp_meet (vr, &vr1);
3523 }
3524
3525
3526 /* Extract range information from a comparison expression EXPR based
3527 on the range of its operand and the expression code. */
3528
3529 static void
3530 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3531 tree type, tree op0, tree op1)
3532 {
3533 bool sop = false;
3534 tree val;
3535
3536 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3537 NULL);
3538
3539 /* A disadvantage of using a special infinity as an overflow
3540 representation is that we lose the ability to record overflow
3541 when we don't have an infinity. So we have to ignore a result
3542 which relies on overflow. */
3543
3544 if (val && !is_overflow_infinity (val) && !sop)
3545 {
3546 /* Since this expression was found on the RHS of an assignment,
3547 its type may be different from _Bool. Convert VAL to EXPR's
3548 type. */
3549 val = fold_convert (type, val);
3550 if (is_gimple_min_invariant (val))
3551 set_value_range_to_value (vr, val, vr->equiv);
3552 else
3553 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3554 }
3555 else
3556 /* The result of a comparison is always true or false. */
3557 set_value_range_to_truthvalue (vr, type);
3558 }
3559
3560 /* Try to derive a nonnegative or nonzero range out of STMT relying
3561 primarily on generic routines in fold in conjunction with range data.
3562 Store the result in *VR.  */
3563
3564 static void
3565 extract_range_basic (value_range_t *vr, gimple stmt)
3566 {
3567 bool sop = false;
3568 tree type = gimple_expr_type (stmt);
3569
/* For calls to the common bit-twiddling builtins we can often compute
   a tight [mini, maxi] result range directly.  */
3570 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3571 {
3572 tree fndecl = gimple_call_fndecl (stmt), arg;
/* mini/maxi: candidate result bounds; mini == -2 is the "give up"
   sentinel; zerov is written by the CLZ/CTZ_DEFINED_VALUE_AT_ZERO
   target macros below.  */
3573 int mini, maxi, zerov = 0, prec;
3574
3575 switch (DECL_FUNCTION_CODE (fndecl))
3576 {
3577 case BUILT_IN_CONSTANT_P:
3578 /* If the call is __builtin_constant_p and the argument is a
3579 function parameter resolve it to false. This avoids bogus
3580 array bound warnings.
3581 ??? We could do this as early as inlining is finished. */
3582 arg = gimple_call_arg (stmt, 0);
3583 if (TREE_CODE (arg) == SSA_NAME
3584 && SSA_NAME_IS_DEFAULT_DEF (arg)
3585 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3586 {
3587 set_value_range_to_null (vr, type);
3588 return;
3589 }
3590 break;
3591 /* Both __builtin_ffs* and __builtin_popcount return
3592 [0, prec]. */
3593 CASE_INT_FN (BUILT_IN_FFS):
3594 CASE_INT_FN (BUILT_IN_POPCOUNT):
3595 arg = gimple_call_arg (stmt, 0);
3596 prec = TYPE_PRECISION (TREE_TYPE (arg));
3597 mini = 0;
3598 maxi = prec;
3599 if (TREE_CODE (arg) == SSA_NAME)
3600 {
3601 value_range_t *vr0 = get_value_range (arg);
3602 /* If arg is non-zero, then ffs or popcount
3603 are non-zero. */
3604 if (((vr0->type == VR_RANGE
3605 && integer_nonzerop (vr0->min))
3606 || (vr0->type == VR_ANTI_RANGE
3607 && integer_zerop (vr0->min)))
3608 && !is_overflow_infinity (vr0->min))
3609 mini = 1;
3610 /* If some high bits are known to be zero,
3611 we can decrease the maximum. */
3612 if (vr0->type == VR_RANGE
3613 && TREE_CODE (vr0->max) == INTEGER_CST
3614 && !is_overflow_infinity (vr0->max))
3615 maxi = tree_floor_log2 (vr0->max) + 1;
3616 }
3617 goto bitop_builtin;
3618 /* __builtin_parity* returns [0, 1]. */
3619 CASE_INT_FN (BUILT_IN_PARITY):
3620 mini = 0;
3621 maxi = 1;
3622 goto bitop_builtin;
3623 /* __builtin_c[lt]z* return [0, prec-1], except for
3624 when the argument is 0, but that is undefined behavior.
3625 On many targets where the CLZ RTL or optab value is defined
3626 for 0 the value is prec, so include that in the range
3627 by default. */
3628 CASE_INT_FN (BUILT_IN_CLZ):
3629 arg = gimple_call_arg (stmt, 0);
3630 prec = TYPE_PRECISION (TREE_TYPE (arg));
3631 mini = 0;
3632 maxi = prec;
3633 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3634 != CODE_FOR_nothing
3635 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3636 zerov)
3637 /* Handle only the single common value. */
3638 && zerov != prec)
3639 /* Magic value to give up, unless vr0 proves
3640 arg is non-zero. */
3641 mini = -2;
3642 if (TREE_CODE (arg) == SSA_NAME)
3643 {
3644 value_range_t *vr0 = get_value_range (arg);
3645 /* From clz of VR_RANGE minimum we can compute
3646 result maximum. */
3647 if (vr0->type == VR_RANGE
3648 && TREE_CODE (vr0->min) == INTEGER_CST
3649 && !is_overflow_infinity (vr0->min))
3650 {
3651 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3652 if (maxi != prec)
3653 mini = 0;
3654 }
3655 else if (vr0->type == VR_ANTI_RANGE
3656 && integer_zerop (vr0->min)
3657 && !is_overflow_infinity (vr0->min))
3658 {
3659 maxi = prec - 1;
3660 mini = 0;
3661 }
3662 if (mini == -2)
3663 break;
3664 /* From clz of VR_RANGE maximum we can compute
3665 result minimum. */
3666 if (vr0->type == VR_RANGE
3667 && TREE_CODE (vr0->max) == INTEGER_CST
3668 && !is_overflow_infinity (vr0->max))
3669 {
3670 mini = prec - 1 - tree_floor_log2 (vr0->max);
3671 if (mini == prec)
3672 break;
3673 }
3674 }
3675 if (mini == -2)
3676 break;
3677 goto bitop_builtin;
3678 /* __builtin_ctz* return [0, prec-1], except for
3679 when the argument is 0, but that is undefined behavior.
3680 If there is a ctz optab for this mode and
3681 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3682 otherwise just assume 0 won't be seen. */
3683 CASE_INT_FN (BUILT_IN_CTZ):
3684 arg = gimple_call_arg (stmt, 0);
3685 prec = TYPE_PRECISION (TREE_TYPE (arg));
3686 mini = 0;
3687 maxi = prec - 1;
3688 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3689 != CODE_FOR_nothing
3690 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3691 zerov))
3692 {
3693 /* Handle only the two common values. */
3694 if (zerov == -1)
3695 mini = -1;
3696 else if (zerov == prec)
3697 maxi = prec;
3698 else
3699 /* Magic value to give up, unless vr0 proves
3700 arg is non-zero. */
3701 mini = -2;
3702 }
3703 if (TREE_CODE (arg) == SSA_NAME)
3704 {
3705 value_range_t *vr0 = get_value_range (arg);
3706 /* If arg is non-zero, then use [0, prec - 1]. */
3707 if (((vr0->type == VR_RANGE
3708 && integer_nonzerop (vr0->min))
3709 || (vr0->type == VR_ANTI_RANGE
3710 && integer_zerop (vr0->min)))
3711 && !is_overflow_infinity (vr0->min))
3712 {
3713 mini = 0;
3714 maxi = prec - 1;
3715 }
3716 /* If some high bits are known to be zero,
3717 we can decrease the result maximum. */
3718 if (vr0->type == VR_RANGE
3719 && TREE_CODE (vr0->max) == INTEGER_CST
3720 && !is_overflow_infinity (vr0->max))
3721 {
3722 maxi = tree_floor_log2 (vr0->max);
3723 /* For vr0 [0, 0] give up. */
3724 if (maxi == -1)
3725 break;
3726 }
3727 }
3728 if (mini == -2)
3729 break;
3730 goto bitop_builtin;
3731 /* __builtin_clrsb* returns [0, prec-1]. */
3732 CASE_INT_FN (BUILT_IN_CLRSB):
3733 arg = gimple_call_arg (stmt, 0);
3734 prec = TYPE_PRECISION (TREE_TYPE (arg));
3735 mini = 0;
3736 maxi = prec - 1;
3737 goto bitop_builtin;
3738 bitop_builtin:
3739 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3740 build_int_cst (type, maxi), NULL);
3741 return;
3742 default:
3743 break;
3744 }
3745 }
/* Otherwise fall back to generic nonnegative/nonzero facts derived
   via fold.  */
3746 if (INTEGRAL_TYPE_P (type)
3747 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3748 set_value_range_to_nonnegative (vr, type,
3749 sop || stmt_overflow_infinity (stmt));
3750 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3751 && !sop)
3752 set_value_range_to_nonnull (vr, type);
3753 else
3754 set_value_range_to_varying (vr);
3755 }
3756
3757
3758 /* Try to compute a useful range out of assignment STMT and store it
3759 in *VR. */
3760
3761 static void
3762 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3763 {
3764 enum tree_code code = gimple_assign_rhs_code (stmt);
3765
3766 if (code == ASSERT_EXPR)
3767 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3768 else if (code == SSA_NAME)
3769 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3770 else if (TREE_CODE_CLASS (code) == tcc_binary)
3771 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3772 gimple_expr_type (stmt),
3773 gimple_assign_rhs1 (stmt),
3774 gimple_assign_rhs2 (stmt));
3775 else if (TREE_CODE_CLASS (code) == tcc_unary)
3776 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3777 gimple_expr_type (stmt),
3778 gimple_assign_rhs1 (stmt));
3779 else if (code == COND_EXPR)
3780 extract_range_from_cond_expr (vr, stmt);
3781 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3782 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3783 gimple_expr_type (stmt),
3784 gimple_assign_rhs1 (stmt),
3785 gimple_assign_rhs2 (stmt));
3786 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3787 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3788 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3789 else
3790 set_value_range_to_varying (vr);
3791
3792 if (vr->type == VR_VARYING)
3793 extract_range_basic (vr, stmt);
3794 }
3795
3796 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3797 would be profitable to adjust VR using scalar evolution information
3798 for VAR. If so, update VR with the new limits. */
3799
3800 static void
3801 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3802 gimple stmt, tree var)
3803 {
3804 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3805 enum ev_direction dir;
3806
3807 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3808 better opportunities than a regular range, but I'm not sure. */
3809 if (vr->type == VR_ANTI_RANGE)
3810 return;
3811
3812 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3813
3814 /* Like in PR19590, scev can return a constant function. */
3815 if (is_gimple_min_invariant (chrec))
3816 {
3817 set_value_range_to_value (vr, chrec, vr->equiv);
3818 return;
3819 }
3820
3821 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3822 return;
3823
/* Extract the initial value and per-iteration step of the induction
   variable, substituting known-singleton ranges where possible.  */
3824 init = initial_condition_in_loop_num (chrec, loop->num);
3825 tem = op_with_constant_singleton_value_range (init);
3826 if (tem)
3827 init = tem;
3828 step = evolution_part_in_loop_num (chrec, loop->num);
3829 tem = op_with_constant_singleton_value_range (step);
3830 if (tem)
3831 step = tem;
3832
3833 /* If STEP is symbolic, we can't know whether INIT will be the
3834 minimum or maximum value in the range. Also, unless INIT is
3835 a simple expression, compare_values and possibly other functions
3836 in tree-vrp won't be able to handle it. */
3837 if (step == NULL_TREE
3838 || !is_gimple_min_invariant (step)
3839 || !valid_value_p (init))
3840 return;
3841
3842 dir = scev_direction (chrec);
3843 if (/* Do not adjust ranges if we do not know whether the iv increases
3844 or decreases, ... */
3845 dir == EV_DIR_UNKNOWN
3846 /* ... or if it may wrap. */
3847 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3848 true))
3849 return;
3850
3851 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3852 negative_overflow_infinity and positive_overflow_infinity,
3853 because we have concluded that the loop probably does not
3854 wrap. */
3855
3856 type = TREE_TYPE (var);
3857 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3858 tmin = lower_bound_in_type (type, type);
3859 else
3860 tmin = TYPE_MIN_VALUE (type);
3861 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3862 tmax = upper_bound_in_type (type, type);
3863 else
3864 tmax = TYPE_MAX_VALUE (type);
3865
3866 /* Try to use estimated number of iterations for the loop to constrain the
3867 final value in the evolution. */
3868 if (TREE_CODE (step) == INTEGER_CST
3869 && is_gimple_val (init)
3870 && (TREE_CODE (init) != SSA_NAME
3871 || get_value_range (init)->type == VR_RANGE))
3872 {
3873 double_int nit;
3874
3875 /* We are only entering here for loop header PHI nodes, so using
3876 the number of latch executions is the correct thing to use. */
3877 if (max_loop_iterations (loop, &nit))
3878 {
3879 value_range_t maxvr = VR_INITIALIZER;
3880 double_int dtmp;
3881 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3882 bool overflow = false;
3883
3884 dtmp = tree_to_double_int (step)
3885 .mul_with_sign (nit, unsigned_p, &overflow);
3886 /* If the multiplication overflowed we can't do a meaningful
3887 adjustment. Likewise if the result doesn't fit in the type
3888 of the induction variable. For a signed type we have to
3889 check whether the result has the expected signedness which
3890 is that of the step as number of iterations is unsigned. */
3891 if (!overflow
3892 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3893 && (unsigned_p
3894 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3895 {
3896 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3897 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3898 TREE_TYPE (init), init, tem);
3899 /* Likewise if the addition did. */
3900 if (maxvr.type == VR_RANGE)
3901 {
3902 tmin = maxvr.min;
3903 tmax = maxvr.max;
3904 }
3905 }
3906 }
3907 }
3908
/* Combine the bounds derived above with the current contents of VR.  */
3909 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3910 {
3911 min = tmin;
3912 max = tmax;
3913
3914 /* For VARYING or UNDEFINED ranges, just about anything we get
3915 from scalar evolutions should be better. */
3916
3917 if (dir == EV_DIR_DECREASES)
3918 max = init;
3919 else
3920 min = init;
3921
3922 /* If we would create an invalid range, then just assume we
3923 know absolutely nothing. This may be over-conservative,
3924 but it's clearly safe, and should happen only in unreachable
3925 parts of code, or for invalid programs. */
3926 if (compare_values (min, max) == 1)
3927 return;
3928
3929 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3930 }
3931 else if (vr->type == VR_RANGE)
3932 {
3933 min = vr->min;
3934 max = vr->max;
3935
3936 if (dir == EV_DIR_DECREASES)
3937 {
3938 /* INIT is the maximum value. If INIT is lower than VR->MAX
3939 but no smaller than VR->MIN, set VR->MAX to INIT. */
3940 if (compare_values (init, max) == -1)
3941 max = init;
3942
3943 /* According to the loop information, the variable does not
3944 overflow. If we think it does, probably because of an
3945 overflow due to arithmetic on a different INF value,
3946 reset now. */
3947 if (is_negative_overflow_infinity (min)
3948 || compare_values (min, tmin) == -1)
3949 min = tmin;
3950
3951 }
3952 else
3953 {
3954 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3955 if (compare_values (init, min) == 1)
3956 min = init;
3957
3958 if (is_positive_overflow_infinity (max)
3959 || compare_values (tmax, max) == -1)
3960 max = tmax;
3961 }
3962
3963 /* If we just created an invalid range with the minimum
3964 greater than the maximum, we fail conservatively.
3965 This should happen only in unreachable
3966 parts of code, or for invalid programs. */
3967 if (compare_values (min, max) == 1)
3968 return;
3969
3970 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3971 }
3972 }
3973
3974 /* Return true if VAR may overflow at STMT. This checks any available
3975 loop information to see if we can determine that VAR does not
3976 overflow. */
3977
3978 static bool
3979 vrp_var_may_overflow (tree var, gimple stmt)
3980 {
3981 struct loop *l;
3982 tree chrec, init, step;
3983
3984 if (current_loops == NULL)
3985 return true;
3986
3987 l = loop_containing_stmt (stmt);
3988 if (l == NULL
3989 || !loop_outer (l))
3990 return true;
3991
3992 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3993 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3994 return true;
3995
3996 init = initial_condition_in_loop_num (chrec, l->num);
3997 step = evolution_part_in_loop_num (chrec, l->num);
3998
3999 if (step == NULL_TREE
4000 || !is_gimple_min_invariant (step)
4001 || !valid_value_p (init))
4002 return true;
4003
4004 /* If we get here, we know something useful about VAR based on the
4005 loop information. If it wraps, it may overflow. */
4006
4007 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4008 true))
4009 return true;
4010
4011 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
4012 {
4013 print_generic_expr (dump_file, var, 0);
4014 fprintf (dump_file, ": loop information indicates does not overflow\n");
4015 }
4016
4017 return false;
4018 }
4019
4020
4021 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4022
4023 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4024 all the values in the ranges.
4025
4026 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4027
4028 - Return NULL_TREE if it is not always possible to determine the
4029 value of the comparison.
4030
4031 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4032 overflow infinity was used in the test. */
4033
4034
static tree
compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
		bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges cannot be compared.  */
  if (vr0->type == VR_VARYING
      || vr0->type == VR_UNDEFINED
      || vr1->type == VR_VARYING
      || vr1->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
    {
      /* If both are anti-ranges, then we cannot compute any
	 comparison.  */
      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
	return NULL_TREE;

      /* These comparisons are never statically computable.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* Equality can be computed only between a range and an
	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
      if (vr0->type == VR_RANGE)
	{
	  /* To simplify processing, make VR0 the anti-range.  */
	  value_range_t *tmp = vr0;
	  vr0 = vr1;
	  vr1 = tmp;
	}

      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);

      /* An anti-range with the same bounds as the range excludes
	 exactly the values the range contains, so the two can never
	 be equal.  */
      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr0, strict_overflow_p)
      || !usable_range_p (vr1, strict_overflow_p))
    return NULL_TREE;

  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
     operands around and change the comparison code.  */
  if (comp == GT_EXPR || comp == GE_EXPR)
    {
      value_range_t *tmp;
      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
      tmp = vr0;
      vr0 = vr1;
      vr1 = tmp;
    }

  if (comp == EQ_EXPR)
    {
      /* Equality may only be computed if both ranges represent
	 exactly one value.  */
      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
	{
	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
					      strict_overflow_p);
	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
					      strict_overflow_p);
	  if (cmp_min == 0 && cmp_max == 0)
	    return boolean_true_node;
	  /* NOTE(review): -2 appears to mean "could not be compared";
	     any other disagreement proves the singletons differ.  */
	  else if (cmp_min != -2 && cmp_max != -2)
	    return boolean_false_node;
	}
      /* If [V0_MIN, V0_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
      else if (compare_values_warnv (vr0->min, vr1->max,
				     strict_overflow_p) == 1
	       || compare_values_warnv (vr1->min, vr0->max,
					strict_overflow_p) == 1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      int cmp1, cmp2;

      /* If VR0 is completely to the left or completely to the right
	 of VR1, they are always different.  Notice that we need to
	 make sure that both comparisons yield similar results to
	 avoid comparing values that cannot be compared at
	 compile-time.  */
      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
	return boolean_true_node;

      /* If VR0 and VR1 represent a single value and are identical,
	 return false.  */
      else if (compare_values_warnv (vr0->min, vr0->max,
				     strict_overflow_p) == 0
	       && compare_values_warnv (vr1->min, vr1->max,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->min, vr1->min,
					strict_overflow_p) == 0
	       && compare_values_warnv (vr0->max, vr1->max,
					strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      else
	return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR0 is to the left of VR1, return true.  */
      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The answer relies on an overflow-infinity bound; tell the
	     caller so it can warn about undefined signed overflow.  */
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR0 is to the right of VR1, return false.  */
      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr0)
	      || overflow_infinity_range_p (vr1))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* GT_EXPR/GE_EXPR were canonicalized to LT_EXPR/LE_EXPR above, so
     every valid comparison code has been handled.  */
  gcc_unreachable ();
}
4182
4183
/* Given a value range VR, a value VAL and a comparison code COMP, return
   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
   always returns false.  Return NULL_TREE if it is not always
   possible to determine the value of the comparison.  Also set
   *STRICT_OVERFLOW_P to indicate whether a range with an overflow
   infinity was used in the test.  */

static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  /* VARYING or UNDEFINED ranges give no information.  */
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  /* NOTE(review): a result of 2 presumably means "unequal
	     because of overflow infinities"; confirm against
	     compare_values_warnv.  */
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* If VAL lies strictly outside [VR->MIN, VR->MAX], equality is
	 impossible.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The answer relies on an overflow-infinity bound; tell the
	     caller so it can warn about undefined signed overflow.  */
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes handled above.  */
  gcc_unreachable ();
}
4311
4312
4313 /* Debugging dumps. */
4314
4315 void dump_value_range (FILE *, value_range_t *);
4316 void debug_value_range (value_range_t *);
4317 void dump_all_value_ranges (FILE *);
4318 void debug_all_value_ranges (void);
4319 void dump_vr_equiv (FILE *, bitmap);
4320 void debug_vr_equiv (bitmap);
4321
4322
4323 /* Dump value range VR to FILE. */
4324
4325 void
4326 dump_value_range (FILE *file, value_range_t *vr)
4327 {
4328 if (vr == NULL)
4329 fprintf (file, "[]");
4330 else if (vr->type == VR_UNDEFINED)
4331 fprintf (file, "UNDEFINED");
4332 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4333 {
4334 tree type = TREE_TYPE (vr->min);
4335
4336 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4337
4338 if (is_negative_overflow_infinity (vr->min))
4339 fprintf (file, "-INF(OVF)");
4340 else if (INTEGRAL_TYPE_P (type)
4341 && !TYPE_UNSIGNED (type)
4342 && vrp_val_is_min (vr->min))
4343 fprintf (file, "-INF");
4344 else
4345 print_generic_expr (file, vr->min, 0);
4346
4347 fprintf (file, ", ");
4348
4349 if (is_positive_overflow_infinity (vr->max))
4350 fprintf (file, "+INF(OVF)");
4351 else if (INTEGRAL_TYPE_P (type)
4352 && vrp_val_is_max (vr->max))
4353 fprintf (file, "+INF");
4354 else
4355 print_generic_expr (file, vr->max, 0);
4356
4357 fprintf (file, "]");
4358
4359 if (vr->equiv)
4360 {
4361 bitmap_iterator bi;
4362 unsigned i, c = 0;
4363
4364 fprintf (file, " EQUIVALENCES: { ");
4365
4366 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4367 {
4368 print_generic_expr (file, ssa_name (i), 0);
4369 fprintf (file, " ");
4370 c++;
4371 }
4372
4373 fprintf (file, "} (%u elements)", c);
4374 }
4375 }
4376 else if (vr->type == VR_VARYING)
4377 fprintf (file, "VARYING");
4378 else
4379 fprintf (file, "INVALID RANGE");
4380 }
4381
4382
4383 /* Dump value range VR to stderr. */
4384
4385 DEBUG_FUNCTION void
4386 debug_value_range (value_range_t *vr)
4387 {
4388 dump_value_range (stderr, vr);
4389 fprintf (stderr, "\n");
4390 }
4391
4392
4393 /* Dump value ranges of all SSA_NAMEs to FILE. */
4394
4395 void
4396 dump_all_value_ranges (FILE *file)
4397 {
4398 size_t i;
4399
4400 for (i = 0; i < num_vr_values; i++)
4401 {
4402 if (vr_value[i])
4403 {
4404 print_generic_expr (file, ssa_name (i), 0);
4405 fprintf (file, ": ");
4406 dump_value_range (file, vr_value[i]);
4407 fprintf (file, "\n");
4408 }
4409 }
4410
4411 fprintf (file, "\n");
4412 }
4413
4414
4415 /* Dump all value ranges to stderr. */
4416
4417 DEBUG_FUNCTION void
4418 debug_all_value_ranges (void)
4419 {
4420 dump_all_value_ranges (stderr);
4421 }
4422
4423
4424 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4425 create a new SSA name N and return the assertion assignment
4426 'V = ASSERT_EXPR <V, V OP W>'. */
4427
4428 static gimple
4429 build_assert_expr_for (tree cond, tree v)
4430 {
4431 tree a;
4432 gimple assertion;
4433
4434 gcc_assert (TREE_CODE (v) == SSA_NAME
4435 && COMPARISON_CLASS_P (cond));
4436
4437 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4438 assertion = gimple_build_assign (NULL_TREE, a);
4439
4440 /* The new ASSERT_EXPR, creates a new SSA name that replaces the
4441 operand of the ASSERT_EXPR. Create it so the new name and the old one
4442 are registered in the replacement table so that we can fix the SSA web
4443 after adding all the ASSERT_EXPRs. */
4444 create_new_def_for (v, assertion, NULL);
4445
4446 return assertion;
4447 }
4448
4449
4450 /* Return false if EXPR is a predicate expression involving floating
4451 point values. */
4452
4453 static inline bool
4454 fp_predicate (gimple stmt)
4455 {
4456 GIMPLE_CHECK (stmt, GIMPLE_COND);
4457
4458 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4459 }
4460
4461 /* If the range of values taken by OP can be inferred after STMT executes,
4462 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4463 describes the inferred range. Return true if a range could be
4464 inferred. */
4465
4466 static bool
4467 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4468 {
4469 *val_p = NULL_TREE;
4470 *comp_code_p = ERROR_MARK;
4471
4472 /* Do not attempt to infer anything in names that flow through
4473 abnormal edges. */
4474 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4475 return false;
4476
4477 /* Similarly, don't infer anything from statements that may throw
4478 exceptions. ??? Relax this requirement? */
4479 if (stmt_could_throw_p (stmt))
4480 return false;
4481
4482 /* If STMT is the last statement of a basic block with no
4483 successors, there is no point inferring anything about any of its
4484 operands. We would not be able to find a proper insertion point
4485 for the assertion, anyway. */
4486 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4487 return false;
4488
4489 if (infer_nonnull_range (stmt, op))
4490 {
4491 *val_p = build_int_cst (TREE_TYPE (op), 0);
4492 *comp_code_p = NE_EXPR;
4493 return true;
4494 }
4495
4496 return false;
4497 }
4498
4499
4500 void dump_asserts_for (FILE *, tree);
4501 void debug_asserts_for (tree);
4502 void dump_all_asserts (FILE *);
4503 void debug_all_asserts (void);
4504
4505 /* Dump all the registered assertions for NAME to FILE. */
4506
4507 void
4508 dump_asserts_for (FILE *file, tree name)
4509 {
4510 assert_locus_t loc;
4511
4512 fprintf (file, "Assertions to be inserted for ");
4513 print_generic_expr (file, name, 0);
4514 fprintf (file, "\n");
4515
4516 loc = asserts_for[SSA_NAME_VERSION (name)];
4517 while (loc)
4518 {
4519 fprintf (file, "\t");
4520 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4521 fprintf (file, "\n\tBB #%d", loc->bb->index);
4522 if (loc->e)
4523 {
4524 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4525 loc->e->dest->index);
4526 dump_edge_info (file, loc->e, dump_flags, 0);
4527 }
4528 fprintf (file, "\n\tPREDICATE: ");
4529 print_generic_expr (file, name, 0);
4530 fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
4531 print_generic_expr (file, loc->val, 0);
4532 fprintf (file, "\n\n");
4533 loc = loc->next;
4534 }
4535
4536 fprintf (file, "\n");
4537 }
4538
4539
4540 /* Dump all the registered assertions for NAME to stderr. */
4541
4542 DEBUG_FUNCTION void
4543 debug_asserts_for (tree name)
4544 {
4545 dump_asserts_for (stderr, name);
4546 }
4547
4548
4549 /* Dump all the registered assertions for all the names to FILE. */
4550
4551 void
4552 dump_all_asserts (FILE *file)
4553 {
4554 unsigned i;
4555 bitmap_iterator bi;
4556
4557 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4558 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4559 dump_asserts_for (file, ssa_name (i));
4560 fprintf (file, "\n");
4561 }
4562
4563
4564 /* Dump all the registered assertions for all the names to stderr. */
4565
4566 DEBUG_FUNCTION void
4567 debug_all_asserts (void)
4568 {
4569 dump_all_asserts (stderr);
4570 }
4571
4572
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E describes the insertion point.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* Look for an existing entry asserting the same predicate on
	 the same expression.  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  /* Record that NAME needs at least one assertion inserted.  */
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
4683
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* We have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP does not handle float types.  */
  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      /* NAME > TYPE_MAX is always false.  */
      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      /* NAME < TYPE_MIN is always false.  */
      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
4751
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static double_int
masked_increment (double_int val, double_int mask, double_int sgnbit,
		  unsigned int prec)
{
  double_int bit = double_int_one, res;
  unsigned int i;

  /* Map VAL into the unsigned domain so that ordinary unsigned
     comparison (ugt below) gives the right order.  */
  val ^= sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Only bits present in MASK can be set in the result.  */
      if ((res & bit).is_zero ())
	continue;
      /* Candidate: VAL + BIT with all bits below BIT cleared,
	 restricted to MASK.  */
      res = bit - double_int_one;
      res = (val + bit).and_not (res);
      res &= mask;
      /* BIT grows from the least significant position upward, so the
	 first candidate above VAL is the smallest one.  */
      if (res.ugt (val))
	return res ^ sgnbit;
    }
  /* No value representable under MASK exceeds VAL; give up and return
     VAL itself (mapped back to the signed domain).  */
  return val ^ sgnbit;
}
4779
4780 /* Try to register an edge assertion for SSA name NAME on edge E for
4781 the condition COND contributing to the conditional jump pointed to by BSI.
4782 Invert the condition COND if INVERT is true.
4783 Return true if an assertion for NAME could be registered. */
4784
4785 static bool
4786 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4787 enum tree_code cond_code,
4788 tree cond_op0, tree cond_op1, bool invert)
4789 {
4790 tree val;
4791 enum tree_code comp_code;
4792 bool retval = false;
4793
4794 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4795 cond_op0,
4796 cond_op1,
4797 invert, &comp_code, &val))
4798 return false;
4799
4800 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4801 reachable from E. */
4802 if (live_on_edge (e, name)
4803 && !has_single_use (name))
4804 {
4805 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4806 retval = true;
4807 }
4808
4809 /* In the case of NAME <= CST and NAME being defined as
4810 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4811 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4812 This catches range and anti-range tests. */
4813 if ((comp_code == LE_EXPR
4814 || comp_code == GT_EXPR)
4815 && TREE_CODE (val) == INTEGER_CST
4816 && TYPE_UNSIGNED (TREE_TYPE (val)))
4817 {
4818 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4819 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4820
4821 /* Extract CST2 from the (optional) addition. */
4822 if (is_gimple_assign (def_stmt)
4823 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4824 {
4825 name2 = gimple_assign_rhs1 (def_stmt);
4826 cst2 = gimple_assign_rhs2 (def_stmt);
4827 if (TREE_CODE (name2) == SSA_NAME
4828 && TREE_CODE (cst2) == INTEGER_CST)
4829 def_stmt = SSA_NAME_DEF_STMT (name2);
4830 }
4831
4832 /* Extract NAME2 from the (optional) sign-changing cast. */
4833 if (gimple_assign_cast_p (def_stmt))
4834 {
4835 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4836 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4837 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4838 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4839 name3 = gimple_assign_rhs1 (def_stmt);
4840 }
4841
4842 /* If name3 is used later, create an ASSERT_EXPR for it. */
4843 if (name3 != NULL_TREE
4844 && TREE_CODE (name3) == SSA_NAME
4845 && (cst2 == NULL_TREE
4846 || TREE_CODE (cst2) == INTEGER_CST)
4847 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4848 && live_on_edge (e, name3)
4849 && !has_single_use (name3))
4850 {
4851 tree tmp;
4852
4853 /* Build an expression for the range test. */
4854 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4855 if (cst2 != NULL_TREE)
4856 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4857
4858 if (dump_file)
4859 {
4860 fprintf (dump_file, "Adding assert for ");
4861 print_generic_expr (dump_file, name3, 0);
4862 fprintf (dump_file, " from ");
4863 print_generic_expr (dump_file, tmp, 0);
4864 fprintf (dump_file, "\n");
4865 }
4866
4867 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4868
4869 retval = true;
4870 }
4871
4872 /* If name2 is used later, create an ASSERT_EXPR for it. */
4873 if (name2 != NULL_TREE
4874 && TREE_CODE (name2) == SSA_NAME
4875 && TREE_CODE (cst2) == INTEGER_CST
4876 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4877 && live_on_edge (e, name2)
4878 && !has_single_use (name2))
4879 {
4880 tree tmp;
4881
4882 /* Build an expression for the range test. */
4883 tmp = name2;
4884 if (TREE_TYPE (name) != TREE_TYPE (name2))
4885 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4886 if (cst2 != NULL_TREE)
4887 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4888
4889 if (dump_file)
4890 {
4891 fprintf (dump_file, "Adding assert for ");
4892 print_generic_expr (dump_file, name2, 0);
4893 fprintf (dump_file, " from ");
4894 print_generic_expr (dump_file, tmp, 0);
4895 fprintf (dump_file, "\n");
4896 }
4897
4898 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4899
4900 retval = true;
4901 }
4902 }
4903
4904 /* In the case of post-in/decrement tests like if (i++) ... and uses
4905 of the in/decremented value on the edge the extra name we want to
4906 assert for is not on the def chain of the name compared. Instead
4907 it is in the set of use stmts. */
4908 if ((comp_code == NE_EXPR
4909 || comp_code == EQ_EXPR)
4910 && TREE_CODE (val) == INTEGER_CST)
4911 {
4912 imm_use_iterator ui;
4913 gimple use_stmt;
4914 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4915 {
4916 /* Cut off to use-stmts that are in the predecessor. */
4917 if (gimple_bb (use_stmt) != e->src)
4918 continue;
4919
4920 if (!is_gimple_assign (use_stmt))
4921 continue;
4922
4923 enum tree_code code = gimple_assign_rhs_code (use_stmt);
4924 if (code != PLUS_EXPR
4925 && code != MINUS_EXPR)
4926 continue;
4927
4928 tree cst = gimple_assign_rhs2 (use_stmt);
4929 if (TREE_CODE (cst) != INTEGER_CST)
4930 continue;
4931
4932 tree name2 = gimple_assign_lhs (use_stmt);
4933 if (live_on_edge (e, name2))
4934 {
4935 cst = int_const_binop (code, val, cst);
4936 register_new_assert_for (name2, name2, comp_code, cst,
4937 NULL, e, bsi);
4938 retval = true;
4939 }
4940 }
4941 }
4942
4943 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4944 && TREE_CODE (val) == INTEGER_CST)
4945 {
4946 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4947 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4948 tree val2 = NULL_TREE;
4949 double_int mask = double_int_zero;
4950 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4951 unsigned int nprec = prec;
4952 enum tree_code rhs_code = ERROR_MARK;
4953
4954 if (is_gimple_assign (def_stmt))
4955 rhs_code = gimple_assign_rhs_code (def_stmt);
4956
4957 /* Add asserts for NAME cmp CST and NAME being defined
4958 as NAME = (int) NAME2. */
4959 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4960 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4961 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4962 && gimple_assign_cast_p (def_stmt))
4963 {
4964 name2 = gimple_assign_rhs1 (def_stmt);
4965 if (CONVERT_EXPR_CODE_P (rhs_code)
4966 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4967 && TYPE_UNSIGNED (TREE_TYPE (name2))
4968 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4969 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4970 || !tree_int_cst_equal (val,
4971 TYPE_MIN_VALUE (TREE_TYPE (val))))
4972 && live_on_edge (e, name2)
4973 && !has_single_use (name2))
4974 {
4975 tree tmp, cst;
4976 enum tree_code new_comp_code = comp_code;
4977
4978 cst = fold_convert (TREE_TYPE (name2),
4979 TYPE_MIN_VALUE (TREE_TYPE (val)));
4980 /* Build an expression for the range test. */
4981 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4982 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4983 fold_convert (TREE_TYPE (name2), val));
4984 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4985 {
4986 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4987 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4988 build_int_cst (TREE_TYPE (name2), 1));
4989 }
4990
4991 if (dump_file)
4992 {
4993 fprintf (dump_file, "Adding assert for ");
4994 print_generic_expr (dump_file, name2, 0);
4995 fprintf (dump_file, " from ");
4996 print_generic_expr (dump_file, tmp, 0);
4997 fprintf (dump_file, "\n");
4998 }
4999
5000 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5001 e, bsi);
5002
5003 retval = true;
5004 }
5005 }
5006
5007 /* Add asserts for NAME cmp CST and NAME being defined as
5008 NAME = NAME2 >> CST2.
5009
5010 Extract CST2 from the right shift. */
5011 if (rhs_code == RSHIFT_EXPR)
5012 {
5013 name2 = gimple_assign_rhs1 (def_stmt);
5014 cst2 = gimple_assign_rhs2 (def_stmt);
5015 if (TREE_CODE (name2) == SSA_NAME
5016 && host_integerp (cst2, 1)
5017 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5018 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
5019 && prec <= HOST_BITS_PER_DOUBLE_INT
5020 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5021 && live_on_edge (e, name2)
5022 && !has_single_use (name2))
5023 {
5024 mask = double_int::mask (tree_low_cst (cst2, 1));
5025 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5026 }
5027 }
5028 if (val2 != NULL_TREE
5029 && TREE_CODE (val2) == INTEGER_CST
5030 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5031 TREE_TYPE (val),
5032 val2, cst2), val))
5033 {
5034 enum tree_code new_comp_code = comp_code;
5035 tree tmp, new_val;
5036
5037 tmp = name2;
5038 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5039 {
5040 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5041 {
5042 tree type = build_nonstandard_integer_type (prec, 1);
5043 tmp = build1 (NOP_EXPR, type, name2);
5044 val2 = fold_convert (type, val2);
5045 }
5046 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5047 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
5048 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5049 }
5050 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5051 {
5052 double_int minval
5053 = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5054 new_val = val2;
5055 if (minval == tree_to_double_int (new_val))
5056 new_val = NULL_TREE;
5057 }
5058 else
5059 {
5060 double_int maxval
5061 = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5062 mask |= tree_to_double_int (val2);
5063 if (mask == maxval)
5064 new_val = NULL_TREE;
5065 else
5066 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
5067 }
5068
5069 if (new_val)
5070 {
5071 if (dump_file)
5072 {
5073 fprintf (dump_file, "Adding assert for ");
5074 print_generic_expr (dump_file, name2, 0);
5075 fprintf (dump_file, " from ");
5076 print_generic_expr (dump_file, tmp, 0);
5077 fprintf (dump_file, "\n");
5078 }
5079
5080 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5081 NULL, e, bsi);
5082 retval = true;
5083 }
5084 }
5085
5086 /* Add asserts for NAME cmp CST and NAME being defined as
5087 NAME = NAME2 & CST2.
5088
5089 Extract CST2 from the and.
5090
5091 Also handle
5092 NAME = (unsigned) NAME2;
5093 casts where NAME's type is unsigned and has smaller precision
5094 than NAME2's type as if it was NAME = NAME2 & MASK. */
5095 names[0] = NULL_TREE;
5096 names[1] = NULL_TREE;
5097 cst2 = NULL_TREE;
5098 if (rhs_code == BIT_AND_EXPR
5099 || (CONVERT_EXPR_CODE_P (rhs_code)
5100 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5101 && TYPE_UNSIGNED (TREE_TYPE (val))
5102 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5103 > prec
5104 && !retval))
5105 {
5106 name2 = gimple_assign_rhs1 (def_stmt);
5107 if (rhs_code == BIT_AND_EXPR)
5108 cst2 = gimple_assign_rhs2 (def_stmt);
5109 else
5110 {
5111 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5112 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5113 }
5114 if (TREE_CODE (name2) == SSA_NAME
5115 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5116 && TREE_CODE (cst2) == INTEGER_CST
5117 && !integer_zerop (cst2)
5118 && nprec <= HOST_BITS_PER_DOUBLE_INT
5119 && (nprec > 1
5120 || TYPE_UNSIGNED (TREE_TYPE (val))))
5121 {
5122 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
5123 if (gimple_assign_cast_p (def_stmt2))
5124 {
5125 names[1] = gimple_assign_rhs1 (def_stmt2);
5126 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5127 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5128 || (TYPE_PRECISION (TREE_TYPE (name2))
5129 != TYPE_PRECISION (TREE_TYPE (names[1])))
5130 || !live_on_edge (e, names[1])
5131 || has_single_use (names[1]))
5132 names[1] = NULL_TREE;
5133 }
5134 if (live_on_edge (e, name2)
5135 && !has_single_use (name2))
5136 names[0] = name2;
5137 }
5138 }
5139 if (names[0] || names[1])
5140 {
5141 double_int minv, maxv = double_int_zero, valv, cst2v;
5142 double_int tem, sgnbit;
5143 bool valid_p = false, valn = false, cst2n = false;
5144 enum tree_code ccode = comp_code;
5145
5146 valv = tree_to_double_int (val).zext (nprec);
5147 cst2v = tree_to_double_int (cst2).zext (nprec);
5148 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5149 {
5150 valn = valv.sext (nprec).is_negative ();
5151 cst2n = cst2v.sext (nprec).is_negative ();
5152 }
5153 /* If CST2 doesn't have most significant bit set,
5154 but VAL is negative, we have comparison like
5155 if ((x & 0x123) > -4) (always true). Just give up. */
5156 if (!cst2n && valn)
5157 ccode = ERROR_MARK;
5158 if (cst2n)
5159 sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5160 else
5161 sgnbit = double_int_zero;
5162 minv = valv & cst2v;
5163 switch (ccode)
5164 {
5165 case EQ_EXPR:
5166 /* Minimum unsigned value for equality is VAL & CST2
5167 (should be equal to VAL, otherwise we probably should
5168 have folded the comparison into false) and
5169 maximum unsigned value is VAL | ~CST2. */
5170 maxv = valv | ~cst2v;
5171 maxv = maxv.zext (nprec);
5172 valid_p = true;
5173 break;
5174 case NE_EXPR:
5175 tem = valv | ~cst2v;
5176 tem = tem.zext (nprec);
5177 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5178 if (valv.is_zero ())
5179 {
5180 cst2n = false;
5181 sgnbit = double_int_zero;
5182 goto gt_expr;
5183 }
5184 /* If (VAL | ~CST2) is all ones, handle it as
5185 (X & CST2) < VAL. */
5186 if (tem == double_int::mask (nprec))
5187 {
5188 cst2n = false;
5189 valn = false;
5190 sgnbit = double_int_zero;
5191 goto lt_expr;
5192 }
5193 if (!cst2n
5194 && cst2v.sext (nprec).is_negative ())
5195 sgnbit
5196 = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5197 if (!sgnbit.is_zero ())
5198 {
5199 if (valv == sgnbit)
5200 {
5201 cst2n = true;
5202 valn = true;
5203 goto gt_expr;
5204 }
5205 if (tem == double_int::mask (nprec - 1))
5206 {
5207 cst2n = true;
5208 goto lt_expr;
5209 }
5210 if (!cst2n)
5211 sgnbit = double_int_zero;
5212 }
5213 break;
5214 case GE_EXPR:
5215 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5216 is VAL and maximum unsigned value is ~0. For signed
5217 comparison, if CST2 doesn't have most significant bit
5218 set, handle it similarly. If CST2 has MSB set,
5219 the minimum is the same, and maximum is ~0U/2. */
5220 if (minv != valv)
5221 {
5222 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5223 VAL. */
5224 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5225 if (minv == valv)
5226 break;
5227 }
5228 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5229 valid_p = true;
5230 break;
5231 case GT_EXPR:
5232 gt_expr:
5233 /* Find out smallest MINV where MINV > VAL
5234 && (MINV & CST2) == MINV, if any. If VAL is signed and
5235 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5236 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5237 if (minv == valv)
5238 break;
5239 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5240 valid_p = true;
5241 break;
5242 case LE_EXPR:
5243 /* Minimum unsigned value for <= is 0 and maximum
5244 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5245 Otherwise, find smallest VAL2 where VAL2 > VAL
5246 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5247 as maximum.
5248 For signed comparison, if CST2 doesn't have most
5249 significant bit set, handle it similarly. If CST2 has
5250 MSB set, the maximum is the same and minimum is INT_MIN. */
5251 if (minv == valv)
5252 maxv = valv;
5253 else
5254 {
5255 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5256 if (maxv == valv)
5257 break;
5258 maxv -= double_int_one;
5259 }
5260 maxv |= ~cst2v;
5261 maxv = maxv.zext (nprec);
5262 minv = sgnbit;
5263 valid_p = true;
5264 break;
5265 case LT_EXPR:
5266 lt_expr:
5267 /* Minimum unsigned value for < is 0 and maximum
5268 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5269 Otherwise, find smallest VAL2 where VAL2 > VAL
5270 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5271 as maximum.
5272 For signed comparison, if CST2 doesn't have most
5273 significant bit set, handle it similarly. If CST2 has
5274 MSB set, the maximum is the same and minimum is INT_MIN. */
5275 if (minv == valv)
5276 {
5277 if (valv == sgnbit)
5278 break;
5279 maxv = valv;
5280 }
5281 else
5282 {
5283 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5284 if (maxv == valv)
5285 break;
5286 }
5287 maxv -= double_int_one;
5288 maxv |= ~cst2v;
5289 maxv = maxv.zext (nprec);
5290 minv = sgnbit;
5291 valid_p = true;
5292 break;
5293 default:
5294 break;
5295 }
5296 if (valid_p
5297 && (maxv - minv).zext (nprec) != double_int::mask (nprec))
5298 {
5299 tree tmp, new_val, type;
5300 int i;
5301
5302 for (i = 0; i < 2; i++)
5303 if (names[i])
5304 {
5305 double_int maxv2 = maxv;
5306 tmp = names[i];
5307 type = TREE_TYPE (names[i]);
5308 if (!TYPE_UNSIGNED (type))
5309 {
5310 type = build_nonstandard_integer_type (nprec, 1);
5311 tmp = build1 (NOP_EXPR, type, names[i]);
5312 }
5313 if (!minv.is_zero ())
5314 {
5315 tmp = build2 (PLUS_EXPR, type, tmp,
5316 double_int_to_tree (type, -minv));
5317 maxv2 = maxv - minv;
5318 }
5319 new_val = double_int_to_tree (type, maxv2);
5320
5321 if (dump_file)
5322 {
5323 fprintf (dump_file, "Adding assert for ");
5324 print_generic_expr (dump_file, names[i], 0);
5325 fprintf (dump_file, " from ");
5326 print_generic_expr (dump_file, tmp, 0);
5327 fprintf (dump_file, "\n");
5328 }
5329
5330 register_new_assert_for (names[i], tmp, LE_EXPR,
5331 new_val, NULL, e, bsi);
5332 retval = true;
5333 }
5334 }
5335 }
5336 }
5337
5338 return retval;
5339 }
5340
5341 /* OP is an operand of a truth value expression which is known to have
5342 a particular value. Register any asserts for OP and for any
5343 operands in OP's defining statement.
5344
5345 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5346 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5347
5348 static bool
5349 register_edge_assert_for_1 (tree op, enum tree_code code,
5350 edge e, gimple_stmt_iterator bsi)
5351 {
5352 bool retval = false;
5353 gimple op_def;
5354 tree val;
5355 enum tree_code rhs_code;
5356
5357 /* We only care about SSA_NAMEs. */
5358 if (TREE_CODE (op) != SSA_NAME)
5359 return false;
5360
5361 /* We know that OP will have a zero or nonzero value. If OP is used
5362 more than once go ahead and register an assert for OP.
5363
5364 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5365 it will always be set for OP (because OP is used in a COND_EXPR in
5366 the subgraph). */
5367 if (!has_single_use (op))
5368 {
5369 val = build_int_cst (TREE_TYPE (op), 0);
5370 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5371 retval = true;
5372 }
5373
5374 /* Now look at how OP is set. If it's set from a comparison,
5375 a truth operation or some bit operations, then we may be able
5376 to register information about the operands of that assignment. */
5377 op_def = SSA_NAME_DEF_STMT (op);
5378 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5379 return retval;
5380
5381 rhs_code = gimple_assign_rhs_code (op_def);
5382
5383 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5384 {
5385 bool invert = (code == EQ_EXPR ? true : false);
5386 tree op0 = gimple_assign_rhs1 (op_def);
5387 tree op1 = gimple_assign_rhs2 (op_def);
5388
5389 if (TREE_CODE (op0) == SSA_NAME)
5390 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5391 invert);
5392 if (TREE_CODE (op1) == SSA_NAME)
5393 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5394 invert);
5395 }
5396 else if ((code == NE_EXPR
5397 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5398 || (code == EQ_EXPR
5399 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5400 {
5401 /* Recurse on each operand. */
5402 tree op0 = gimple_assign_rhs1 (op_def);
5403 tree op1 = gimple_assign_rhs2 (op_def);
5404 if (TREE_CODE (op0) == SSA_NAME
5405 && has_single_use (op0))
5406 retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5407 if (TREE_CODE (op1) == SSA_NAME
5408 && has_single_use (op1))
5409 retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5410 }
5411 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5412 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5413 {
5414 /* Recurse, flipping CODE. */
5415 code = invert_tree_comparison (code, false);
5416 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5417 code, e, bsi);
5418 }
5419 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5420 {
5421 /* Recurse through the copy. */
5422 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5423 code, e, bsi);
5424 }
5425 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5426 {
5427 /* Recurse through the type conversion. */
5428 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5429 code, e, bsi);
5430 }
5431
5432 return retval;
5433 }
5434
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by SI.
   Return true if an assertion for NAME could be registered.

   COND_CODE, COND_OP0 and COND_OP1 describe the condition of the
   controlling statement; E carries EDGE_FALSE_VALUE when it is the
   else edge, in which case the condition is inverted before being
   registered.  */

static bool
register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1)
{
  tree val;
  enum tree_code comp_code;
  bool retval = false;
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return false;

  /* Normalize the condition into the form NAME COMP_CODE VAL,
     accounting for the edge direction; give up if that fails.  */
  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return false;

  /* Register ASSERT_EXPRs for name.  */
  retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
					cond_op1, is_else_edge);


  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     have zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one.  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
		  || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
	  retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
	}
    }

  return retval;
}
5511
5512
5513 /* Determine whether the outgoing edges of BB should receive an
5514 ASSERT_EXPR for each of the operands of BB's LAST statement.
5515 The last statement of BB must be a COND_EXPR.
5516
5517 If any of the sub-graphs rooted at BB have an interesting use of
5518 the predicate operands, an assert location node is added to the
5519 list of assertions for the corresponding operands. */
5520
5521 static bool
5522 find_conditional_asserts (basic_block bb, gimple last)
5523 {
5524 bool need_assert;
5525 gimple_stmt_iterator bsi;
5526 tree op;
5527 edge_iterator ei;
5528 edge e;
5529 ssa_op_iter iter;
5530
5531 need_assert = false;
5532 bsi = gsi_for_stmt (last);
5533
5534 /* Look for uses of the operands in each of the sub-graphs
5535 rooted at BB. We need to check each of the outgoing edges
5536 separately, so that we know what kind of ASSERT_EXPR to
5537 insert. */
5538 FOR_EACH_EDGE (e, ei, bb->succs)
5539 {
5540 if (e->dest == bb)
5541 continue;
5542
5543 /* Register the necessary assertions for each operand in the
5544 conditional predicate. */
5545 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5546 {
5547 need_assert |= register_edge_assert_for (op, e, bsi,
5548 gimple_cond_code (last),
5549 gimple_cond_lhs (last),
5550 gimple_cond_rhs (last));
5551 }
5552 }
5553
5554 return need_assert;
5555 }
5556
/* Helper for find_switch_asserts: pairs a switch case label with its
   destination block so the labels can be sorted by destination.  */
struct case_info
{
  /* The CASE_LABEL_EXPR of this case.  */
  tree expr;
  /* Basic block the case label transfers control to.  */
  basic_block bb;
};
5562
5563 /* Compare two case labels sorting first by the destination bb index
5564 and then by the case value. */
5565
5566 static int
5567 compare_case_labels (const void *p1, const void *p2)
5568 {
5569 const struct case_info *ci1 = (const struct case_info *) p1;
5570 const struct case_info *ci2 = (const struct case_info *) p2;
5571 int idx1 = ci1->bb->index;
5572 int idx2 = ci2->bb->index;
5573
5574 if (idx1 < idx2)
5575 return -1;
5576 else if (idx1 == idx2)
5577 {
5578 /* Make sure the default label is first in a group. */
5579 if (!CASE_LOW (ci1->expr))
5580 return -1;
5581 else if (!CASE_LOW (ci2->expr))
5582 return 1;
5583 else
5584 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5585 CASE_LOW (ci2->expr));
5586 }
5587 else
5588 return 1;
5589 }
5590
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  Returns true
   if any assertion was registered.  */

static bool
find_switch_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  need_assert = false;
  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  /* Only an SSA_NAME switch index can carry range assertions.  */
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR: OP >= MIN (and, below, OP <= MAX) for a range,
	 or OP == MIN for a single-value label.  */
      need_assert |= register_edge_assert_for (op, e, bsi,
					       max ? GE_EXPR : EQ_EXPR,
					       op,
					       fold_convert (TREE_TYPE (op),
							     min));
      if (max)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
						   op,
						   fold_convert (TREE_TYPE (op),
								 max));
	}
    }

  XDELETEVEC (ci);
  return need_assert;
}
5683
5684
5685 /* Traverse all the statements in block BB looking for statements that
5686 may generate useful assertions for the SSA names in their operand.
5687 If a statement produces a useful assertion A for name N_i, then the
5688 list of assertions already generated for N_i is scanned to
5689 determine if A is actually needed.
5690
5691 If N_i already had the assertion A at a location dominating the
5692 current location, then nothing needs to be done. Otherwise, the
5693 new location for A is recorded instead.
5694
5695 1- For every statement S in BB, all the variables used by S are
5696 added to bitmap FOUND_IN_SUBGRAPH.
5697
5698 2- If statement S uses an operand N in a way that exposes a known
5699 value range for N, then if N was not already generated by an
5700 ASSERT_EXPR, create a new assert location for N. For instance,
5701 if N is a pointer and the statement dereferences it, we can
5702 assume that N is not NULL.
5703
5704 3- COND_EXPRs are a special case of #2. We can derive range
5705 information from the predicate but need to insert different
5706 ASSERT_EXPRs for each of the sub-graphs rooted at the
5707 conditional block. If the last statement of BB is a conditional
5708 expression of the form 'X op Y', then
5709
5710 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5711
5712 b) If the conditional is the only entry point to the sub-graph
5713 corresponding to the THEN_CLAUSE, recurse into it. On
5714 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5715 an ASSERT_EXPR is added for the corresponding variable.
5716
5717 c) Repeat step (b) on the ELSE_CLAUSE.
5718
5719 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5720
5721 For instance,
5722
5723 if (a == 9)
5724 b = a;
5725 else
5726 b = c + 1;
5727
5728 In this case, an assertion on the THEN clause is useful to
5729 determine that 'a' is always 9 on that edge. However, an assertion
5730 on the ELSE clause would be unnecessary.
5731
5732 4- If BB does not end in a conditional expression, then we recurse
5733 into BB's dominator children.
5734
5735 At the end of the recursive traversal, every SSA name will have a
5736 list of locations where ASSERT_EXPRs should be added. When a new
5737 location for name N is found, it is registered by calling
5738 register_new_assert_for. That function keeps track of all the
5739 registered assertions to prevent adding unnecessary assertions.
5740 For instance, if a pointer P_4 is dereferenced more than once in a
5741 dominator tree, only the location dominating all the dereference of
5742 P_4 will receive an ASSERT_EXPR.
5743
5744 If this function returns true, then it means that there are names
5745 for which we need to generate ASSERT_EXPRs. Those assertions are
5746 inserted by process_assert_insertions. */
5747
static bool
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple_stmt_iterator si;
  gimple last;
  bool need_assert;

  need_assert = false;
  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_conditional_asserts (bb, last);

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_switch_asserts (bb, last);

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     The walk is backwards so that LIVE describes liveness below the
     current statement.  */
  for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
    {
      gimple stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      /* Debug statements must not affect code generation.  */
      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple def_stmt = SSA_NAME_DEF_STMT (t);

		  while (is_gimple_assign (def_stmt)
			 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
			 && TREE_CODE
			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			{
			  register_new_assert_for (t, t, comp_code, value,
						   bb, NULL, si);
			  need_assert = true;
			}
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	      need_assert = true;
	    }
	}

      /* Update live: uses become live, definitions die above this
	 statement.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gimple phi = gsi_stmt (si);
      tree res = gimple_phi_result (phi);

      /* Virtual operands carry no range information.  */
      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }

  return need_assert;
}
5870
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.  */

static bool
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block);
  int *bb_rpo = XNEWVEC (int, last_basic_block);
  int *last_rpo = XCNEWVEC (int, last_basic_block);
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block);
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  /* BB_RPO maps a block index back to its position in RPO order.  */
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
     the order we compute liveness and insert asserts we otherwise
     fail to insert asserts into the loop latch.  */
  loop_p loop;
  loop_iterator li;
  FOR_EACH_LOOP (li, loop, 0)
    {
      i = loop->latch->index;
      unsigned int j = single_succ_edge (loop->latch)->dest_idx;
      for (gimple_stmt_iterator gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple phi = gsi_stmt (gsi);
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  tree arg = gimple_phi_arg_def (phi, j);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      /* Lazily allocate the latch's live bitmap.  */
	      if (live[i] == NULL)
		{
		  live[i] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[i]);
		}
	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
	    }
	}
    }

  need_asserts = false;
  /* Walk blocks in reverse RPO so each block is seen after all its
     non-backedge successors, letting liveness flow upwards.  */
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK (rpo[i]);
      edge e;
      edge_iterator ei;

      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Back edges and the entry block do not propagate
		 liveness.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Release any live bitmaps that were not freed during the walk.  */
  for (i = 0; i < last_basic_block; ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}
5986
5987 /* Create an ASSERT_EXPR for NAME and insert it in the location
5988 indicated by LOC. Return true if we made any edge insertions. */
5989
5990 static bool
5991 process_assert_insertions_for (tree name, assert_locus_t loc)
5992 {
5993 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5994 gimple stmt;
5995 tree cond;
5996 gimple assert_stmt;
5997 edge_iterator ei;
5998 edge e;
5999
6000 /* If we have X <=> X do not insert an assert expr for that. */
6001 if (loc->expr == loc->val)
6002 return false;
6003
6004 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
6005 assert_stmt = build_assert_expr_for (cond, name);
6006 if (loc->e)
6007 {
6008 /* We have been asked to insert the assertion on an edge. This
6009 is used only by COND_EXPR and SWITCH_EXPR assertions. */
6010 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
6011 || (gimple_code (gsi_stmt (loc->si))
6012 == GIMPLE_SWITCH));
6013
6014 gsi_insert_on_edge (loc->e, assert_stmt);
6015 return true;
6016 }
6017
6018 /* Otherwise, we can insert right after LOC->SI iff the
6019 statement must not be the last statement in the block. */
6020 stmt = gsi_stmt (loc->si);
6021 if (!stmt_ends_bb_p (stmt))
6022 {
6023 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6024 return false;
6025 }
6026
6027 /* If STMT must be the last statement in BB, we can only insert new
6028 assertions on the non-abnormal edge out of BB. Note that since
6029 STMT is not control flow, there may only be one non-abnormal edge
6030 out of BB. */
6031 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6032 if (!(e->flags & EDGE_ABNORMAL))
6033 {
6034 gsi_insert_on_edge (e, assert_stmt);
6035 return true;
6036 }
6037
6038 gcc_unreachable ();
6039 }
6040
6041
6042 /* Process all the insertions registered for every name N_i registered
6043 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6044 found in ASSERTS_FOR[i]. */
6045
6046 static void
6047 process_assert_insertions (void)
6048 {
6049 unsigned i;
6050 bitmap_iterator bi;
6051 bool update_edges_p = false;
6052 int num_asserts = 0;
6053
6054 if (dump_file && (dump_flags & TDF_DETAILS))
6055 dump_all_asserts (dump_file);
6056
6057 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6058 {
6059 assert_locus_t loc = asserts_for[i];
6060 gcc_assert (loc);
6061
6062 while (loc)
6063 {
6064 assert_locus_t next = loc->next;
6065 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6066 free (loc);
6067 loc = next;
6068 num_asserts++;
6069 }
6070 }
6071
6072 if (update_edges_p)
6073 gsi_commit_edge_inserts ();
6074
6075 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6076 num_asserts);
6077 }
6078
6079
6080 /* Traverse the flowgraph looking for conditional jumps to insert range
6081 expressions. These range expressions are meant to provide information
6082 to optimizations that need to reason in terms of value ranges. They
6083 will not be expanded into RTL. For instance, given:
6084
6085 x = ...
6086 y = ...
6087 if (x < y)
6088 y = x - 2;
6089 else
6090 x = y + 3;
6091
6092 this pass will transform the code into:
6093
6094 x = ...
6095 y = ...
6096 if (x < y)
6097 {
6098 x = ASSERT_EXPR <x, x < y>
6099 y = x - 2
6100 }
6101 else
6102 {
6103 y = ASSERT_EXPR <y, x <= y>
6104 x = y + 3
6105 }
6106
6107 The idea is that once copy and constant propagation have run, other
6108 optimizations will be able to determine what ranges of values can 'x'
6109 take in different paths of the code, simply by checking the reaching
6110 definition of 'x'. */
6111
static void
insert_range_assertions (void)
{
  /* Global tables used to record the pending assertions.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  /* NOTE(review): presumably needed for the dominance queries made
     while registering assertions -- confirm against
     register_new_assert_for.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* The inserted ASSERT_EXPRs define new values; bring the
	 function back into valid SSA form.  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
6135
6136 /* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
6137 and "struct" hacks. If VRP can determine that the
6138 array subscript is a constant, check if it is outside valid
6139 range. If the array subscript is a RANGE, warn if it is
6140 non-overlapping with valid range.
6141 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
6142
static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Warn at most once per reference.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the types array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Scan for a field declared after the accessed one; if none is
	 found the array is the trailing member of the record (the
	 "struct hack") or a union member, and we must not warn.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  /* Upper bound plus one: taking the address one past the end is
     valid when IGNORE_OFF_BY_ONE (ref is inside an ADDR_EXPR).  */
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);

  /* When the subscript is an SSA name with a known range, test the
     range's endpoints against the bounds.  For a VR_RANGE, LOW_SUB
     becomes the range maximum (checked against the upper bound) and
     UP_SUB the minimum; for a VR_ANTI_RANGE the roles are flipped.  */
  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
        {
          low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
          up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
        }
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* An anti-range subscript is certainly out of bounds only when
	 the excluded interval covers all valid indices.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
          && tree_int_cst_lt (up_bound, up_sub)
          && TREE_CODE (low_sub) == INTEGER_CST
          && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
           && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
6241
6242 /* Searches if the expr T, located at LOCATION computes
6243 address of an ARRAY_REF, and call check_array_ref on it. */
6244
static void
search_for_addr_array (tree t, location_t location)
{
  /* Strip through chains of single-RHS copies to reach the address
     computation T ultimately stems from.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REFs in the reference chain. */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      /* MEM_REF of &array with a constant offset: reconstruct the
	 element index from the byte offset and element size, then
	 compare it against the array's domain bounds.  */
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      double_int idx;
      /* Only handle one-dimensional arrays with a known domain.  */
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      /* Bounds and element size must all be compile-time constants
	 for the check to be meaningful.  */
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      idx = mem_ref_offset (t);
      idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
      if (idx.slt (double_int_zero))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      /* NB: the index may legitimately be one past UP_BOUND-LOW_BOUND
	 (address of one-past-the-end), hence the "+ 1" slack here.  */
      else if (idx.sgt (tree_to_double_int (up_bound)
			- tree_to_double_int (low_bound)
			+ double_int_one))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
6330
6331 /* walk_tree() callback that checks if *TP is
6332 an ARRAY_REF inside an ADDR_EXPR (in which an array
6333 subscript one outside the valid range is allowed). Call
6334 check_array_ref for each ARRAY_REF found. The location is
6335 passed in DATA. */
6336
6337 static tree
6338 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6339 {
6340 tree t = *tp;
6341 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6342 location_t location;
6343
6344 if (EXPR_HAS_LOCATION (t))
6345 location = EXPR_LOCATION (t);
6346 else
6347 {
6348 location_t *locp = (location_t *) wi->info;
6349 location = *locp;
6350 }
6351
6352 *walk_subtree = TRUE;
6353
6354 if (TREE_CODE (t) == ARRAY_REF)
6355 check_array_ref (location, t, false /*ignore_off_by_one*/);
6356
6357 if (TREE_CODE (t) == MEM_REF
6358 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
6359 search_for_addr_array (TREE_OPERAND (t, 0), location);
6360
6361 if (TREE_CODE (t) == ADDR_EXPR)
6362 *walk_subtree = FALSE;
6363
6364 return NULL_TREE;
6365 }
6366
6367 /* Walk over all statements of all reachable BBs and call check_array_bounds
6368 on them. */
6369
static void
check_all_array_refs (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;
      edge e;
      bool executable = false;

      /* Skip blocks that were found to be unreachable.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	executable |= !!(e->flags & EDGE_EXECUTABLE);
      if (!executable)
	continue;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  struct walk_stmt_info wi;
	  /* No location means no place to anchor a warning.  */
	  if (!gimple_has_location (stmt))
	    continue;

	  if (is_gimple_call (stmt))
	    {
	      /* For calls, look for array addresses passed as
		 arguments.  */
	      size_t i;
	      size_t n = gimple_call_num_args (stmt);
	      for (i = 0; i < n; i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  search_for_addr_array (arg, gimple_location (stmt));
		}
	    }
	  else
	    {
	      /* Otherwise walk every operand, passing the statement
		 location through WI.info for check_array_bounds to
		 fall back on.  */
	      memset (&wi, 0, sizeof (wi));
	      wi.info = CONST_CAST (void *, (const void *)
				    gimple_location_ptr (stmt));

	      walk_gimple_op (gsi_stmt (si),
			      check_array_bounds,
			      &wi);
	    }
	}
    }
}
6418
6419 /* Return true if all imm uses of VAR are either in STMT, or
6420 feed (optionally through a chain of single imm uses) GIMPLE_COND
6421 in basic block COND_BB. */
6422
6423 static bool
6424 all_imm_uses_in_stmt_or_feed_cond (tree var, gimple stmt, basic_block cond_bb)
6425 {
6426 use_operand_p use_p, use2_p;
6427 imm_use_iterator iter;
6428
6429 FOR_EACH_IMM_USE_FAST (use_p, iter, var)
6430 if (USE_STMT (use_p) != stmt)
6431 {
6432 gimple use_stmt = USE_STMT (use_p), use_stmt2;
6433 if (is_gimple_debug (use_stmt))
6434 continue;
6435 while (is_gimple_assign (use_stmt)
6436 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
6437 && single_imm_use (gimple_assign_lhs (use_stmt),
6438 &use2_p, &use_stmt2))
6439 use_stmt = use_stmt2;
6440 if (gimple_code (use_stmt) != GIMPLE_COND
6441 || gimple_bb (use_stmt) != cond_bb)
6442 return false;
6443 }
6444 return true;
6445 }
6446
6447 /* Handle
6448 _4 = x_3 & 31;
6449 if (_4 != 0)
6450 goto <bb 6>;
6451 else
6452 goto <bb 7>;
6453 <bb 6>:
6454 __builtin_unreachable ();
6455 <bb 7>:
6456 x_5 = ASSERT_EXPR <x_3, ...>;
6457 If x_3 has no other immediate uses (checked by caller),
6458 var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
6459 from the non-zero bitmask. */
6460
static void
maybe_set_nonzero_bits (basic_block bb, tree var)
{
  edge e = single_pred_edge (bb);
  basic_block cond_bb = e->src;
  gimple stmt = last_stmt (cond_bb);
  tree cst;

  /* The predecessor must end in a condition comparing an SSA name
     against zero, and E must be the edge taken when the comparison
     result is zero (EQ on a true edge, NE on a false edge -- the
     other destination is the unreachable block).  */
  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  /* The compared name must be defined as VAR & CST ...  */
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple stmt2;

      /* ... or (T) VAR & CST where the conversion preserves VAR's
	 precision, so the mask applies to the same bits.  */
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  /* On edge E the bits selected by CST are zero in VAR, so clear them
     from its recorded non-zero-bits mask.  */
  cst = gimple_assign_rhs2 (stmt);
  set_nonzero_bits (var, (get_nonzero_bits (var)
			  & ~tree_to_double_int (cst)));
}
6500
6501 /* Convert range assertion expressions into the implied copies and
6502 copy propagate away the copies. Doing the trivial copy propagation
6503 here avoids the need to run the full copy propagation pass after
6504 VRP.
6505
6506 FIXME, this will eventually lead to copy propagation removing the
6507 names that had useful range information attached to them. For
6508 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6509 then N_i will have the range [3, +INF].
6510
6511 However, by converting the assertion into the implied copy
6512 operation N_i = N_j, we will then copy-propagate N_j into the uses
6513 of N_i and lose the range information. We may want to hold on to
6514 ASSERT_EXPRs a little while longer as the ranges could be used in
6515 things like jump threading.
6516
6517 The problem with keeping ASSERT_EXPRs around is that passes after
6518 VRP need to handle them appropriately.
6519
6520 Another approach would be to make the range information a first
6521 class property of the SSA_NAME so that it can be queried from
6522 any pass. This is made somewhat more complex by the need for
6523 multiple ranges to be associated with one SSA_NAME. */
6524
static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;
  /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
     a basic block preceded by GIMPLE_COND branching to it and
     __builtin_trap, -1 if not yet checked, 0 otherwise.  */
  int is_unreachable;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB (bb)
    for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree lhs = gimple_assign_lhs (stmt);
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* A provably-false assertion would mean VRP derived a
	       contradiction; that must not happen.  */
	    gcc_assert (cond != boolean_false_node);

	    var = ASSERT_EXPR_VAR (rhs);
	    gcc_assert (TREE_CODE (var) == SSA_NAME);

	    if (!POINTER_TYPE_P (TREE_TYPE (lhs))
		&& SSA_NAME_RANGE_INFO (lhs))
	      {
		/* Lazily determine (once per block) whether BB is
		   entered through an unreachable-fallthru edge.  */
		if (is_unreachable == -1)
		  {
		    is_unreachable = 0;
		    if (single_pred_p (bb)
			&& assert_unreachable_fallthru_edge_p
				    (single_pred_edge (bb)))
		      is_unreachable = 1;
		  }
		/* Handle
		   if (x_7 >= 10 && x_7 < 20)
		     __builtin_unreachable ();
		   x_8 = ASSERT_EXPR <x_7, ...>;
		   if the only uses of x_7 are in the ASSERT_EXPR and
		   in the condition.  In that case, we can copy the
		   range info from x_8 computed in this pass also
		   for x_7.  */
		if (is_unreachable
		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
							  single_pred (bb)))
		  {
		    set_range_info (var, SSA_NAME_RANGE_INFO (lhs)->min,
				    SSA_NAME_RANGE_INFO (lhs)->max);
		    maybe_set_nonzero_bits (bb, var);
		  }
	      }

	    /* Propagate the RHS into every use of the LHS.  */
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, var);

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  {
	    /* Any other statement ends the leading run of
	       ASSERT_EXPRs; later asserts in this block no longer
	       sit right behind the unreachable-fallthru edge.  */
	    gsi_next (&si);
	    is_unreachable = 0;
	  }
      }
}
6604
6605
6606 /* Return true if STMT is interesting for VRP. */
6607
6608 static bool
6609 stmt_interesting_for_vrp (gimple stmt)
6610 {
6611 if (gimple_code (stmt) == GIMPLE_PHI)
6612 {
6613 tree res = gimple_phi_result (stmt);
6614 return (!virtual_operand_p (res)
6615 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6616 || POINTER_TYPE_P (TREE_TYPE (res))));
6617 }
6618 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6619 {
6620 tree lhs = gimple_get_lhs (stmt);
6621
6622 /* In general, assignments with virtual operands are not useful
6623 for deriving ranges, with the obvious exception of calls to
6624 builtin functions. */
6625 if (lhs && TREE_CODE (lhs) == SSA_NAME
6626 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6627 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6628 && (is_gimple_call (stmt)
6629 || !gimple_vuse (stmt)))
6630 return true;
6631 }
6632 else if (gimple_code (stmt) == GIMPLE_COND
6633 || gimple_code (stmt) == GIMPLE_SWITCH)
6634 return true;
6635
6636 return false;
6637 }
6638
6639
6640 /* Initialize local data structures for VRP. */
6641
static void
vrp_initialize (void)
{
  basic_block bb;

  values_propagated = false;
  /* VR_VALUE is indexed by SSA name version; snapshot the current
     count so later lookups can bounds-check against it.  */
  num_vr_values = num_ssa_names;
  vr_value = XCNEWVEC (value_range_t *, num_vr_values);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      /* Uninteresting PHIs go straight to VARYING and are
		 excluded from further simulation.  */
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      /* All definitions of an uninteresting statement are
		 VARYING from the start.  */
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
6691
6692 /* Return the singleton value-range for NAME or NAME. */
6693
6694 static inline tree
6695 vrp_valueize (tree name)
6696 {
6697 if (TREE_CODE (name) == SSA_NAME)
6698 {
6699 value_range_t *vr = get_value_range (name);
6700 if (vr->type == VR_RANGE
6701 && (vr->min == vr->max
6702 || operand_equal_p (vr->min, vr->max, 0)))
6703 return vr->min;
6704 }
6705 return name;
6706 }
6707
6708 /* Visit assignment STMT. If it produces an interesting range, record
6709 the SSA name in *OUTPUT_P. */
6710
static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem && !is_overflow_infinity (tem))
	set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      /* Only report the name as interesting when its range actually
	 changed; otherwise propagation would never converge.  */
      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
6768
6769 /* Helper that gets the value range of the SSA_NAME with version I
6770 or a symbolic range containing the SSA_NAME only if the value range
6771 is varying or undefined. */
6772
6773 static inline value_range_t
6774 get_vr_for_comparison (int i)
6775 {
6776 value_range_t vr = *get_value_range (ssa_name (i));
6777
6778 /* If name N_i does not have a valid range, use N_i as its own
6779 range. This allows us to compare against names that may
6780 have N_i in their ranges. */
6781 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6782 {
6783 vr.type = VR_RANGE;
6784 vr.min = ssa_name (i);
6785 vr.max = ssa_name (i);
6786 }
6787
6788 return vr;
6789 }
6790
6791 /* Compare all the value ranges for names equivalent to VAR with VAL
6792 using comparison code COMP. Return the same value returned by
6793 compare_range_with_value, including the setting of
6794 *STRICT_OVERFLOW_P. */
6795
static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise also try every name in VAR's equivalence set; any of
     them may have a tighter range than VAR itself.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  /* Only report reliance on signed overflow when every comparison
     that produced an answer needed it.  */
  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
6863
6864
6865 /* Given a comparison code COMP and names N1 and N2, compare all the
6866 ranges equivalent to N1 against all the ranges equivalent to N2
6867 to determine the value of N1 COMP N2. Return the same value
6868 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6869 whether we relied on an overflow infinity in the comparison. */
6870
6871
static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  */
  if (s_obstack == NULL)
    {
      /* Lazily-created scratch bitmaps, shared across calls; they are
	 kept empty except for the temporary N1/N2 bits below.  */
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these bits are temporary and must be cleared
     again on every exit path below.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      /* A consistent answer across all of E2 for this member of E1
	 settles the comparison.  */
      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
6977
6978 /* Helper function for vrp_evaluate_conditional_warnv. */
6979
6980 static tree
6981 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6982 tree op0, tree op1,
6983 bool * strict_overflow_p)
6984 {
6985 value_range_t *vr0, *vr1;
6986
6987 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6988 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6989
6990 if (vr0 && vr1)
6991 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6992 else if (vr0 && vr1 == NULL)
6993 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6994 else if (vr0 == NULL && vr1)
6995 return (compare_range_with_value
6996 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6997 return NULL;
6998 }
6999
7000 /* Helper function for vrp_evaluate_conditional_warnv. */
7001
static tree
vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
					 tree op1, bool use_equiv_p,
					 bool *strict_overflow_p, bool *only_ranges)
{
  tree ret;
  if (only_ranges)
    *only_ranges = true;

  /* We only deal with integral and pointer types.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && !POINTER_TYPE_P (TREE_TYPE (op0)))
    return NULL_TREE;

  if (use_equiv_p)
    {
      /* First try the cheaper range-only evaluation; when it gives an
	 answer *ONLY_RANGES stays true.  When ONLY_RANGES is NULL the
	 shortcut is skipped entirely.  NOTE(review): *ONLY_RANGES is
	 dereferenced unconditionally below, so callers passing
	 USE_EQUIV_P true must supply a non-NULL ONLY_RANGES.  */
      if (only_ranges
	  && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
	              (code, op0, op1, strict_overflow_p)))
	return ret;
      *only_ranges = false;
      /* Fall back to comparing whole equivalence sets.  */
      if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
	return compare_names (code, op0, op1, strict_overflow_p);
      else if (TREE_CODE (op0) == SSA_NAME)
	return compare_name_with_value (code, op0, op1, strict_overflow_p);
      else if (TREE_CODE (op1) == SSA_NAME)
	return (compare_name_with_value
		(swap_tree_comparison (code), op1, op0, strict_overflow_p));
    }
  else
    return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
								 strict_overflow_p);
  return NULL_TREE;
}
7036
7037 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
7038 information. Return NULL if the conditional can not be evaluated.
7039 The ranges of all the names equivalent with the operands in COND
7040 will be used when trying to compute the value. If the result is
7041 based on undefined signed overflow, issue a warning if
7042 appropriate. */
7043
static tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  /* SOP set means the result relied on assuming signed overflow is
     undefined; warn under -Wstrict-overflow.  */
  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      if (vr0->type != VR_VARYING
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
                           "due to limited range of data type")
		      : G_("comparison always true "
                           "due to limited range of data type"));
	}
    }

  return ret;
}
7130
7131
7132 /* Visit conditional statement STMT. If we can determine which edge
7133 will be taken out of STMT's basic block, record it in
7134 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7135 SSA_PROP_VARYING. */
7136
static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  /* USE_EQUIV_P is false here for the reason explained above.  */
  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* A result that relies on undefined signed overflow is not
	     trusted for edge removal.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7238
7239 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7240 that includes the value VAL. The search is restricted to the range
7241 [START_IDX, n - 1] where n is the size of VEC.
7242
7243 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7244 returned.
7245
7246 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7247 it is placed in IDX and false is returned.
7248
7249 If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
7250 returned. */
7251
7252 static bool
7253 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
7254 {
7255 size_t n = gimple_switch_num_labels (stmt);
7256 size_t low, high;
7257
7258 /* Find case label for minimum of the value range or the next one.
7259 At each iteration we are searching in [low, high - 1]. */
7260
7261 for (low = start_idx, high = n; high != low; )
7262 {
7263 tree t;
7264 int cmp;
7265 /* Note that i != high, so we never ask for n. */
7266 size_t i = (high + low) / 2;
7267 t = gimple_switch_label (stmt, i);
7268
7269 /* Cache the result of comparing CASE_LOW and val. */
7270 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7271
7272 if (cmp == 0)
7273 {
7274 /* Ranges cannot be empty. */
7275 *idx = i;
7276 return true;
7277 }
7278 else if (cmp > 0)
7279 high = i;
7280 else
7281 {
7282 low = i + 1;
7283 if (CASE_HIGH (t) != NULL
7284 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7285 {
7286 *idx = i;
7287 return true;
7288 }
7289 }
7290 }
7291
7292 *idx = high;
7293 return false;
7294 }
7295
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX. The first index is placed in MIN_IDX. The
   last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed. */

static bool
find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  /* Index 0 is the default label, so the search starts at 1.  A false
     result from find_case_label_index means some values at or below
     MIN (resp. above MAX) are covered only by the default label.  */
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
         Return an empty range. */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* When MAX itself is uncovered, J points at the first label above
	 MAX; step back to the last label within [MIN, MAX].  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that. */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap larger than one between this label's low bound and the
	     previous label's high bound means some values in [MIN, MAX]
	     fall through to the default label.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7352
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed. */

static bool
find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty. */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (vr->type == VR_RANGE)
    {
      /* A plain range needs only the single label interval [i, j]
	 computed above.  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range ~[MIN, MAX]: start from all case labels and
     carve out the labels wholly contained in [MIN, MAX].  */
  /* Set first range to all case labels. */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX]. */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1]. */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* The right-hand piece is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* The left-hand piece is empty: promote the right-hand piece to
	 the first interval and mark the second one empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7430
/* Visit switch statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
{
  tree op, val;
  value_range_t *vr;
  size_t i = 0, j = 0, k, l;
  bool take_default;

  *taken_edge_p = NULL;
  op = gimple_switch_index (stmt);
  /* Only an SSA name index can have a recorded value range.  */
  if (TREE_CODE (op) != SSA_NAME)
    return SSA_PROP_VARYING;

  vr = get_value_range (op);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting switch expression with operand ");
      print_generic_expr (dump_file, op, 0);
      fprintf (dump_file, " with known range ");
      dump_value_range (dump_file, vr);
      fprintf (dump_file, "\n");
    }

  /* Symbolic or undefined/varying ranges give no usable label set.  */
  if ((vr->type != VR_RANGE
       && vr->type != VR_ANTI_RANGE)
      || symbolic_range_p (vr))
    return SSA_PROP_VARYING;

  /* Find the single edge that is taken from the switch expression. */
  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);

  /* Check if the range spans no CASE_LABEL. If so, we only reach the default
     label */
  if (j < i)
    {
      gcc_assert (take_default);
      val = gimple_switch_default_label (stmt);
    }
  else
    {
      /* Check if labels with index i to j and maybe the default label
	 are all reaching the same label. */

      val = gimple_switch_label (stmt, i);
      if (take_default
	  && CASE_LABEL (gimple_switch_default_label (stmt))
	  != CASE_LABEL (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, " not a single destination for this "
		     "range\n");
	  return SSA_PROP_VARYING;
	}
      /* First label interval [i, j] reached by the range.  */
      for (++i; i <= j; ++i)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
      /* Second label interval [k, l]; non-empty only for anti-ranges.  */
      for (; k <= l; ++k)
	{
	  if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " not a single destination for this "
			 "range\n");
	      return SSA_PROP_VARYING;
	    }
	}
    }

  *taken_edge_p = find_edge (gimple_bb (stmt),
			     label_to_block (CASE_LABEL (val)));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " will take edge to ");
      print_generic_stmt (dump_file, CASE_LABEL (val), 0);
    }

  return SSA_PROP_INTERESTING;
}
7522
7523
7524 /* Evaluate statement STMT. If the statement produces a useful range,
7525 return SSA_PROP_INTERESTING and record the SSA name with the
7526 interesting range into *OUTPUT_P.
7527
7528 If STMT is a conditional branch and we can determine its truth
7529 value, the taken edge is recorded in *TAKEN_EDGE_P.
7530
7531 If STMT produces a varying value, return SSA_PROP_VARYING. */
7532
7533 static enum ssa_prop_result
7534 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7535 {
7536 tree def;
7537 ssa_op_iter iter;
7538
7539 if (dump_file && (dump_flags & TDF_DETAILS))
7540 {
7541 fprintf (dump_file, "\nVisiting statement:\n");
7542 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7543 fprintf (dump_file, "\n");
7544 }
7545
7546 if (!stmt_interesting_for_vrp (stmt))
7547 gcc_assert (stmt_ends_bb_p (stmt));
7548 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7549 return vrp_visit_assignment_or_call (stmt, output_p);
7550 else if (gimple_code (stmt) == GIMPLE_COND)
7551 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7552 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7553 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7554
7555 /* All other statements produce nothing of interest for VRP, so mark
7556 their outputs varying and prevent further simulation. */
7557 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7558 set_value_range_to_varying (get_value_range (def));
7559
7560 return SSA_PROP_VARYING;
7561 }
7562
7563 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7564 { VR1TYPE, VR0MIN, VR0MAX } and store the result
7565 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7566 possible such range. The resulting range is not canonicalized. */
7567
7568 static void
7569 union_ranges (enum value_range_type *vr0type,
7570 tree *vr0min, tree *vr0max,
7571 enum value_range_type vr1type,
7572 tree vr1min, tree vr1max)
7573 {
7574 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7575 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7576
7577 /* [] is vr0, () is vr1 in the following classification comments. */
7578 if (mineq && maxeq)
7579 {
7580 /* [( )] */
7581 if (*vr0type == vr1type)
7582 /* Nothing to do for equal ranges. */
7583 ;
7584 else if ((*vr0type == VR_RANGE
7585 && vr1type == VR_ANTI_RANGE)
7586 || (*vr0type == VR_ANTI_RANGE
7587 && vr1type == VR_RANGE))
7588 {
7589 /* For anti-range with range union the result is varying. */
7590 goto give_up;
7591 }
7592 else
7593 gcc_unreachable ();
7594 }
7595 else if (operand_less_p (*vr0max, vr1min) == 1
7596 || operand_less_p (vr1max, *vr0min) == 1)
7597 {
7598 /* [ ] ( ) or ( ) [ ]
7599 If the ranges have an empty intersection, result of the union
7600 operation is the anti-range or if both are anti-ranges
7601 it covers all. */
7602 if (*vr0type == VR_ANTI_RANGE
7603 && vr1type == VR_ANTI_RANGE)
7604 goto give_up;
7605 else if (*vr0type == VR_ANTI_RANGE
7606 && vr1type == VR_RANGE)
7607 ;
7608 else if (*vr0type == VR_RANGE
7609 && vr1type == VR_ANTI_RANGE)
7610 {
7611 *vr0type = vr1type;
7612 *vr0min = vr1min;
7613 *vr0max = vr1max;
7614 }
7615 else if (*vr0type == VR_RANGE
7616 && vr1type == VR_RANGE)
7617 {
7618 /* The result is the convex hull of both ranges. */
7619 if (operand_less_p (*vr0max, vr1min) == 1)
7620 {
7621 /* If the result can be an anti-range, create one. */
7622 if (TREE_CODE (*vr0max) == INTEGER_CST
7623 && TREE_CODE (vr1min) == INTEGER_CST
7624 && vrp_val_is_min (*vr0min)
7625 && vrp_val_is_max (vr1max))
7626 {
7627 tree min = int_const_binop (PLUS_EXPR,
7628 *vr0max, integer_one_node);
7629 tree max = int_const_binop (MINUS_EXPR,
7630 vr1min, integer_one_node);
7631 if (!operand_less_p (max, min))
7632 {
7633 *vr0type = VR_ANTI_RANGE;
7634 *vr0min = min;
7635 *vr0max = max;
7636 }
7637 else
7638 *vr0max = vr1max;
7639 }
7640 else
7641 *vr0max = vr1max;
7642 }
7643 else
7644 {
7645 /* If the result can be an anti-range, create one. */
7646 if (TREE_CODE (vr1max) == INTEGER_CST
7647 && TREE_CODE (*vr0min) == INTEGER_CST
7648 && vrp_val_is_min (vr1min)
7649 && vrp_val_is_max (*vr0max))
7650 {
7651 tree min = int_const_binop (PLUS_EXPR,
7652 vr1max, integer_one_node);
7653 tree max = int_const_binop (MINUS_EXPR,
7654 *vr0min, integer_one_node);
7655 if (!operand_less_p (max, min))
7656 {
7657 *vr0type = VR_ANTI_RANGE;
7658 *vr0min = min;
7659 *vr0max = max;
7660 }
7661 else
7662 *vr0min = vr1min;
7663 }
7664 else
7665 *vr0min = vr1min;
7666 }
7667 }
7668 else
7669 gcc_unreachable ();
7670 }
7671 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7672 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7673 {
7674 /* [ ( ) ] or [( ) ] or [ ( )] */
7675 if (*vr0type == VR_RANGE
7676 && vr1type == VR_RANGE)
7677 ;
7678 else if (*vr0type == VR_ANTI_RANGE
7679 && vr1type == VR_ANTI_RANGE)
7680 {
7681 *vr0type = vr1type;
7682 *vr0min = vr1min;
7683 *vr0max = vr1max;
7684 }
7685 else if (*vr0type == VR_ANTI_RANGE
7686 && vr1type == VR_RANGE)
7687 {
7688 /* Arbitrarily choose the right or left gap. */
7689 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7690 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7691 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7692 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7693 else
7694 goto give_up;
7695 }
7696 else if (*vr0type == VR_RANGE
7697 && vr1type == VR_ANTI_RANGE)
7698 /* The result covers everything. */
7699 goto give_up;
7700 else
7701 gcc_unreachable ();
7702 }
7703 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7704 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7705 {
7706 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7707 if (*vr0type == VR_RANGE
7708 && vr1type == VR_RANGE)
7709 {
7710 *vr0type = vr1type;
7711 *vr0min = vr1min;
7712 *vr0max = vr1max;
7713 }
7714 else if (*vr0type == VR_ANTI_RANGE
7715 && vr1type == VR_ANTI_RANGE)
7716 ;
7717 else if (*vr0type == VR_RANGE
7718 && vr1type == VR_ANTI_RANGE)
7719 {
7720 *vr0type = VR_ANTI_RANGE;
7721 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7722 {
7723 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7724 *vr0min = vr1min;
7725 }
7726 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7727 {
7728 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7729 *vr0max = vr1max;
7730 }
7731 else
7732 goto give_up;
7733 }
7734 else if (*vr0type == VR_ANTI_RANGE
7735 && vr1type == VR_RANGE)
7736 /* The result covers everything. */
7737 goto give_up;
7738 else
7739 gcc_unreachable ();
7740 }
7741 else if ((operand_less_p (vr1min, *vr0max) == 1
7742 || operand_equal_p (vr1min, *vr0max, 0))
7743 && operand_less_p (*vr0min, vr1min) == 1)
7744 {
7745 /* [ ( ] ) or [ ]( ) */
7746 if (*vr0type == VR_RANGE
7747 && vr1type == VR_RANGE)
7748 *vr0max = vr1max;
7749 else if (*vr0type == VR_ANTI_RANGE
7750 && vr1type == VR_ANTI_RANGE)
7751 *vr0min = vr1min;
7752 else if (*vr0type == VR_ANTI_RANGE
7753 && vr1type == VR_RANGE)
7754 {
7755 if (TREE_CODE (vr1min) == INTEGER_CST)
7756 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7757 else
7758 goto give_up;
7759 }
7760 else if (*vr0type == VR_RANGE
7761 && vr1type == VR_ANTI_RANGE)
7762 {
7763 if (TREE_CODE (*vr0max) == INTEGER_CST)
7764 {
7765 *vr0type = vr1type;
7766 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7767 *vr0max = vr1max;
7768 }
7769 else
7770 goto give_up;
7771 }
7772 else
7773 gcc_unreachable ();
7774 }
7775 else if ((operand_less_p (*vr0min, vr1max) == 1
7776 || operand_equal_p (*vr0min, vr1max, 0))
7777 && operand_less_p (vr1min, *vr0min) == 1)
7778 {
7779 /* ( [ ) ] or ( )[ ] */
7780 if (*vr0type == VR_RANGE
7781 && vr1type == VR_RANGE)
7782 *vr0min = vr1min;
7783 else if (*vr0type == VR_ANTI_RANGE
7784 && vr1type == VR_ANTI_RANGE)
7785 *vr0max = vr1max;
7786 else if (*vr0type == VR_ANTI_RANGE
7787 && vr1type == VR_RANGE)
7788 {
7789 if (TREE_CODE (vr1max) == INTEGER_CST)
7790 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7791 else
7792 goto give_up;
7793 }
7794 else if (*vr0type == VR_RANGE
7795 && vr1type == VR_ANTI_RANGE)
7796 {
7797 if (TREE_CODE (*vr0min) == INTEGER_CST)
7798 {
7799 *vr0type = vr1type;
7800 *vr0min = vr1min;
7801 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7802 }
7803 else
7804 goto give_up;
7805 }
7806 else
7807 gcc_unreachable ();
7808 }
7809 else
7810 goto give_up;
7811
7812 return;
7813
7814 give_up:
7815 *vr0type = VR_VARYING;
7816 *vr0min = NULL_TREE;
7817 *vr0max = NULL_TREE;
7818 }
7819
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [( )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Two disjoint ranges have an empty intersection.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ ( ) ] or [( ) ] or [ ( )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   integer_one_node);
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [ ] ) or ([ ] ) or ( [ ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   integer_one_node);
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   integer_one_node);
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [ ( ] ) or [ ]( ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       integer_one_node);
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       integer_one_node);
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* ( [ ) ] or ( )[ ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       integer_one_node);
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       integer_one_node);
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
8093
8094
8095 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
8096 in *VR0. This may not be the smallest possible such range. */
8097
8098 static void
8099 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
8100 {
8101 value_range_t saved;
8102
8103 /* If either range is VR_VARYING the other one wins. */
8104 if (vr1->type == VR_VARYING)
8105 return;
8106 if (vr0->type == VR_VARYING)
8107 {
8108 copy_value_range (vr0, vr1);
8109 return;
8110 }
8111
8112 /* When either range is VR_UNDEFINED the resulting range is
8113 VR_UNDEFINED, too. */
8114 if (vr0->type == VR_UNDEFINED)
8115 return;
8116 if (vr1->type == VR_UNDEFINED)
8117 {
8118 set_value_range_to_undefined (vr0);
8119 return;
8120 }
8121
8122 /* Save the original vr0 so we can return it as conservative intersection
8123 result when our worker turns things to varying. */
8124 saved = *vr0;
8125 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8126 vr1->type, vr1->min, vr1->max);
8127 /* Make sure to canonicalize the result though as the inversion of a
8128 VR_RANGE can still be a VR_RANGE. */
8129 set_and_canonicalize_value_range (vr0, vr0->type,
8130 vr0->min, vr0->max, vr0->equiv);
8131 /* If that failed, use the saved original VR0. */
8132 if (vr0->type == VR_VARYING)
8133 {
8134 *vr0 = saved;
8135 return;
8136 }
8137 /* If the result is VR_UNDEFINED there is no need to mess with
8138 the equivalencies. */
8139 if (vr0->type == VR_UNDEFINED)
8140 return;
8141
8142 /* The resulting set of equivalences for range intersection is the union of
8143 the two sets. */
8144 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8145 bitmap_ior_into (vr0->equiv, vr1->equiv);
8146 else if (vr1->equiv && !vr0->equiv)
8147 bitmap_copy (vr0->equiv, vr1->equiv);
8148 }
8149
8150 static void
8151 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8152 {
8153 if (dump_file && (dump_flags & TDF_DETAILS))
8154 {
8155 fprintf (dump_file, "Intersecting\n ");
8156 dump_value_range (dump_file, vr0);
8157 fprintf (dump_file, "\nand\n ");
8158 dump_value_range (dump_file, vr1);
8159 fprintf (dump_file, "\n");
8160 }
8161 vrp_intersect_ranges_1 (vr0, vr1);
8162 if (dump_file && (dump_flags & TDF_DETAILS))
8163 {
8164 fprintf (dump_file, "to\n ");
8165 dump_value_range (dump_file, vr0);
8166 fprintf (dump_file, "\n");
8167 }
8168 }
8169
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* VR_UNDEFINED is the neutral element of the meet: the other operand
     wins, equivalences included.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VR_VARYING absorbs everything.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Keep a copy of VR0 so we can still derive a nonnull range should
     the union fail and give VARYING.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both inputs exclude zero, so their union does too.  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  /* The union may have produced a non-canonical range (e.g. the
     inversion of a VR_RANGE); canonicalize it.  */
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
8246
8247 static void
8248 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8249 {
8250 if (dump_file && (dump_flags & TDF_DETAILS))
8251 {
8252 fprintf (dump_file, "Meeting\n ");
8253 dump_value_range (dump_file, vr0);
8254 fprintf (dump_file, "\nand\n ");
8255 dump_value_range (dump_file, vr1);
8256 fprintf (dump_file, "\n");
8257 }
8258 vrp_meet_1 (vr0, vr1);
8259 if (dump_file && (dump_flags & TDF_DETAILS))
8260 {
8261 fprintf (dump_file, "to\n ");
8262 dump_value_range (dump_file, vr0);
8263 fprintf (dump_file, "\n");
8264 }
8265 }
8266
8267
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  /* Number of executable incoming edges seen now vs. on the previous
     visit; used below to decide whether widening is allowed.  */
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in through executable
     edges.  */
  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
	      (int) i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK
		  && (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE))
		{
		  vr_arg.equiv = NULL;
		  if (symbolic_range_p (&vr_arg))
		    {
		      vr_arg.type = VR_VARYING;
		      vr_arg.min = NULL_TREE;
		      vr_arg.max = NULL_TREE;
		    }
		}
	    }
	  else
	    {
	      /* A non-SSA argument yields the singleton range [arg, arg],
		 after stripping any overflow-infinity marker from it.  */
	      if (is_overflow_infinity (arg))
		arg = drop_tree_overflow (arg);

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, "\n\tValue: ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  /* Once the meet has degraded to VARYING no further argument
	     can improve the result; stop scanning.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is smaller or larger than the previous
	 one, go all the way to -INF.  In the first case, to avoid
	 iterating millions of times to reach -INF, and in the
	 other case to avoid infinite bouncing between different
	 minimums.  */
      if (cmp_min > 0 || cmp_min < 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
	    vr_result.min =
		negative_overflow_infinity (TREE_TYPE (vr_result.min));
	}

      /* Similarly, if the new maximum is smaller or larger than
	 the previous one, go all the way to +INF.  */
      if (cmp_max < 0 || cmp_max > 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
	    vr_result.max =
		positive_overflow_infinity (TREE_TYPE (vr_result.max));
	}

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may known more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && current_loops
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
	   && vrp_val_is_min (vr_result.min))
	  || compare_values (vr_result.min,
			     vr_result.max) > 0)
	goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n\n");
	}

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
8459
/* Simplify boolean operations if the source is known
   to be already a boolean.  Rewrites an EQ_EXPR/NE_EXPR assignment
   whose operands only take the values 0 and 1 into a BIT_XOR_EXPR,
   a plain copy, or a conversion.  Returns true if STMT changed.  */
static bool
simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to take only the values 0 and 1.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (!op_with_boolean_value_range_p (op0))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!op_with_boolean_value_range_p (op1))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
  if (rhs_code == EQ_EXPR)
    {
      if (TREE_CODE (op1) == INTEGER_CST)
	/* A == CST is equivalent to A != (CST ^ 1) on boolean values.  */
	op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
      else
	return false;
    }

  lhs = gimple_assign_lhs (stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    need_conversion
				    ? NOP_EXPR : TREE_CODE (op0),
				    op0, NULL_TREE);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      /* Compute the XOR into a temporary, then convert it to LHS's type.  */
      tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
      gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
  update_stmt (gsi_stmt (*gsi));

  return true;
}
8522
8523 /* Simplify a division or modulo operator to a right shift or
8524 bitwise and if the first operand is unsigned or is greater
8525 than zero and the second operand is an exact power of two. */
8526
8527 static bool
8528 simplify_div_or_mod_using_ranges (gimple stmt)
8529 {
8530 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8531 tree val = NULL;
8532 tree op0 = gimple_assign_rhs1 (stmt);
8533 tree op1 = gimple_assign_rhs2 (stmt);
8534 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8535
8536 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8537 {
8538 val = integer_one_node;
8539 }
8540 else
8541 {
8542 bool sop = false;
8543
8544 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8545
8546 if (val
8547 && sop
8548 && integer_onep (val)
8549 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8550 {
8551 location_t location;
8552
8553 if (!gimple_has_location (stmt))
8554 location = input_location;
8555 else
8556 location = gimple_location (stmt);
8557 warning_at (location, OPT_Wstrict_overflow,
8558 "assuming signed overflow does not occur when "
8559 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8560 }
8561 }
8562
8563 if (val && integer_onep (val))
8564 {
8565 tree t;
8566
8567 if (rhs_code == TRUNC_DIV_EXPR)
8568 {
8569 t = build_int_cst (integer_type_node, tree_log2 (op1));
8570 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8571 gimple_assign_set_rhs1 (stmt, op0);
8572 gimple_assign_set_rhs2 (stmt, t);
8573 }
8574 else
8575 {
8576 t = build_int_cst (TREE_TYPE (op1), 1);
8577 t = int_const_binop (MINUS_EXPR, op1, t);
8578 t = fold_convert (TREE_TYPE (op0), t);
8579
8580 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8581 gimple_assign_set_rhs1 (stmt, op0);
8582 gimple_assign_set_rhs2 (stmt, t);
8583 }
8584
8585 update_stmt (stmt);
8586 return true;
8587 }
8588
8589 return false;
8590 }
8591
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  Returns true if STMT changed.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  /* VAL encodes the decision: 1 means negate OP, 0 means keep OP.  */
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* NOTE(review): for an unsigned type VAL is set here but the
	 replacement code below sits inside the ELSE IF arm, so this
	 path falls through to return false — presumably ABS_EXPR on
	 unsigned operands is folded elsewhere; verify.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* First ask whether OP <= 0 is known (VAL == 1 -> negate).  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* Otherwise ask whether OP >= 0 is known, then invert the
	     answer so that VAL keeps the same encoding as above.  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  /* Warn if the sign deduction relied on undefined signed
	     overflow semantics.  */
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    /* SSA_NAME as the rhs code makes the statement a copy.  */
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
8656
8657 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8658 If all the bits that are being cleared by & are already
8659 known to be zero from VR, or all the bits that are being
8660 set by | are already known to be one from VR, the bit
8661 operation is redundant. */
8662
8663 static bool
8664 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8665 {
8666 tree op0 = gimple_assign_rhs1 (stmt);
8667 tree op1 = gimple_assign_rhs2 (stmt);
8668 tree op = NULL_TREE;
8669 value_range_t vr0 = VR_INITIALIZER;
8670 value_range_t vr1 = VR_INITIALIZER;
8671 double_int may_be_nonzero0, may_be_nonzero1;
8672 double_int must_be_nonzero0, must_be_nonzero1;
8673 double_int mask;
8674
8675 if (TREE_CODE (op0) == SSA_NAME)
8676 vr0 = *(get_value_range (op0));
8677 else if (is_gimple_min_invariant (op0))
8678 set_value_range_to_value (&vr0, op0, NULL);
8679 else
8680 return false;
8681
8682 if (TREE_CODE (op1) == SSA_NAME)
8683 vr1 = *(get_value_range (op1));
8684 else if (is_gimple_min_invariant (op1))
8685 set_value_range_to_value (&vr1, op1, NULL);
8686 else
8687 return false;
8688
8689 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8690 return false;
8691 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8692 return false;
8693
8694 switch (gimple_assign_rhs_code (stmt))
8695 {
8696 case BIT_AND_EXPR:
8697 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8698 if (mask.is_zero ())
8699 {
8700 op = op0;
8701 break;
8702 }
8703 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8704 if (mask.is_zero ())
8705 {
8706 op = op1;
8707 break;
8708 }
8709 break;
8710 case BIT_IOR_EXPR:
8711 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8712 if (mask.is_zero ())
8713 {
8714 op = op1;
8715 break;
8716 }
8717 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8718 if (mask.is_zero ())
8719 {
8720 op = op0;
8721 break;
8722 }
8723 break;
8724 default:
8725 gcc_unreachable ();
8726 }
8727
8728 if (op == NULL_TREE)
8729 return false;
8730
8731 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8732 update_stmt (gsi_stmt (*gsi));
8733 return true;
8734 }
8735
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, value_range_t *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the
     conditional as it was written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      /* This should not be negative infinity; there is no overflow
	 here.  */
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      if (cond_code == LT_EXPR && !is_overflow_infinity (max))
	{
	  /* OP0 < OP1 is the same as OP0 <= OP1 - 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  if (EXPR_P (max))
	    TREE_NO_WARNING (max) = 1;
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      /* This should not be positive infinity; there is no overflow
	 here.  */
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR && !is_overflow_infinity (min))
	{
	  /* OP0 > OP1 is the same as OP0 >= OP1 + 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  if (EXPR_P (min))
	    TREE_NO_WARNING (min) = 1;
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      if (compare_values (vr->min, min) == 1)
	min = vr->min;
      if (compare_values (vr->max, max) == -1)
	max = vr->max;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}
8799
/* Return whether the value range *VR fits in an integer type specified
   by PRECISION and UNSIGNED_P.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
{
  tree src_type;
  unsigned src_precision;
  double_int tem;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is signed and unsigned_p,
     and so is an identity transform.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  if ((src_precision < precision
       && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
      || (src_precision == precision
	  && TYPE_UNSIGNED (src_type) == unsigned_p))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the double_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed double_int, while a negative value cannot be represented
     by an unsigned double_int.  */
  if (TYPE_UNSIGNED (src_type) != unsigned_p
      && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  If a bound changes under sign- or
     zero-extension to PRECISION bits, the range does not fit.  */
  tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->min) != tem)
    return false;
  tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->max) != tem)
    return false;

  return true;
}
8850
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  Returns true if STMT changed.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
	{
	  /* If exactly one value of OP0 satisfies the relation,
	     rewrite it as an equality test against that value.  */
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  cond_code = invert_tree_comparison (cond_code, false);
	  new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      /* The inverted condition holds for exactly one value, so
		 the original condition is OP0 != that value.  */
	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop)))
	{
	  value_range_t *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_UNSIGNED (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if ((is_negative_overflow_infinity (vr->min)
		   || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      /* Compare INNEROP directly against the constant converted
		 to its type, making the middle conversion dead.  */
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}
8997
/* Simplify a switch statement using the value range of the switch
   argument.  Unreachable case labels have their edges queued for
   removal and a replacement label vector is queued; always returns
   false since the statement itself is updated later.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are label-index intervals of reachable case
     labels (two intervals to accommodate anti-ranges); n2 counts the
     labels retained.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index matches at most a single case label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty interval [1, 0]: keep only the default label.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  /* Copy the labels of both reachable intervals.  */
  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Reachable edge; clear the marker and keep it.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
9096
/* Simplify an integral conversion from an SSA name in STMT.  If STMT
   is the outer of two chained conversions and dropping the middle one
   provably yields the same values, convert directly from the inner
   operand.  Returns true if STMT changed.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
  unsigned inner_prec, middle_prec, final_prec;
  double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  /* Look for a chain (finaltype)(middleop) innerop of conversions.  */
  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = tree_to_double_int (innervr->min);
  innermax = tree_to_double_int (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if ((innermax - innermin).ugt (double_int::mask (middle_prec))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
  if (inner_unsigned_p)
    innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
  else
    innermed = double_int_zero;
  /* Only use the medium value if it lies strictly between the two
     bounds; otherwise fall back to the minimum.  */
  if (innermin.cmp (innermed, inner_unsigned_p) >= 0
      || innermed.cmp (innermax, inner_unsigned_p) >= 0)
    innermed = innermin;

  /* Push all three sample points through the middle conversion.  */
  middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
  middlemin = innermin.ext (middle_prec, middle_unsigned_p);
  middlemed = innermed.ext (middle_prec, middle_unsigned_p);
  middlemax = innermax.ext (middle_prec, middle_unsigned_p);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_unsigned_p = TYPE_UNSIGNED (finaltype);
  if (middlemin.ext (final_prec, final_unsigned_p)
      != innermin.ext (final_prec, final_unsigned_p)
      || middlemed.ext (final_prec, final_unsigned_p)
	 != innermed.ext (final_prec, final_unsigned_p)
      || middlemax.ext (final_prec, final_unsigned_p)
	 != innermax.ext (final_prec, final_unsigned_p))
    return false;

  /* The middle conversion is redundant; convert straight from the
     inner operand.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
9174
/* Simplify a conversion from integral SSA name to float in STMT.  If
   the source range fits a narrower or signed integer mode for which a
   direct int-to-float conversion exists, insert that narrowing/sign
   change before STMT via GSI.  Returns true if STMT changed.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, GET_MODE_PRECISION
			    (TYPE_MODE (TREE_TYPE (rhs1))), 0))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
9240
9241 /* Simplify STMT using ranges if possible. */
9242
9243 static bool
9244 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9245 {
9246 gimple stmt = gsi_stmt (*gsi);
9247 if (is_gimple_assign (stmt))
9248 {
9249 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9250 tree rhs1 = gimple_assign_rhs1 (stmt);
9251
9252 switch (rhs_code)
9253 {
9254 case EQ_EXPR:
9255 case NE_EXPR:
9256 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9257 if the RHS is zero or one, and the LHS are known to be boolean
9258 values. */
9259 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9260 return simplify_truth_ops_using_ranges (gsi, stmt);
9261 break;
9262
9263 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9264 and BIT_AND_EXPR respectively if the first operand is greater
9265 than zero and the second operand is an exact power of two. */
9266 case TRUNC_DIV_EXPR:
9267 case TRUNC_MOD_EXPR:
9268 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
9269 && integer_pow2p (gimple_assign_rhs2 (stmt)))
9270 return simplify_div_or_mod_using_ranges (stmt);
9271 break;
9272
9273 /* Transform ABS (X) into X or -X as appropriate. */
9274 case ABS_EXPR:
9275 if (TREE_CODE (rhs1) == SSA_NAME
9276 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9277 return simplify_abs_using_ranges (stmt);
9278 break;
9279
9280 case BIT_AND_EXPR:
9281 case BIT_IOR_EXPR:
9282 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9283 if all the bits being cleared are already cleared or
9284 all the bits being set are already set. */
9285 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9286 return simplify_bit_ops_using_ranges (gsi, stmt);
9287 break;
9288
9289 CASE_CONVERT:
9290 if (TREE_CODE (rhs1) == SSA_NAME
9291 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9292 return simplify_conversion_using_ranges (stmt);
9293 break;
9294
9295 case FLOAT_EXPR:
9296 if (TREE_CODE (rhs1) == SSA_NAME
9297 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9298 return simplify_float_conversion_using_ranges (gsi, stmt);
9299 break;
9300
9301 default:
9302 break;
9303 }
9304 }
9305 else if (gimple_code (stmt) == GIMPLE_COND)
9306 return simplify_cond_using_ranges (stmt);
9307 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9308 return simplify_switch_using_ranges (stmt);
9309
9310 return false;
9311 }
9312
/* If the statement pointed by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

static bool
fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple stmt = gsi_stmt (*si);

  /* Evaluate either the comparison on the RHS of an assignment or
     the condition of a GIMPLE_COND; anything else is not handled.  */
  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    val = vrp_evaluate_conditional (gimple_cond_code (stmt),
				    gimple_cond_lhs (stmt),
				    gimple_cond_rhs (stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      /* For an assignment, convert the computed truth value to the
	 type of the statement's result.  */
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val, 0);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  /* Replace the condition with a constant true or false.  */
	  if (integer_zerop (val))
	    gimple_cond_make_false (stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
9373
9374 /* Callback for substitute_and_fold folding the stmt at *SI. */
9375
9376 static bool
9377 vrp_fold_stmt (gimple_stmt_iterator *si)
9378 {
9379 if (fold_predicate_in (si))
9380 return true;
9381
9382 return simplify_stmt_using_ranges (si);
9383 }
9384
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.

   Created in identify_jump_threads and released in
   finalize_jump_threads.  */
static vec<tree> equiv_stack;
9391
9392 /* A trivial wrapper so that we can present the generic jump threading
9393 code with a simple API for simplifying statements. STMT is the
9394 statement we want to simplify, WITHIN_STMT provides the location
9395 for any overflow warnings. */
9396
9397 static tree
9398 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9399 {
9400 if (gimple_code (stmt) == GIMPLE_COND)
9401 return vrp_evaluate_conditional (gimple_cond_code (stmt),
9402 gimple_cond_lhs (stmt),
9403 gimple_cond_rhs (stmt), within_stmt);
9404
9405 if (gimple_code (stmt) == GIMPLE_ASSIGN)
9406 {
9407 value_range_t new_vr = VR_INITIALIZER;
9408 tree lhs = gimple_assign_lhs (stmt);
9409
9410 if (TREE_CODE (lhs) == SSA_NAME
9411 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
9412 || POINTER_TYPE_P (TREE_TYPE (lhs))))
9413 {
9414 extract_range_from_assignment (&new_vr, stmt);
9415 if (range_int_cst_singleton_p (&new_vr))
9416 return new_vr.min;
9417 }
9418 }
9419
9420 return NULL_TREE;
9421 }
9422
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (void)
{
  basic_block bb;
  gimple dummy;
  int i;
  edge e;

  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Do not thread across edges we are about to remove.  Just marking
     them as EDGE_DFS_BACK will do.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    e->flags |= EDGE_DFS_BACK;

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  equiv_stack.create (20);

  /* To avoid lots of silly node creation, we create a single
     conditional and just modify it in-place when attempting to
     thread jumps.  */
  dummy = gimple_build_cond (EQ_EXPR,
			     integer_zero_node, integer_zero_node,
			     NULL, NULL);

  /* Walk through all the blocks finding those which present a
     potential jump threading opportunity.  We could set this up
     as a dominator walker and record data during the walk, but
     I doubt it's worth the effort for the classes of jump
     threading opportunities we are trying to identify at this
     point in compilation.  */
  FOR_EACH_BB (bb)
    {
      gimple last;

      /* If the generic jump threading code does not find this block
	 interesting, then there is nothing to do.  */
      if (! potentially_threadable_block (bb))
	continue;

      /* We only care about blocks ending in a COND_EXPR.  While there
	 may be some value in handling SWITCH_EXPR here, I doubt it's
	 terribly important.  */
      last = gsi_stmt (gsi_last_bb (bb));

      /* We're basically looking for a switch or any kind of conditional with
	 integral or pointer type arguments.  Note the type of the second
	 argument will be the same as the first argument, so no need to
	 check it explicitly.  */
      if (gimple_code (last) == GIMPLE_SWITCH
	  || (gimple_code (last) == GIMPLE_COND
	      && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
	      && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
		  || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
	      && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_cond_rhs (last)))))
	{
	  edge_iterator ei;

	  /* We've got a block with multiple predecessors and multiple
	     successors which also ends in a suitable conditional or
	     switch statement.  For each predecessor, see if we can thread
	     it to a specific successor.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      /* Do not thread across back edges or abnormal edges
		 in the CFG.  */
	      if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
		continue;

	      /* Temporary equivalences recorded while threading are
		 pushed on equiv_stack and unwound by the generic
		 threader between attempts.  */
	      thread_across_edge (dummy, e, true, &equiv_stack,
				  simplify_stmt_for_jump_threading);
	    }
	}
    }

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
}
9534
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  /* NOTE(review): the FALSE argument presumably disallows peeling of
     loop headers while threading -- confirm against
     tree-ssa-threadupdate.  */
  thread_through_all_blocks (false);
  /* Release the equivalence unwinder stack allocated in
     identify_jump_threads.  */
  equiv_stack.release ();
}
9547
9548
9549 /* Traverse all the blocks folding conditionals with known ranges. */
9550
9551 static void
9552 vrp_finalize (void)
9553 {
9554 size_t i;
9555
9556 values_propagated = true;
9557
9558 if (dump_file)
9559 {
9560 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9561 dump_all_value_ranges (dump_file);
9562 fprintf (dump_file, "\n");
9563 }
9564
9565 substitute_and_fold (op_with_constant_singleton_value_range,
9566 vrp_fold_stmt, false);
9567
9568 if (warn_array_bounds)
9569 check_all_array_refs ();
9570
9571 /* We must identify jump threading opportunities before we release
9572 the datastructures built by VRP. */
9573 identify_jump_threads ();
9574
9575 /* Set value range to non pointer SSA_NAMEs. */
9576 for (i = 0; i < num_vr_values; i++)
9577 if (vr_value[i])
9578 {
9579 tree name = ssa_name (i);
9580
9581 if (!name
9582 || POINTER_TYPE_P (TREE_TYPE (name))
9583 || (vr_value[i]->type == VR_VARYING)
9584 || (vr_value[i]->type == VR_UNDEFINED))
9585 continue;
9586
9587 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
9588 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST))
9589 {
9590 if (vr_value[i]->type == VR_RANGE)
9591 set_range_info (name,
9592 tree_to_double_int (vr_value[i]->min),
9593 tree_to_double_int (vr_value[i]->max));
9594 else if (vr_value[i]->type == VR_ANTI_RANGE)
9595 {
9596 /* VR_ANTI_RANGE ~[min, max] is encoded compactly as
9597 [max + 1, min - 1] without additional attributes.
9598 When min value > max value, we know that it is
9599 VR_ANTI_RANGE; it is VR_RANGE otherwise. */
9600
9601 /* ~[0,0] anti-range is represented as
9602 range. */
9603 if (TYPE_UNSIGNED (TREE_TYPE (name))
9604 && integer_zerop (vr_value[i]->min)
9605 && integer_zerop (vr_value[i]->max))
9606 set_range_info (name,
9607 double_int_one,
9608 double_int::max_value
9609 (TYPE_PRECISION (TREE_TYPE (name)), true));
9610 else
9611 set_range_info (name,
9612 tree_to_double_int (vr_value[i]->max)
9613 + double_int_one,
9614 tree_to_double_int (vr_value[i]->min)
9615 - double_int_one);
9616 }
9617 }
9618 }
9619
9620 /* Free allocated memory. */
9621 for (i = 0; i < num_vr_values; i++)
9622 if (vr_value[i])
9623 {
9624 BITMAP_FREE (vr_value[i]->equiv);
9625 free (vr_value[i]);
9626 }
9627
9628 free (vr_value);
9629 free (vr_phi_edge_counts);
9630
9631 /* So that we can distinguish between VRP data being available
9632 and not available. */
9633 vr_value = NULL;
9634 vr_phi_edge_counts = NULL;
9635 }
9636
9637
9638 /* Main entry point to VRP (Value Range Propagation). This pass is
9639 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9640 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9641 Programming Language Design and Implementation, pp. 67-78, 1995.
9642 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9643
9644 This is essentially an SSA-CCP pass modified to deal with ranges
9645 instead of constants.
9646
9647 While propagating ranges, we may find that two or more SSA name
9648 have equivalent, though distinct ranges. For instance,
9649
9650 1 x_9 = p_3->a;
9651 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9652 3 if (p_4 == q_2)
9653 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9654 5 endif
9655 6 if (q_2)
9656
9657 In the code above, pointer p_5 has range [q_2, q_2], but from the
9658 code we can also determine that p_5 cannot be NULL and, if q_2 had
9659 a non-varying range, p_5's range should also be compatible with it.
9660
9661 These equivalences are created by two expressions: ASSERT_EXPR and
9662 copy operations. Since p_5 is an assertion on p_4, and p_4 was the
9663 result of another assertion, then we can use the fact that p_5 and
9664 p_4 are equivalent when evaluating p_5's range.
9665
9666 Together with value ranges, we also propagate these equivalences
9667 between names so that we can take advantage of information from
9668 multiple ranges when doing final replacement. Note that this
9669 equivalency relation is transitive but not symmetric.
9670
9671 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9672 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9673 in contexts where that assertion does not hold (e.g., in line 6).
9674
9675 TODO, the main difference between this pass and Patterson's is that
9676 we do not propagate edge probabilities. We only compute whether
9677 edges can be taken or not. That is, instead of having a spectrum
9678 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9679 DON'T KNOW. In the future, it may be worthwhile to propagate
9680 probabilities to aid branch prediction. */
9681
static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  /* Edges and switch statements queued for removal/update by the
     switch simplification done during propagation.  */
  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* Propagate ranges over the SSA graph, then fold with the results
     and identify jump threads (vrp_finalize also frees the VRP
     datastructures).  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  free_numbers_of_iterations_estimates ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates dominators and may leave loops needing
     fixup.  */
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      if (current_loops)
	loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
9761
9762 static bool
9763 gate_vrp (void)
9764 {
9765 return flag_tree_vrp != 0;
9766 }
9767
/* Pass registration boilerplate for the "vrp" pass.  */

namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  bool gate () { return gate_vrp (); }
  unsigned int execute () { return execute_vrp (); }

}; // class pass_vrp

} // anon namespace

/* Factory used by the pass manager to instantiate the VRP pass.  */

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}