[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-ssa.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "intl.h"
35 #include "cfgloop.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "tree-ssa-threadupdate.h"
40 #include "gimple-fold.h"
41 #include "expr.h"
42 #include "optabs.h"
43
44
45
46 /* Range of values that can be associated with an SSA_NAME after VRP
47 has executed. */
48 struct value_range_d
49 {
50 /* Lattice value represented by this range. */
51 enum value_range_type type;
52
53 /* Minimum and maximum values represented by this range. These
54 values should be interpreted as follows:
55
56 - If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
57 be NULL.
58
59 - If TYPE == VR_RANGE then MIN holds the minimum value and
60 MAX holds the maximum value of the range [MIN, MAX].
61
62 - If TYPE == ANTI_RANGE the variable is known to NOT
63 take any values in the range [MIN, MAX]. */
64 tree min;
65 tree max;
66
67 /* Set of SSA names whose value ranges are equivalent to this one.
68 This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE. */
69 bitmap equiv;
70 };
71
72 typedef struct value_range_d value_range_t;
73
74 #define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
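
/* For illustration: an 'int' known to lie between 1 and 10 is described
   by { VR_RANGE, 1, 10 }, a pointer known to be non-NULL by the
   anti-range { VR_ANTI_RANGE, 0, 0 } (i.e. ~[0, 0]), and a name about
   which nothing is known by { VR_VARYING, NULL_TREE, NULL_TREE }.  */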
75
76 /* Set of SSA names found live during the RPO traversal of the function
77 for still active basic-blocks. */
78 static sbitmap *live;
79
80 /* Return true if the SSA name NAME is live on the edge E. */
81
82 static bool
83 live_on_edge (edge e, tree name)
84 {
85 return (live[e->dest->index]
86 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
87 }
88
89 /* Local functions. */
90 static int compare_values (tree val1, tree val2);
91 static int compare_values_warnv (tree val1, tree val2, bool *);
92 static void vrp_meet (value_range_t *, value_range_t *);
93 static void vrp_intersect_ranges (value_range_t *, value_range_t *);
94 static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
95 tree, tree, bool, bool *,
96 bool *);
97
98 /* Location information for ASSERT_EXPRs. Each instance of this
99 structure describes an ASSERT_EXPR for an SSA name. Since a single
100 SSA name may have more than one assertion associated with it, these
101 locations are kept in a linked list attached to the corresponding
102 SSA name. */
103 struct assert_locus_d
104 {
105 /* Basic block where the assertion would be inserted. */
106 basic_block bb;
107
108 /* Some assertions need to be inserted on an edge (e.g., assertions
109 generated by COND_EXPRs). In those cases, BB will be NULL. */
110 edge e;
111
112 /* Pointer to the statement that generated this assertion. */
113 gimple_stmt_iterator si;
114
115 /* Predicate code for the ASSERT_EXPR. Must be COMPARISON_CLASS_P. */
116 enum tree_code comp_code;
117
118 /* Value being compared against. */
119 tree val;
120
121 /* Expression to compare. */
122 tree expr;
123
124 /* Next node in the linked list. */
125 struct assert_locus_d *next;
126 };
127
128 typedef struct assert_locus_d *assert_locus_t;
129
130 /* If bit I is present, it means that SSA name N_i has a list of
131 assertions that should be inserted in the IL. */
132 static bitmap need_assert_for;
133
134 /* Array of locations lists where to insert assertions. ASSERTS_FOR[I]
135 holds a list of ASSERT_LOCUS_T nodes that describe where
136 ASSERT_EXPRs for SSA name N_I should be inserted. */
137 static assert_locus_t *asserts_for;
138
139 /* Value range array. After propagation, VR_VALUE[I] holds the range
140 of values that SSA name N_I may take. */
141 static unsigned num_vr_values;
142 static value_range_t **vr_value;
143 static bool values_propagated;
144
145 /* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
146 number of executable edges we saw the last time we visited the
147 node. */
148 static int *vr_phi_edge_counts;
149
150 typedef struct {
151 gimple stmt;
152 tree vec;
153 } switch_update;
154
155 static vec<edge> to_remove_edges;
156 static vec<switch_update> to_update_switch_stmts;
157
158
159 /* Return the maximum value for TYPE. */
160
161 static inline tree
162 vrp_val_max (const_tree type)
163 {
164 if (!INTEGRAL_TYPE_P (type))
165 return NULL_TREE;
166
167 return TYPE_MAX_VALUE (type);
168 }
169
170 /* Return the minimum value for TYPE. */
171
172 static inline tree
173 vrp_val_min (const_tree type)
174 {
175 if (!INTEGRAL_TYPE_P (type))
176 return NULL_TREE;
177
178 return TYPE_MIN_VALUE (type);
179 }
180
181 /* Return whether VAL is equal to the maximum value of its type. This
182 will be true for a positive overflow infinity. We can't do a
183 simple equality comparison with TYPE_MAX_VALUE because C typedefs
184 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
185 to the integer constant with the same value in the type. */
186
187 static inline bool
188 vrp_val_is_max (const_tree val)
189 {
190 tree type_max = vrp_val_max (TREE_TYPE (val));
191 return (val == type_max
192 || (type_max != NULL_TREE
193 && operand_equal_p (val, type_max, 0)));
194 }
195
196 /* Return whether VAL is equal to the minimum value of its type. This
197 will be true for a negative overflow infinity. */
198
199 static inline bool
200 vrp_val_is_min (const_tree val)
201 {
202 tree type_min = vrp_val_min (TREE_TYPE (val));
203 return (val == type_min
204 || (type_min != NULL_TREE
205 && operand_equal_p (val, type_min, 0)));
206 }
207
208
209 /* Return whether TYPE should use an overflow infinity distinct from
210 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
211 represent a signed overflow during VRP computations. An infinity
212 is distinct from a half-range, which will go from some number to
213 TYPE_{MIN,MAX}_VALUE. */
214
215 static inline bool
216 needs_overflow_infinity (const_tree type)
217 {
218 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
219 }
220
221 /* Return whether TYPE can support our overflow infinity
222 representation: we use the TREE_OVERFLOW flag, which only exists
223 for constants. If TYPE doesn't support this, we don't optimize
224 cases which would require signed overflow--we drop them to
225 VARYING. */
226
227 static inline bool
228 supports_overflow_infinity (const_tree type)
229 {
230 tree min = vrp_val_min (type), max = vrp_val_max (type);
231 #ifdef ENABLE_CHECKING
232 gcc_assert (needs_overflow_infinity (type));
233 #endif
234 return (min != NULL_TREE
235 && CONSTANT_CLASS_P (min)
236 && max != NULL_TREE
237 && CONSTANT_CLASS_P (max));
238 }
239
240 /* VAL is the maximum or minimum value of a type. Return a
241 corresponding overflow infinity. */
242
243 static inline tree
244 make_overflow_infinity (tree val)
245 {
246 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
247 val = copy_node (val);
248 TREE_OVERFLOW (val) = 1;
249 return val;
250 }
251
252 /* Return a negative overflow infinity for TYPE. */
253
254 static inline tree
255 negative_overflow_infinity (tree type)
256 {
257 gcc_checking_assert (supports_overflow_infinity (type));
258 return make_overflow_infinity (vrp_val_min (type));
259 }
260
261 /* Return a positive overflow infinity for TYPE. */
262
263 static inline tree
264 positive_overflow_infinity (tree type)
265 {
266 gcc_checking_assert (supports_overflow_infinity (type));
267 return make_overflow_infinity (vrp_val_max (type));
268 }
269
270 /* Return whether VAL is a negative overflow infinity. */
271
272 static inline bool
273 is_negative_overflow_infinity (const_tree val)
274 {
275 return (needs_overflow_infinity (TREE_TYPE (val))
276 && CONSTANT_CLASS_P (val)
277 && TREE_OVERFLOW (val)
278 && vrp_val_is_min (val));
279 }
280
281 /* Return whether VAL is a positive overflow infinity. */
282
283 static inline bool
284 is_positive_overflow_infinity (const_tree val)
285 {
286 return (needs_overflow_infinity (TREE_TYPE (val))
287 && CONSTANT_CLASS_P (val)
288 && TREE_OVERFLOW (val)
289 && vrp_val_is_max (val));
290 }
291
292 /* Return whether VAL is a positive or negative overflow infinity. */
293
294 static inline bool
295 is_overflow_infinity (const_tree val)
296 {
297 return (needs_overflow_infinity (TREE_TYPE (val))
298 && CONSTANT_CLASS_P (val)
299 && TREE_OVERFLOW (val)
300 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
301 }
302
303 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
304
305 static inline bool
306 stmt_overflow_infinity (gimple stmt)
307 {
308 if (is_gimple_assign (stmt)
309 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
310 GIMPLE_SINGLE_RHS)
311 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
312 return false;
313 }
314
315 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
316 the same value with TREE_OVERFLOW clear. This can be used to avoid
317 confusing a regular value with an overflow value. */
318
319 static inline tree
320 avoid_overflow_infinity (tree val)
321 {
322 if (!is_overflow_infinity (val))
323 return val;
324
325 if (vrp_val_is_max (val))
326 return vrp_val_max (TREE_TYPE (val));
327 else
328 {
329 gcc_checking_assert (vrp_val_is_min (val));
330 return vrp_val_min (TREE_TYPE (val));
331 }
332 }
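
/* To illustrate the representation used above: for a signed 8-bit type,
   positive_overflow_infinity returns a copy of the INTEGER_CST 127 with
   TREE_OVERFLOW set.  vrp_val_is_max still accepts it as the type's
   maximum, is_positive_overflow_infinity distinguishes it from a plain
   127, and avoid_overflow_infinity maps it back to the ordinary
   TYPE_MAX_VALUE.  */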
333
334
335 /* Return true if ARG is marked with the nonnull attribute in the
336 current function signature. */
337
338 static bool
339 nonnull_arg_p (const_tree arg)
340 {
341 tree t, attrs, fntype;
342 unsigned HOST_WIDE_INT arg_num;
343
344 gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));
345
346 /* The static chain decl is always non-null. */
347 if (arg == cfun->static_chain_decl)
348 return true;
349
350 fntype = TREE_TYPE (current_function_decl);
351 for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
352 {
353 attrs = lookup_attribute ("nonnull", attrs);
354
355 /* If "nonnull" wasn't specified, we know nothing about the argument. */
356 if (attrs == NULL_TREE)
357 return false;
358
359 /* If "nonnull" applies to all the arguments, then ARG is non-null. */
360 if (TREE_VALUE (attrs) == NULL_TREE)
361 return true;
362
363 /* Get the position number for ARG in the function signature. */
364 for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
365 t;
366 t = DECL_CHAIN (t), arg_num++)
367 {
368 if (t == arg)
369 break;
370 }
371
372 gcc_assert (t == arg);
373
374 /* Now see if ARG_NUM is mentioned in the nonnull list. */
375 for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
376 {
377 if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
378 return true;
379 }
380 }
381
382 return false;
383 }
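
/* For example, given

     void f (char *a, char *b) __attribute__ ((nonnull (2)));

   nonnull_arg_p returns true for B (its position, 2, is listed in the
   attribute) and false for A.  A bare __attribute__ ((nonnull)) makes
   it return true for every pointer parameter.  */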
384
385
386 /* Set value range VR to VR_UNDEFINED. */
387
388 static inline void
389 set_value_range_to_undefined (value_range_t *vr)
390 {
391 vr->type = VR_UNDEFINED;
392 vr->min = vr->max = NULL_TREE;
393 if (vr->equiv)
394 bitmap_clear (vr->equiv);
395 }
396
397
398 /* Set value range VR to VR_VARYING. */
399
400 static inline void
401 set_value_range_to_varying (value_range_t *vr)
402 {
403 vr->type = VR_VARYING;
404 vr->min = vr->max = NULL_TREE;
405 if (vr->equiv)
406 bitmap_clear (vr->equiv);
407 }
408
409
410 /* Set value range VR to {T, MIN, MAX, EQUIV}. */
411
412 static void
413 set_value_range (value_range_t *vr, enum value_range_type t, tree min,
414 tree max, bitmap equiv)
415 {
416 #if defined ENABLE_CHECKING
417 /* Check the validity of the range. */
418 if (t == VR_RANGE || t == VR_ANTI_RANGE)
419 {
420 int cmp;
421
422 gcc_assert (min && max);
423
424 if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
425 gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));
426
427 cmp = compare_values (min, max);
428 gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
429
430 if (needs_overflow_infinity (TREE_TYPE (min)))
431 gcc_assert (!is_overflow_infinity (min)
432 || !is_overflow_infinity (max));
433 }
434
435 if (t == VR_UNDEFINED || t == VR_VARYING)
436 gcc_assert (min == NULL_TREE && max == NULL_TREE);
437
438 if (t == VR_UNDEFINED || t == VR_VARYING)
439 gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
440 #endif
441
442 vr->type = t;
443 vr->min = min;
444 vr->max = max;
445
446 /* Since updating the equivalence set involves deep copying the
447 bitmaps, only do it if absolutely necessary. */
448 if (vr->equiv == NULL
449 && equiv != NULL)
450 vr->equiv = BITMAP_ALLOC (NULL);
451
452 if (equiv != vr->equiv)
453 {
454 if (equiv && !bitmap_empty_p (equiv))
455 bitmap_copy (vr->equiv, equiv);
456 else
457 bitmap_clear (vr->equiv);
458 }
459 }
460
461
462 /* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
463 This means adjusting T, MIN and MAX representing the case of a
464 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
465 as anti-range ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
466 In corner cases where MAX+1 or MIN-1 wraps this will fall back
467 to varying.
468 This routine exists to ease canonicalization in the case where we
469 extract ranges from var + CST op limit. */
470
471 static void
472 set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
473 tree min, tree max, bitmap equiv)
474 {
475 /* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
476 if (t == VR_UNDEFINED)
477 {
478 set_value_range_to_undefined (vr);
479 return;
480 }
481 else if (t == VR_VARYING)
482 {
483 set_value_range_to_varying (vr);
484 return;
485 }
486
487 /* Nothing to canonicalize for symbolic ranges. */
488 if (TREE_CODE (min) != INTEGER_CST
489 || TREE_CODE (max) != INTEGER_CST)
490 {
491 set_value_range (vr, t, min, max, equiv);
492 return;
493 }
494
495 /* MIN and MAX are in the wrong order; to represent the same set of
496 values we need to swap them and flip the VR type. */
497 if (tree_int_cst_lt (max, min))
498 {
499 tree one, tmp;
500
501 /* For one bit precision if max < min, then the swapped
502 range covers all values, so for VR_RANGE it is varying and
503 for VR_ANTI_RANGE empty range, so drop to varying as well. */
504 if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
505 {
506 set_value_range_to_varying (vr);
507 return;
508 }
509
510 one = build_int_cst (TREE_TYPE (min), 1);
511 tmp = int_const_binop (PLUS_EXPR, max, one);
512 max = int_const_binop (MINUS_EXPR, min, one);
513 min = tmp;
514
515 /* There's one corner case, if we had [C+1, C] before we now have
516 that again. But this represents an empty value range, so drop
517 to varying in this case. */
518 if (tree_int_cst_lt (max, min))
519 {
520 set_value_range_to_varying (vr);
521 return;
522 }
523
524 t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
525 }
526
527 /* Anti-ranges that can be represented as ranges should be so. */
528 if (t == VR_ANTI_RANGE)
529 {
530 bool is_min = vrp_val_is_min (min);
531 bool is_max = vrp_val_is_max (max);
532
533 if (is_min && is_max)
534 {
535 /* We cannot deal with empty ranges, drop to varying.
536 ??? This could be VR_UNDEFINED instead. */
537 set_value_range_to_varying (vr);
538 return;
539 }
540 else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
541 && (is_min || is_max))
542 {
543 /* Non-empty boolean ranges can always be represented
544 as a singleton range. */
545 if (is_min)
546 min = max = vrp_val_max (TREE_TYPE (min));
547 else
548 min = max = vrp_val_min (TREE_TYPE (min));
549 t = VR_RANGE;
550 }
551 else if (is_min
552 /* As a special exception preserve non-null ranges. */
553 && !(TYPE_UNSIGNED (TREE_TYPE (min))
554 && integer_zerop (max)))
555 {
556 tree one = build_int_cst (TREE_TYPE (max), 1);
557 min = int_const_binop (PLUS_EXPR, max, one);
558 max = vrp_val_max (TREE_TYPE (max));
559 t = VR_RANGE;
560 }
561 else if (is_max)
562 {
563 tree one = build_int_cst (TREE_TYPE (min), 1);
564 max = int_const_binop (MINUS_EXPR, min, one);
565 min = vrp_val_min (TREE_TYPE (min));
566 t = VR_RANGE;
567 }
568 }
569
570 /* Drop [-INF(OVF), +INF(OVF)] to varying. */
571 if (needs_overflow_infinity (TREE_TYPE (min))
572 && is_overflow_infinity (min)
573 && is_overflow_infinity (max))
574 {
575 set_value_range_to_varying (vr);
576 return;
577 }
578
579 set_value_range (vr, t, min, max, equiv);
580 }
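
/* Examples of the canonicalization above, assuming a 32-bit signed type:
   the swapped range [7, 3] becomes the anti-range ~[4, 6], the
   anti-range ~[INT_MIN, 5] becomes the range [6, INT_MAX], and
   ~[INT_MIN, INT_MAX] cannot be represented and drops to VR_VARYING.  */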
581
582 /* Copy value range FROM into value range TO. */
583
584 static inline void
585 copy_value_range (value_range_t *to, value_range_t *from)
586 {
587 set_value_range (to, from->type, from->min, from->max, from->equiv);
588 }
589
590 /* Set value range VR to a single value. This function is only called
591 with values we get from statements, and exists to clear the
592 TREE_OVERFLOW flag so that we don't think we have an overflow
593 infinity when we shouldn't. */
594
595 static inline void
596 set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
597 {
598 gcc_assert (is_gimple_min_invariant (val));
599 val = avoid_overflow_infinity (val);
600 set_value_range (vr, VR_RANGE, val, val, equiv);
601 }
602
603 /* Set value range VR to a non-negative range of type TYPE.
604 OVERFLOW_INFINITY indicates whether to use an overflow infinity
605 rather than TYPE_MAX_VALUE; this should be true if we determine
606 that the range is nonnegative based on the assumption that signed
607 overflow does not occur. */
608
609 static inline void
610 set_value_range_to_nonnegative (value_range_t *vr, tree type,
611 bool overflow_infinity)
612 {
613 tree zero;
614
615 if (overflow_infinity && !supports_overflow_infinity (type))
616 {
617 set_value_range_to_varying (vr);
618 return;
619 }
620
621 zero = build_int_cst (type, 0);
622 set_value_range (vr, VR_RANGE, zero,
623 (overflow_infinity
624 ? positive_overflow_infinity (type)
625 : TYPE_MAX_VALUE (type)),
626 vr->equiv);
627 }
628
629 /* Set value range VR to a non-NULL range of type TYPE. */
630
631 static inline void
632 set_value_range_to_nonnull (value_range_t *vr, tree type)
633 {
634 tree zero = build_int_cst (type, 0);
635 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
636 }
637
638
639 /* Set value range VR to a NULL range of type TYPE. */
640
641 static inline void
642 set_value_range_to_null (value_range_t *vr, tree type)
643 {
644 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
645 }
646
647
648 /* Set value range VR to a range of a truthvalue of type TYPE. */
649
650 static inline void
651 set_value_range_to_truthvalue (value_range_t *vr, tree type)
652 {
653 if (TYPE_PRECISION (type) == 1)
654 set_value_range_to_varying (vr);
655 else
656 set_value_range (vr, VR_RANGE,
657 build_int_cst (type, 0), build_int_cst (type, 1),
658 vr->equiv);
659 }
660
661
662 /* If abs (min) < abs (max), set VR to [-max, max]; if
663 abs (min) >= abs (max), set VR to [-min, min]. */
664
665 static void
666 abs_extent_range (value_range_t *vr, tree min, tree max)
667 {
668 int cmp;
669
670 gcc_assert (TREE_CODE (min) == INTEGER_CST);
671 gcc_assert (TREE_CODE (max) == INTEGER_CST);
672 gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
673 gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
674 min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
675 max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
676 if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
677 {
678 set_value_range_to_varying (vr);
679 return;
680 }
681 cmp = compare_values (min, max);
682 if (cmp == -1)
683 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
684 else if (cmp == 0 || cmp == 1)
685 {
686 max = min;
687 min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
688 }
689 else
690 {
691 set_value_range_to_varying (vr);
692 return;
693 }
694 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
695 }
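
/* For instance, abs_extent_range with MIN = -3, MAX = 5 yields [-5, 5],
   while MIN = -7, MAX = 5 yields [-7, 7]; if taking either absolute
   value overflows, the range drops to VR_VARYING.  */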
696
697
698 /* Return value range information for VAR.
699
700 If we have no value ranges recorded (i.e., VRP is not running), then
701 return NULL. Otherwise create an empty range if none existed for VAR. */
702
703 static value_range_t *
704 get_value_range (const_tree var)
705 {
706 static const struct value_range_d vr_const_varying
707 = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
708 value_range_t *vr;
709 tree sym;
710 unsigned ver = SSA_NAME_VERSION (var);
711
712 /* If we have no recorded ranges, then return NULL. */
713 if (! vr_value)
714 return NULL;
715
716 /* If we query the range for a new SSA name return an unmodifiable VARYING.
717 We should get here at most from the substitute-and-fold stage which
718 will never try to change values. */
719 if (ver >= num_vr_values)
720 return CONST_CAST (value_range_t *, &vr_const_varying);
721
722 vr = vr_value[ver];
723 if (vr)
724 return vr;
725
726 /* After propagation finished do not allocate new value-ranges. */
727 if (values_propagated)
728 return CONST_CAST (value_range_t *, &vr_const_varying);
729
730 /* Create a default value range. */
731 vr_value[ver] = vr = XCNEW (value_range_t);
732
733 /* Defer allocating the equivalence set. */
734 vr->equiv = NULL;
735
736 /* If VAR is a default definition of a parameter, the variable can
737 take any value in VAR's type. */
738 if (SSA_NAME_IS_DEFAULT_DEF (var))
739 {
740 sym = SSA_NAME_VAR (var);
741 if (TREE_CODE (sym) == PARM_DECL)
742 {
743 /* Try to use the "nonnull" attribute to create ~[0, 0]
744 anti-ranges for pointers. Note that this is only valid with
745 default definitions of PARM_DECLs. */
746 if (POINTER_TYPE_P (TREE_TYPE (sym))
747 && nonnull_arg_p (sym))
748 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
749 else
750 set_value_range_to_varying (vr);
751 }
752 else if (TREE_CODE (sym) == RESULT_DECL
753 && DECL_BY_REFERENCE (sym))
754 set_value_range_to_nonnull (vr, TREE_TYPE (sym));
755 }
756
757 return vr;
758 }
759
760 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
761
762 static inline bool
763 vrp_operand_equal_p (const_tree val1, const_tree val2)
764 {
765 if (val1 == val2)
766 return true;
767 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
768 return false;
769 if (is_overflow_infinity (val1))
770 return is_overflow_infinity (val2);
771 return true;
772 }
773
774 /* Return true, if the bitmaps B1 and B2 are equal. */
775
776 static inline bool
777 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
778 {
779 return (b1 == b2
780 || ((!b1 || bitmap_empty_p (b1))
781 && (!b2 || bitmap_empty_p (b2)))
782 || (b1 && b2
783 && bitmap_equal_p (b1, b2)));
784 }
785
786 /* Update the value range and equivalence set for variable VAR to
787 NEW_VR. Return true if NEW_VR is different from VAR's previous
788 value.
789
790 NOTE: This function assumes that NEW_VR is a temporary value range
791 object created for the sole purpose of updating VAR's range. The
792 storage used by the equivalence set from NEW_VR will be freed by
793 this function. Do not call update_value_range when NEW_VR
794 is the range object associated with another SSA name. */
795
796 static inline bool
797 update_value_range (const_tree var, value_range_t *new_vr)
798 {
799 value_range_t *old_vr;
800 bool is_new;
801
802 /* Update the value range, if necessary. */
803 old_vr = get_value_range (var);
804 is_new = old_vr->type != new_vr->type
805 || !vrp_operand_equal_p (old_vr->min, new_vr->min)
806 || !vrp_operand_equal_p (old_vr->max, new_vr->max)
807 || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
808
809 if (is_new)
810 {
811 /* Do not allow transitions up the lattice. The following
812 is slightly more awkward than just new_vr->type < old_vr->type
813 because VR_RANGE and VR_ANTI_RANGE need to be considered
814 the same. We may not have is_new when transitioning to
815 UNDEFINED or from VARYING. */
816 if (new_vr->type == VR_UNDEFINED
817 || old_vr->type == VR_VARYING)
818 set_value_range_to_varying (old_vr);
819 else
820 set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
821 new_vr->equiv);
822 }
823
824 BITMAP_FREE (new_vr->equiv);
825
826 return is_new;
827 }
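
/* For instance, if the old range of VAR was ~[0, 0] and the newly
   computed range is [1, 10], the stored range is replaced and true is
   returned.  A transition back to VR_UNDEFINED is not allowed; such an
   update forces the stored range to VR_VARYING instead.  */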
828
829
830 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
831 point where equivalence processing can be turned on/off. */
832
833 static void
834 add_equivalence (bitmap *equiv, const_tree var)
835 {
836 unsigned ver = SSA_NAME_VERSION (var);
837 value_range_t *vr = vr_value[ver];
838
839 if (*equiv == NULL)
840 *equiv = BITMAP_ALLOC (NULL);
841 bitmap_set_bit (*equiv, ver);
842 if (vr && vr->equiv)
843 bitmap_ior_into (*equiv, vr->equiv);
844 }
845
846
847 /* Return true if VR is ~[0, 0]. */
848
849 static inline bool
850 range_is_nonnull (value_range_t *vr)
851 {
852 return vr->type == VR_ANTI_RANGE
853 && integer_zerop (vr->min)
854 && integer_zerop (vr->max);
855 }
856
857
858 /* Return true if VR is [0, 0]. */
859
860 static inline bool
861 range_is_null (value_range_t *vr)
862 {
863 return vr->type == VR_RANGE
864 && integer_zerop (vr->min)
865 && integer_zerop (vr->max);
866 }
867
868 /* Return true if the min and max of VR are INTEGER_CSTs. It's not
869 necessarily a singleton. */
870
871 static inline bool
872 range_int_cst_p (value_range_t *vr)
873 {
874 return (vr->type == VR_RANGE
875 && TREE_CODE (vr->max) == INTEGER_CST
876 && TREE_CODE (vr->min) == INTEGER_CST);
877 }
878
879 /* Return true if VR is an INTEGER_CST singleton. */
880
881 static inline bool
882 range_int_cst_singleton_p (value_range_t *vr)
883 {
884 return (range_int_cst_p (vr)
885 && !TREE_OVERFLOW (vr->min)
886 && !TREE_OVERFLOW (vr->max)
887 && tree_int_cst_equal (vr->min, vr->max));
888 }
889
890 /* Return true if value range VR involves at least one symbol. */
891
892 static inline bool
893 symbolic_range_p (value_range_t *vr)
894 {
895 return (!is_gimple_min_invariant (vr->min)
896 || !is_gimple_min_invariant (vr->max));
897 }
898
899 /* Return true if value range VR uses an overflow infinity. */
900
901 static inline bool
902 overflow_infinity_range_p (value_range_t *vr)
903 {
904 return (vr->type == VR_RANGE
905 && (is_overflow_infinity (vr->min)
906 || is_overflow_infinity (vr->max)));
907 }
908
909 /* Return false if we cannot make a valid comparison based on VR;
910 this will be the case if it uses an overflow infinity and overflow
911 is not undefined (i.e., -fno-strict-overflow is in effect).
912 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
913 uses an overflow infinity. */
914
915 static bool
916 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
917 {
918 gcc_assert (vr->type == VR_RANGE);
919 if (is_overflow_infinity (vr->min))
920 {
921 *strict_overflow_p = true;
922 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
923 return false;
924 }
925 if (is_overflow_infinity (vr->max))
926 {
927 *strict_overflow_p = true;
928 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
929 return false;
930 }
931 return true;
932 }
933
934
935 /* Return true if the result of assignment STMT is known to be non-negative.
936 If the return value is based on the assumption that signed overflow is
937 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
938 *STRICT_OVERFLOW_P. */
939
940 static bool
941 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
942 {
943 enum tree_code code = gimple_assign_rhs_code (stmt);
944 switch (get_gimple_rhs_class (code))
945 {
946 case GIMPLE_UNARY_RHS:
947 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
948 gimple_expr_type (stmt),
949 gimple_assign_rhs1 (stmt),
950 strict_overflow_p);
951 case GIMPLE_BINARY_RHS:
952 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
953 gimple_expr_type (stmt),
954 gimple_assign_rhs1 (stmt),
955 gimple_assign_rhs2 (stmt),
956 strict_overflow_p);
957 case GIMPLE_TERNARY_RHS:
958 return false;
959 case GIMPLE_SINGLE_RHS:
960 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
961 strict_overflow_p);
962 case GIMPLE_INVALID_RHS:
963 gcc_unreachable ();
964 default:
965 gcc_unreachable ();
966 }
967 }
968
969 /* Return true if the return value of call STMT is known to be non-negative.
970 If the return value is based on the assumption that signed overflow is
971 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
972 *STRICT_OVERFLOW_P. */
973
974 static bool
975 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
976 {
977 tree arg0 = gimple_call_num_args (stmt) > 0 ?
978 gimple_call_arg (stmt, 0) : NULL_TREE;
979 tree arg1 = gimple_call_num_args (stmt) > 1 ?
980 gimple_call_arg (stmt, 1) : NULL_TREE;
981
982 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
983 gimple_call_fndecl (stmt),
984 arg0,
985 arg1,
986 strict_overflow_p);
987 }
988
989 /* Return true if STMT is known to compute a non-negative value.
990 If the return value is based on the assumption that signed overflow is
991 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
992 *STRICT_OVERFLOW_P. */
993
994 static bool
995 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
996 {
997 switch (gimple_code (stmt))
998 {
999 case GIMPLE_ASSIGN:
1000 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1001 case GIMPLE_CALL:
1002 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1003 default:
1004 gcc_unreachable ();
1005 }
1006 }
1007
1008 /* Return true if the result of assignment STMT is known to be non-zero.
1009 If the return value is based on the assumption that signed overflow is
1010 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1011 *STRICT_OVERFLOW_P. */
1012
1013 static bool
1014 gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1015 {
1016 enum tree_code code = gimple_assign_rhs_code (stmt);
1017 switch (get_gimple_rhs_class (code))
1018 {
1019 case GIMPLE_UNARY_RHS:
1020 return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1021 gimple_expr_type (stmt),
1022 gimple_assign_rhs1 (stmt),
1023 strict_overflow_p);
1024 case GIMPLE_BINARY_RHS:
1025 return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
1026 gimple_expr_type (stmt),
1027 gimple_assign_rhs1 (stmt),
1028 gimple_assign_rhs2 (stmt),
1029 strict_overflow_p);
1030 case GIMPLE_TERNARY_RHS:
1031 return false;
1032 case GIMPLE_SINGLE_RHS:
1033 return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
1034 strict_overflow_p);
1035 case GIMPLE_INVALID_RHS:
1036 gcc_unreachable ();
1037 default:
1038 gcc_unreachable ();
1039 }
1040 }
1041
1042 /* Return true if STMT is known to compute a non-zero value.
1043 If the return value is based on the assumption that signed overflow is
1044 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1045 *STRICT_OVERFLOW_P. */
1046
1047 static bool
1048 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1049 {
1050 switch (gimple_code (stmt))
1051 {
1052 case GIMPLE_ASSIGN:
1053 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1054 case GIMPLE_CALL:
1055 return gimple_alloca_call_p (stmt);
1056 default:
1057 gcc_unreachable ();
1058 }
1059 }
1060
1061 /* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
1062 obtained so far. */
1063
1064 static bool
1065 vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
1066 {
1067 if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
1068 return true;
1069
1070 /* If we have an expression of the form &X->a, then the expression
1071 is nonnull if X is nonnull. */
1072 if (is_gimple_assign (stmt)
1073 && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
1074 {
1075 tree expr = gimple_assign_rhs1 (stmt);
1076 tree base = get_base_address (TREE_OPERAND (expr, 0));
1077
1078 if (base != NULL_TREE
1079 && TREE_CODE (base) == MEM_REF
1080 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
1081 {
1082 value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
1083 if (range_is_nonnull (vr))
1084 return true;
1085 }
1086 }
1087
1088 return false;
1089 }
1090
1091 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1092 a gimple invariant, or SSA_NAME +- CST. */
1093
1094 static bool
1095 valid_value_p (tree expr)
1096 {
1097 if (TREE_CODE (expr) == SSA_NAME)
1098 return true;
1099
1100 if (TREE_CODE (expr) == PLUS_EXPR
1101 || TREE_CODE (expr) == MINUS_EXPR)
1102 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1103 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1104
1105 return is_gimple_min_invariant (expr);
1106 }
1107
1108 /* Return
1109 1 if VAL < VAL2
1110 0 if !(VAL < VAL2)
1111 -2 if those are incomparable. */
1112 static inline int
1113 operand_less_p (tree val, tree val2)
1114 {
1115 /* LT is folded faster than GE and others. Inline the common case. */
1116 if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
1117 {
1118 if (TYPE_UNSIGNED (TREE_TYPE (val)))
1119 return INT_CST_LT_UNSIGNED (val, val2);
1120 else
1121 {
1122 if (INT_CST_LT (val, val2))
1123 return 1;
1124 }
1125 }
1126 else
1127 {
1128 tree tcmp;
1129
1130 fold_defer_overflow_warnings ();
1131
1132 tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);
1133
1134 fold_undefer_and_ignore_overflow_warnings ();
1135
1136 if (!tcmp
1137 || TREE_CODE (tcmp) != INTEGER_CST)
1138 return -2;
1139
1140 if (!integer_zerop (tcmp))
1141 return 1;
1142 }
1143
1144 /* val >= val2, not considering overflow infinity. */
1145 if (is_negative_overflow_infinity (val))
1146 return is_negative_overflow_infinity (val2) ? 0 : 1;
1147 else if (is_positive_overflow_infinity (val2))
1148 return is_positive_overflow_infinity (val) ? 0 : 1;
1149
1150 return 0;
1151 }
1152
1153 /* Compare two values VAL1 and VAL2. Return
1154
1155 -2 if VAL1 and VAL2 cannot be compared at compile-time,
1156 -1 if VAL1 < VAL2,
1157 0 if VAL1 == VAL2,
1158 +1 if VAL1 > VAL2, and
1159 +2 if VAL1 != VAL2
1160
1161 This is similar to tree_int_cst_compare but supports pointer values
1162 and values that cannot be compared at compile time.
1163
1164 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
1165 true if the return value is only valid if we assume that signed
1166 overflow is undefined. */
1167
1168 static int
1169 compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
1170 {
1171 if (val1 == val2)
1172 return 0;
1173
1174 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
1175 both integers. */
1176 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
1177 == POINTER_TYPE_P (TREE_TYPE (val2)));
1178 /* Convert the two values into the same type. This is needed because
1179 sizetype causes sign extension even for unsigned types. */
1180 val2 = fold_convert (TREE_TYPE (val1), val2);
1181 STRIP_USELESS_TYPE_CONVERSION (val2);
1182
1183 if ((TREE_CODE (val1) == SSA_NAME
1184 || TREE_CODE (val1) == PLUS_EXPR
1185 || TREE_CODE (val1) == MINUS_EXPR)
1186 && (TREE_CODE (val2) == SSA_NAME
1187 || TREE_CODE (val2) == PLUS_EXPR
1188 || TREE_CODE (val2) == MINUS_EXPR))
1189 {
1190 tree n1, c1, n2, c2;
1191 enum tree_code code1, code2;
1192
1193 /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
1194 return -1 or +1 accordingly. If VAL1 and VAL2 don't use the
1195 same name, return -2. */
1196 if (TREE_CODE (val1) == SSA_NAME)
1197 {
1198 code1 = SSA_NAME;
1199 n1 = val1;
1200 c1 = NULL_TREE;
1201 }
1202 else
1203 {
1204 code1 = TREE_CODE (val1);
1205 n1 = TREE_OPERAND (val1, 0);
1206 c1 = TREE_OPERAND (val1, 1);
1207 if (tree_int_cst_sgn (c1) == -1)
1208 {
1209 if (is_negative_overflow_infinity (c1))
1210 return -2;
1211 c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
1212 if (!c1)
1213 return -2;
1214 code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1215 }
1216 }
1217
1218 if (TREE_CODE (val2) == SSA_NAME)
1219 {
1220 code2 = SSA_NAME;
1221 n2 = val2;
1222 c2 = NULL_TREE;
1223 }
1224 else
1225 {
1226 code2 = TREE_CODE (val2);
1227 n2 = TREE_OPERAND (val2, 0);
1228 c2 = TREE_OPERAND (val2, 1);
1229 if (tree_int_cst_sgn (c2) == -1)
1230 {
1231 if (is_negative_overflow_infinity (c2))
1232 return -2;
1233 c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
1234 if (!c2)
1235 return -2;
1236 code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
1237 }
1238 }
1239
1240 /* Both values must use the same name. */
1241 if (n1 != n2)
1242 return -2;
1243
1244 if (code1 == SSA_NAME
1245 && code2 == SSA_NAME)
1246 /* NAME == NAME */
1247 return 0;
1248
1249 /* If overflow is defined we cannot simplify more. */
1250 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
1251 return -2;
1252
1253 if (strict_overflow_p != NULL
1254 && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
1255 && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
1256 *strict_overflow_p = true;
1257
1258 if (code1 == SSA_NAME)
1259 {
1260 if (code2 == PLUS_EXPR)
1261 /* NAME < NAME + CST */
1262 return -1;
1263 else if (code2 == MINUS_EXPR)
1264 /* NAME > NAME - CST */
1265 return 1;
1266 }
1267 else if (code1 == PLUS_EXPR)
1268 {
1269 if (code2 == SSA_NAME)
1270 /* NAME + CST > NAME */
1271 return 1;
1272 else if (code2 == PLUS_EXPR)
1273 /* NAME + CST1 > NAME + CST2, if CST1 > CST2 */
1274 return compare_values_warnv (c1, c2, strict_overflow_p);
1275 else if (code2 == MINUS_EXPR)
1276 /* NAME + CST1 > NAME - CST2 */
1277 return 1;
1278 }
1279 else if (code1 == MINUS_EXPR)
1280 {
1281 if (code2 == SSA_NAME)
1282 /* NAME - CST < NAME */
1283 return -1;
1284 else if (code2 == PLUS_EXPR)
1285 /* NAME - CST1 < NAME + CST2 */
1286 return -1;
1287 else if (code2 == MINUS_EXPR)
1288 /* NAME - CST1 > NAME - CST2, if CST1 < CST2. Notice that
1289 C1 and C2 are swapped in the call to compare_values. */
1290 return compare_values_warnv (c2, c1, strict_overflow_p);
1291 }
1292
1293 gcc_unreachable ();
1294 }
1295
1296 /* We cannot compare non-constants. */
1297 if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
1298 return -2;
1299
1300 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
1301 {
1302 /* We cannot compare overflowed values, except for overflow
1303 infinities. */
1304 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
1305 {
1306 if (strict_overflow_p != NULL)
1307 *strict_overflow_p = true;
1308 if (is_negative_overflow_infinity (val1))
1309 return is_negative_overflow_infinity (val2) ? 0 : -1;
1310 else if (is_negative_overflow_infinity (val2))
1311 return 1;
1312 else if (is_positive_overflow_infinity (val1))
1313 return is_positive_overflow_infinity (val2) ? 0 : 1;
1314 else if (is_positive_overflow_infinity (val2))
1315 return -1;
1316 return -2;
1317 }
1318
1319 return tree_int_cst_compare (val1, val2);
1320 }
1321 else
1322 {
1323 tree t;
1324
1325 /* First see if VAL1 and VAL2 are not the same. */
1326 if (val1 == val2 || operand_equal_p (val1, val2, 0))
1327 return 0;
1328
1329 /* If VAL1 is a lower address than VAL2, return -1. */
1330 if (operand_less_p (val1, val2) == 1)
1331 return -1;
1332
1333 /* If VAL1 is a higher address than VAL2, return +1. */
1334 if (operand_less_p (val2, val1) == 1)
1335 return 1;
1336
1337 /* If VAL1 is different than VAL2, return +2.
1338 For integer constants we either have already returned -1 or 1
1339 or they are equivalent. We still might succeed in proving
1340 something about non-trivial operands. */
1341 if (TREE_CODE (val1) != INTEGER_CST
1342 || TREE_CODE (val2) != INTEGER_CST)
1343 {
1344 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
1345 if (t && integer_onep (t))
1346 return 2;
1347 }
1348
1349 return -2;
1350 }
1351 }
1352
1353 /* Compare values like compare_values_warnv, but treat comparisons of
1354 nonconstants which rely on undefined overflow as incomparable. */
1355
1356 static int
1357 compare_values (tree val1, tree val2)
1358 {
1359 bool sop;
1360 int ret;
1361
1362 sop = false;
1363 ret = compare_values_warnv (val1, val2, &sop);
1364 if (sop
1365 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1366 ret = -2;
1367 return ret;
1368 }
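
/* For example, with N_1 of signed type and signed overflow treated as
   undefined, compare_values_warnv (N_1 + 1, N_1 + 3, &sop) returns -1
   and sets *SOP; the compare_values wrapper then turns this into -2
   because the operands are not both invariants.  For plain constants,
   compare_values (3, 7) returns -1.  */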
1369
1370
1371 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1372 0 if VAL is not inside [MIN, MAX],
1373 -2 if we cannot tell either way.
1374
1375 Benchmark compile/20001226-1.c compilation time after changing this
1376 function. */
1377
1378 static inline int
1379 value_inside_range (tree val, tree min, tree max)
1380 {
1381 int cmp1, cmp2;
1382
1383 cmp1 = operand_less_p (val, min);
1384 if (cmp1 == -2)
1385 return -2;
1386 if (cmp1 == 1)
1387 return 0;
1388
1389 cmp2 = operand_less_p (max, val);
1390 if (cmp2 == -2)
1391 return -2;
1392
1393 return !cmp2;
1394 }
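
/* E.g. value_inside_range (5, 1, 10) returns 1 and
   value_inside_range (0, 1, 10) returns 0; if VAL is symbolic and
   cannot be ordered against the bounds, -2 is returned.  */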
1395
1396
1397 /* Return true if value ranges VR0 and VR1 have a non-empty
1398 intersection.
1399
1400 Benchmark compile/20001226-1.c compilation time after changing this
1401 function.
1402 */
1403
1404 static inline bool
1405 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1406 {
1407 /* The value ranges do not intersect if the maximum of the first range is
1408 less than the minimum of the second range or vice versa.
1409 When those relations are unknown, we can't do any better. */
1410 if (operand_less_p (vr0->max, vr1->min) != 0)
1411 return false;
1412 if (operand_less_p (vr1->max, vr0->min) != 0)
1413 return false;
1414 return true;
1415 }
1416
1417
1418 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1419 include the value zero, -2 if we cannot tell. */
1420
1421 static inline int
1422 range_includes_zero_p (tree min, tree max)
1423 {
1424 tree zero = build_int_cst (TREE_TYPE (min), 0);
1425 return value_inside_range (zero, min, max);
1426 }
1427
1428 /* Return true if *VR is known to only contain nonnegative values. */
1429
1430 static inline bool
1431 value_range_nonnegative_p (value_range_t *vr)
1432 {
1433 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1434 which would return a useful value should be encoded as a
1435 VR_RANGE. */
1436 if (vr->type == VR_RANGE)
1437 {
1438 int result = compare_values (vr->min, integer_zero_node);
1439 return (result == 0 || result == 1);
1440 }
1441
1442 return false;
1443 }
1444
1445 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1446 false otherwise or if no value range information is available. */
1447
1448 bool
1449 ssa_name_nonnegative_p (const_tree t)
1450 {
1451 value_range_t *vr = get_value_range (t);
1452
1453 if (INTEGRAL_TYPE_P (t)
1454 && TYPE_UNSIGNED (t))
1455 return true;
1456
1457 if (!vr)
1458 return false;
1459
1460 return value_range_nonnegative_p (vr);
1461 }
1462
1463 /* If *VR has a value range that is a single constant value return that,
1464 otherwise return NULL_TREE. */
1465
1466 static tree
1467 value_range_constant_singleton (value_range_t *vr)
1468 {
1469 if (vr->type == VR_RANGE
1470 && operand_equal_p (vr->min, vr->max, 0)
1471 && is_gimple_min_invariant (vr->min))
1472 return vr->min;
1473
1474 return NULL_TREE;
1475 }
1476
1477 /* If OP has a value range with a single constant value return that,
1478 otherwise return NULL_TREE. This returns OP itself if OP is a
1479 constant. */
1480
1481 static tree
1482 op_with_constant_singleton_value_range (tree op)
1483 {
1484 if (is_gimple_min_invariant (op))
1485 return op;
1486
1487 if (TREE_CODE (op) != SSA_NAME)
1488 return NULL_TREE;
1489
1490 return value_range_constant_singleton (get_value_range (op));
1491 }
1492
1493 /* Return true if op is in a boolean [0, 1] value-range. */
1494
1495 static bool
1496 op_with_boolean_value_range_p (tree op)
1497 {
1498 value_range_t *vr;
1499
1500 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1501 return true;
1502
1503 if (integer_zerop (op)
1504 || integer_onep (op))
1505 return true;
1506
1507 if (TREE_CODE (op) != SSA_NAME)
1508 return false;
1509
1510 vr = get_value_range (op);
1511 return (vr->type == VR_RANGE
1512 && integer_zerop (vr->min)
1513 && integer_onep (vr->max));
1514 }
1515
1516 /* Extract value range information from an ASSERT_EXPR EXPR and store
1517 it in *VR_P. */
1518
1519 static void
1520 extract_range_from_assert (value_range_t *vr_p, tree expr)
1521 {
1522 tree var, cond, limit, min, max, type;
1523 value_range_t *limit_vr;
1524 enum tree_code cond_code;
1525
1526 var = ASSERT_EXPR_VAR (expr);
1527 cond = ASSERT_EXPR_COND (expr);
1528
1529 gcc_assert (COMPARISON_CLASS_P (cond));
1530
1531 /* Find VAR in the ASSERT_EXPR conditional. */
1532 if (var == TREE_OPERAND (cond, 0)
1533 || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
1534 || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
1535 {
1536 /* If the predicate is of the form VAR COMP LIMIT, then we just
1537 take LIMIT from the RHS and use the same comparison code. */
1538 cond_code = TREE_CODE (cond);
1539 limit = TREE_OPERAND (cond, 1);
1540 cond = TREE_OPERAND (cond, 0);
1541 }
1542 else
1543 {
1544 /* If the predicate is of the form LIMIT COMP VAR, then we need
1545 to flip around the comparison code to create the proper range
1546 for VAR. */
1547 cond_code = swap_tree_comparison (TREE_CODE (cond));
1548 limit = TREE_OPERAND (cond, 0);
1549 cond = TREE_OPERAND (cond, 1);
1550 }
1551
1552 limit = avoid_overflow_infinity (limit);
1553
1554 type = TREE_TYPE (var);
1555 gcc_assert (limit != var);
1556
1557 /* For pointer arithmetic, we only keep track of pointer equality
1558 and inequality. */
1559 if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
1560 {
1561 set_value_range_to_varying (vr_p);
1562 return;
1563 }
1564
1565 /* If LIMIT is another SSA name and LIMIT has a range of its own,
1566 try to use LIMIT's range to avoid creating symbolic ranges
1567 unnecessarily. */
1568 limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
1569
1570 /* LIMIT's range is only interesting if it has any useful information. */
1571 if (limit_vr
1572 && (limit_vr->type == VR_UNDEFINED
1573 || limit_vr->type == VR_VARYING
1574 || symbolic_range_p (limit_vr)))
1575 limit_vr = NULL;
1576
1577 /* Initially, the new range has the same set of equivalences of
1578 VAR's range. This will be revised before returning the final
1579 value. Since assertions may be chained via mutually exclusive
1580 predicates, we will need to trim the set of equivalences before
1581 we are done. */
1582 gcc_assert (vr_p->equiv == NULL);
1583 add_equivalence (&vr_p->equiv, var);
1584
1585 /* Extract a new range based on the asserted comparison for VAR and
1586 LIMIT's value range. Notice that if LIMIT has an anti-range, we
1587 will only use it for equality comparisons (EQ_EXPR). For any
1588 other kind of assertion, we cannot derive a range from LIMIT's
1589 anti-range that can be used to describe the new range. For
1590 instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
1591 then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
1592 no single range for x_2 that could describe LE_EXPR, so we might
1593 as well build the range [b_4, +INF] for it.
1594 One special case we handle is extracting a range from a
1595 range test encoded as (unsigned)var + CST <= limit. */
1596 if (TREE_CODE (cond) == NOP_EXPR
1597 || TREE_CODE (cond) == PLUS_EXPR)
1598 {
1599 if (TREE_CODE (cond) == PLUS_EXPR)
1600 {
1601 min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
1602 TREE_OPERAND (cond, 1));
1603 max = int_const_binop (PLUS_EXPR, limit, min);
1604 cond = TREE_OPERAND (cond, 0);
1605 }
1606 else
1607 {
1608 min = build_int_cst (TREE_TYPE (var), 0);
1609 max = limit;
1610 }
1611
1612 /* Make sure to not set TREE_OVERFLOW on the final type
1613 conversion. We are willingly interpreting large positive
1614 unsigned values as negative signed values here. */
1615 min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
1616 0, false);
1617 max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
1618 0, false);
1619
1620 /* We can transform a max, min range to an anti-range or
1621 vice-versa. Use set_and_canonicalize_value_range which does
1622 this for us. */
1623 if (cond_code == LE_EXPR)
1624 set_and_canonicalize_value_range (vr_p, VR_RANGE,
1625 min, max, vr_p->equiv);
1626 else if (cond_code == GT_EXPR)
1627 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1628 min, max, vr_p->equiv);
1629 else
1630 gcc_unreachable ();
1631 }
1632 else if (cond_code == EQ_EXPR)
1633 {
1634 enum value_range_type range_type;
1635
1636 if (limit_vr)
1637 {
1638 range_type = limit_vr->type;
1639 min = limit_vr->min;
1640 max = limit_vr->max;
1641 }
1642 else
1643 {
1644 range_type = VR_RANGE;
1645 min = limit;
1646 max = limit;
1647 }
1648
1649 set_value_range (vr_p, range_type, min, max, vr_p->equiv);
1650
1651 /* When asserting the equality VAR == LIMIT and LIMIT is another
1652 SSA name, the new range will also inherit the equivalence set
1653 from LIMIT. */
1654 if (TREE_CODE (limit) == SSA_NAME)
1655 add_equivalence (&vr_p->equiv, limit);
1656 }
1657 else if (cond_code == NE_EXPR)
1658 {
1659 /* As described above, when LIMIT's range is an anti-range and
1660 this assertion is an inequality (NE_EXPR), then we cannot
1661 derive anything from the anti-range. For instance, if
1662 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
1663 not imply that VAR's range is [0, 0]. So, in the case of
1664 anti-ranges, we just assert the inequality using LIMIT and
1665 not its anti-range.
1666
1667 If LIMIT_VR is a range, we can only use it to build a new
1668 anti-range if LIMIT_VR is a single-valued range. For
1669 instance, if LIMIT_VR is [0, 1], the predicate
1670 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
1671 Rather, it means that for value 0 VAR should be ~[0, 0]
1672 and for value 1, VAR should be ~[1, 1]. We cannot
1673 represent these ranges.
1674
1675 The only situation in which we can build a valid
1676 anti-range is when LIMIT_VR is a single-valued range
1677 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
1678 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
1679 if (limit_vr
1680 && limit_vr->type == VR_RANGE
1681 && compare_values (limit_vr->min, limit_vr->max) == 0)
1682 {
1683 min = limit_vr->min;
1684 max = limit_vr->max;
1685 }
1686 else
1687 {
1688 /* In any other case, we cannot use LIMIT's range to build a
1689 valid anti-range. */
1690 min = max = limit;
1691 }
1692
1693 /* If MIN and MAX cover the whole range for their type, then
1694 just use the original LIMIT. */
1695 if (INTEGRAL_TYPE_P (type)
1696 && vrp_val_is_min (min)
1697 && vrp_val_is_max (max))
1698 min = max = limit;
1699
1700 set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
1701 min, max, vr_p->equiv);
1702 }
1703 else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
1704 {
1705 min = TYPE_MIN_VALUE (type);
1706
1707 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1708 max = limit;
1709 else
1710 {
1711 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1712 range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
1713 LT_EXPR. */
1714 max = limit_vr->max;
1715 }
1716
1717 /* If the maximum value forces us to be out of bounds, simply punt.
1718 It would be pointless to try and do anything more since this
1719 all should be optimized away above us. */
1720 if ((cond_code == LT_EXPR
1721 && compare_values (max, min) == 0)
1722 || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
1723 set_value_range_to_varying (vr_p);
1724 else
1725 {
1726 /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
1727 if (cond_code == LT_EXPR)
1728 {
1729 if (TYPE_PRECISION (TREE_TYPE (max)) == 1
1730 && !TYPE_UNSIGNED (TREE_TYPE (max)))
1731 max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
1732 build_int_cst (TREE_TYPE (max), -1));
1733 else
1734 max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
1735 build_int_cst (TREE_TYPE (max), 1));
1736 if (EXPR_P (max))
1737 TREE_NO_WARNING (max) = 1;
1738 }
1739
1740 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1741 }
1742 }
1743 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
1744 {
1745 max = TYPE_MAX_VALUE (type);
1746
1747 if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
1748 min = limit;
1749 else
1750 {
1751 /* If LIMIT_VR is of the form [N1, N2], we need to build the
1752 range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
1753 GT_EXPR. */
1754 min = limit_vr->min;
1755 }
1756
1757 /* If the minimum value forces us to be out of bounds, simply punt.
1758 It would be pointless to try and do anything more since this
1759 all should be optimized away above us. */
1760 if ((cond_code == GT_EXPR
1761 && compare_values (min, max) == 0)
1762 || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
1763 set_value_range_to_varying (vr_p);
1764 else
1765 {
1766 /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
1767 if (cond_code == GT_EXPR)
1768 {
1769 if (TYPE_PRECISION (TREE_TYPE (min)) == 1
1770 && !TYPE_UNSIGNED (TREE_TYPE (min)))
1771 min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
1772 build_int_cst (TREE_TYPE (min), -1));
1773 else
1774 min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
1775 build_int_cst (TREE_TYPE (min), 1));
1776 if (EXPR_P (min))
1777 TREE_NO_WARNING (min) = 1;
1778 }
1779
1780 set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
1781 }
1782 }
1783 else
1784 gcc_unreachable ();
1785
1786 /* Finally intersect the new range with what we already know about var. */
1787 vrp_intersect_ranges (vr_p, get_value_range (var));
1788 }
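
/* As an example of the above: ASSERT_EXPR <x_2, x_2 <= 10> with X_2 of
   type int and no useful range for the limit produces [INT_MIN, 10],
   while ASSERT_EXPR <p_5, p_5 != 0B> for a pointer P_5 produces the
   non-NULL anti-range ~[0, 0]; each result is then intersected with
   whatever is already known about the name.  */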
1789
1790
1791 /* Extract range information from SSA name VAR and store it in VR. If
1792 VAR has an interesting range, use it. Otherwise, create the
1793 range [VAR, VAR] and return it. This is useful in situations where
1794 we may have conditionals testing values of VARYING names. For
1795 instance,
1796
1797 x_3 = y_5;
1798 if (x_3 > y_5)
1799 ...
1800
1801 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1802 always false. */
1803
1804 static void
1805 extract_range_from_ssa_name (value_range_t *vr, tree var)
1806 {
1807 value_range_t *var_vr = get_value_range (var);
1808
1809 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1810 copy_value_range (vr, var_vr);
1811 else
1812 set_value_range (vr, VR_RANGE, var, var, NULL);
1813
1814 add_equivalence (&vr->equiv, var);
1815 }
1816
1817
1818 /* Wrapper around int_const_binop. If the operation overflows and we
1819 are not using wrapping arithmetic, then adjust the result to be
1820 -INF or +INF depending on CODE, VAL1 and VAL2. This can return
1821 NULL_TREE if we need to use an overflow infinity representation but
1822 the type does not support it. */
1823
1824 static tree
1825 vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
1826 {
1827 tree res;
1828
1829 res = int_const_binop (code, val1, val2);
1830
1831 /* If we are using unsigned arithmetic, operate symbolically
1832 on -INF and +INF as int_const_binop only handles signed overflow. */
1833 if (TYPE_UNSIGNED (TREE_TYPE (val1)))
1834 {
1835 int checkz = compare_values (res, val1);
1836 bool overflow = false;
1837
1838 /* Ensure that res = val1 [+*] val2 >= val1
1839 or that res = val1 - val2 <= val1. */
1840 if ((code == PLUS_EXPR
1841 && !(checkz == 1 || checkz == 0))
1842 || (code == MINUS_EXPR
1843 && !(checkz == 0 || checkz == -1)))
1844 {
1845 overflow = true;
1846 }
1847 /* Checking for multiplication overflow is done by dividing the
1848 output of the multiplication by the first input of the
1849 multiplication. If the result of that division operation is
1850 not equal to the second input of the multiplication, then the
1851 multiplication overflowed. */
1852 else if (code == MULT_EXPR && !integer_zerop (val1))
1853 {
1854 tree tmp = int_const_binop (TRUNC_DIV_EXPR,
1855 res,
1856 val1);
1857 int check = compare_values (tmp, val2);
1858
1859 if (check != 0)
1860 overflow = true;
1861 }
1862
1863 if (overflow)
1864 {
1865 res = copy_node (res);
1866 TREE_OVERFLOW (res) = 1;
1867 }
1868
1869 }
1870 else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
1871 /* If the signed operation wraps then int_const_binop has done
1872 everything we want. */
1873 ;
1874 else if ((TREE_OVERFLOW (res)
1875 && !TREE_OVERFLOW (val1)
1876 && !TREE_OVERFLOW (val2))
1877 || is_overflow_infinity (val1)
1878 || is_overflow_infinity (val2))
1879 {
1880 /* If the operation overflowed but neither VAL1 nor VAL2 are
1881 overflown, return -INF or +INF depending on the operation
1882 and the combination of signs of the operands. */
1883 int sgn1 = tree_int_cst_sgn (val1);
1884 int sgn2 = tree_int_cst_sgn (val2);
1885
1886 if (needs_overflow_infinity (TREE_TYPE (res))
1887 && !supports_overflow_infinity (TREE_TYPE (res)))
1888 return NULL_TREE;
1889
1890 /* We have to punt on adding infinities of different signs,
1891 since we can't tell what the sign of the result should be.
1892 Likewise for subtracting infinities of the same sign. */
1893 if (((code == PLUS_EXPR && sgn1 != sgn2)
1894 || (code == MINUS_EXPR && sgn1 == sgn2))
1895 && is_overflow_infinity (val1)
1896 && is_overflow_infinity (val2))
1897 return NULL_TREE;
1898
1899 /* Don't try to handle division or shifting of infinities. */
1900 if ((code == TRUNC_DIV_EXPR
1901 || code == FLOOR_DIV_EXPR
1902 || code == CEIL_DIV_EXPR
1903 || code == EXACT_DIV_EXPR
1904 || code == ROUND_DIV_EXPR
1905 || code == RSHIFT_EXPR)
1906 && (is_overflow_infinity (val1)
1907 || is_overflow_infinity (val2)))
1908 return NULL_TREE;
1909
1910 /* Notice that we only need to handle the restricted set of
1911 operations handled by extract_range_from_binary_expr.
1912 Among them, only multiplication, addition and subtraction
1913 can yield overflow without overflown operands because we
1914 are working with integral types only... except in the
1915 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
1916 for division too. */
1917
1918 /* For multiplication, the sign of the overflow is given
1919 by the comparison of the signs of the operands. */
1920 if ((code == MULT_EXPR && sgn1 == sgn2)
1921 /* For addition, the operands must be of the same sign
1922 to yield an overflow. Its sign is therefore that
1923 of one of the operands, for example the first. For
1924 infinite operands X + -INF is negative, not positive. */
1925 || (code == PLUS_EXPR
1926 && (sgn1 >= 0
1927 ? !is_negative_overflow_infinity (val2)
1928 : is_positive_overflow_infinity (val2)))
1929 /* For subtraction, non-infinite operands must be of
1930 different signs to yield an overflow. Its sign is
1931 therefore that of the first operand or the opposite of
1932 that of the second operand. A first operand of 0 counts
1933 as positive here, for the corner case 0 - (-INF), which
1934 overflows, but must yield +INF. For infinite operands 0
1935 - INF is negative, not positive. */
1936 || (code == MINUS_EXPR
1937 && (sgn1 >= 0
1938 ? !is_positive_overflow_infinity (val2)
1939 : is_negative_overflow_infinity (val2)))
1940 /* We only get in here with positive shift count, so the
1941 overflow direction is the same as the sign of val1.
1942 Actually rshift does not overflow at all, but we only
1943 handle the case of shifting overflowed -INF and +INF. */
1944 || (code == RSHIFT_EXPR
1945 && sgn1 >= 0)
1946 /* For division, the only case is -INF / -1 = +INF. */
1947 || code == TRUNC_DIV_EXPR
1948 || code == FLOOR_DIV_EXPR
1949 || code == CEIL_DIV_EXPR
1950 || code == EXACT_DIV_EXPR
1951 || code == ROUND_DIV_EXPR)
1952 return (needs_overflow_infinity (TREE_TYPE (res))
1953 ? positive_overflow_infinity (TREE_TYPE (res))
1954 : TYPE_MAX_VALUE (TREE_TYPE (res)));
1955 else
1956 return (needs_overflow_infinity (TREE_TYPE (res))
1957 ? negative_overflow_infinity (TREE_TYPE (res))
1958 : TYPE_MIN_VALUE (TREE_TYPE (res)));
1959 }
1960
1961 return res;
1962 }
1963
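/* Editor's illustration, not part of the original file: the unsigned
   overflow detection used in vrp_int_const_binop above, restated on a
   plain unsigned HOST_WIDE_INT.  Addition wraps iff the truncated sum is
   smaller than an operand; multiplication wraps iff dividing the
   truncated product back by a nonzero operand does not recover the other
   operand.  The helper name below is hypothetical.  */

static inline bool
sketch_umul_wrapped (unsigned HOST_WIDE_INT a, unsigned HOST_WIDE_INT b)
{
  unsigned HOST_WIDE_INT prod = a * b;	/* Truncated product; may wrap.  */
  /* If the product wrapped, the divide-back check cannot recover B.  */
  return a != 0 && prod / a != b;
}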
1964
1965 /* For range VR compute two double_int bitmasks.  In *MAY_BE_NONZERO, if
1966    some bit is unset, then for all numbers in the range that bit is 0;
1967    otherwise it might be 0 or 1.  In *MUST_BE_NONZERO, if some bit is set,
1968    then for all numbers in the range that bit is 1; otherwise it might be
1969    0 or 1.  (A single-word illustration follows the function below.)  */
1970
1971 static bool
1972 zero_nonzero_bits_from_vr (value_range_t *vr,
1973 double_int *may_be_nonzero,
1974 double_int *must_be_nonzero)
1975 {
1976 *may_be_nonzero = double_int_minus_one;
1977 *must_be_nonzero = double_int_zero;
1978 if (!range_int_cst_p (vr)
1979 || TREE_OVERFLOW (vr->min)
1980 || TREE_OVERFLOW (vr->max))
1981 return false;
1982
1983 if (range_int_cst_singleton_p (vr))
1984 {
1985 *may_be_nonzero = tree_to_double_int (vr->min);
1986 *must_be_nonzero = *may_be_nonzero;
1987 }
1988 else if (tree_int_cst_sgn (vr->min) >= 0
1989 || tree_int_cst_sgn (vr->max) < 0)
1990 {
1991 double_int dmin = tree_to_double_int (vr->min);
1992 double_int dmax = tree_to_double_int (vr->max);
1993 double_int xor_mask = dmin ^ dmax;
1994 *may_be_nonzero = dmin | dmax;
1995 *must_be_nonzero = dmin & dmax;
1996 if (xor_mask.high != 0)
1997 {
1998 unsigned HOST_WIDE_INT mask
1999 = ((unsigned HOST_WIDE_INT) 1
2000 << floor_log2 (xor_mask.high)) - 1;
2001 may_be_nonzero->low = ALL_ONES;
2002 may_be_nonzero->high |= mask;
2003 must_be_nonzero->low = 0;
2004 must_be_nonzero->high &= ~mask;
2005 }
2006 else if (xor_mask.low != 0)
2007 {
2008 unsigned HOST_WIDE_INT mask
2009 = ((unsigned HOST_WIDE_INT) 1
2010 << floor_log2 (xor_mask.low)) - 1;
2011 may_be_nonzero->low |= mask;
2012 must_be_nonzero->low &= ~mask;
2013 }
2014 }
2015
2016 return true;
2017 }
2018
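/* Editor's illustration, not part of the original file: the same bit
   bounds computed for a single-word unsigned range [LO, HI] with
   LO <= HI.  Bits above the most significant bit where LO and HI differ
   are fixed for every value in the range; bits at and below it may vary.
   E.g. for [4, 7] this yields MAY = 0b111 and MUST = 0b100.  The helper
   name is hypothetical.  */

static inline void
sketch_bit_bounds (unsigned HOST_WIDE_INT lo, unsigned HOST_WIDE_INT hi,
		   unsigned HOST_WIDE_INT *may, unsigned HOST_WIDE_INT *must)
{
  unsigned HOST_WIDE_INT xor_mask = lo ^ hi;
  *may = lo | hi;
  *must = lo & hi;
  if (xor_mask != 0)
    {
      /* Everything below the most significant differing bit is unknown.  */
      unsigned HOST_WIDE_INT below
	= ((unsigned HOST_WIDE_INT) 1 << floor_log2 (xor_mask)) - 1;
      *may |= below;
      *must &= ~below;
    }
}
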
2019 /* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
2020 so that *VR0 U *VR1 == *AR. Returns true if that is possible,
2021 false otherwise. If *AR can be represented with a single range
2022 *VR1 will be VR_UNDEFINED. */
2023
2024 static bool
2025 ranges_from_anti_range (value_range_t *ar,
2026 value_range_t *vr0, value_range_t *vr1)
2027 {
2028 tree type = TREE_TYPE (ar->min);
2029
2030 vr0->type = VR_UNDEFINED;
2031 vr1->type = VR_UNDEFINED;
2032
2033 if (ar->type != VR_ANTI_RANGE
2034 || TREE_CODE (ar->min) != INTEGER_CST
2035 || TREE_CODE (ar->max) != INTEGER_CST
2036 || !vrp_val_min (type)
2037 || !vrp_val_max (type))
2038 return false;
2039
2040 if (!vrp_val_is_min (ar->min))
2041 {
2042 vr0->type = VR_RANGE;
2043 vr0->min = vrp_val_min (type);
2044 vr0->max
2045 = double_int_to_tree (type,
2046 tree_to_double_int (ar->min) - double_int_one);
2047 }
2048 if (!vrp_val_is_max (ar->max))
2049 {
2050 vr1->type = VR_RANGE;
2051 vr1->min
2052 = double_int_to_tree (type,
2053 tree_to_double_int (ar->max) + double_int_one);
2054 vr1->max = vrp_val_max (type);
2055 }
2056 if (vr0->type == VR_UNDEFINED)
2057 {
2058 *vr0 = *vr1;
2059 vr1->type = VR_UNDEFINED;
2060 }
2061
2062 return vr0->type != VR_UNDEFINED;
2063 }
2064
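/* Editor's note, an illustrative example assuming a 32-bit signed type:
   the anti-range ~[3, 5] splits into *VR0 = [INT_MIN, 2] and
   *VR1 = [6, INT_MAX], while ~[INT_MIN, 5] needs only a single range,
   so *VR0 = [6, INT_MAX] and *VR1 stays VR_UNDEFINED.  */
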
2065 /* Helper to extract a value-range *VR for a multiplicative operation
2066 *VR0 CODE *VR1. */
2067
2068 static void
2069 extract_range_from_multiplicative_op_1 (value_range_t *vr,
2070 enum tree_code code,
2071 value_range_t *vr0, value_range_t *vr1)
2072 {
2073 enum value_range_type type;
2074 tree val[4];
2075 size_t i;
2076 tree min, max;
2077 bool sop;
2078 int cmp;
2079
2080 	  /* Multiplications, divisions and shifts are a bit tricky to handle:
2081 	     depending on the mix of signs we have in the two ranges, we
2082 need to operate on different values to get the minimum and
2083 maximum values for the new range. One approach is to figure
2084 out all the variations of range combinations and do the
2085 operations.
2086
2087 However, this involves several calls to compare_values and it
2088 is pretty convoluted. It's simpler to do the 4 operations
2089 	     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1)
2090 	     and then figure the smallest and largest values to form
2091 the new range. */
2092 gcc_assert (code == MULT_EXPR
2093 || code == TRUNC_DIV_EXPR
2094 || code == FLOOR_DIV_EXPR
2095 || code == CEIL_DIV_EXPR
2096 || code == EXACT_DIV_EXPR
2097 || code == ROUND_DIV_EXPR
2098 || code == RSHIFT_EXPR
2099 || code == LSHIFT_EXPR);
2100 gcc_assert ((vr0->type == VR_RANGE
2101 || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
2102 && vr0->type == vr1->type);
2103
2104 type = vr0->type;
2105
2106 /* Compute the 4 cross operations. */
2107 sop = false;
2108 val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
2109 if (val[0] == NULL_TREE)
2110 sop = true;
2111
2112 if (vr1->max == vr1->min)
2113 val[1] = NULL_TREE;
2114 else
2115 {
2116 val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
2117 if (val[1] == NULL_TREE)
2118 sop = true;
2119 }
2120
2121 if (vr0->max == vr0->min)
2122 val[2] = NULL_TREE;
2123 else
2124 {
2125 val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
2126 if (val[2] == NULL_TREE)
2127 sop = true;
2128 }
2129
2130 if (vr0->min == vr0->max || vr1->min == vr1->max)
2131 val[3] = NULL_TREE;
2132 else
2133 {
2134 val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
2135 if (val[3] == NULL_TREE)
2136 sop = true;
2137 }
2138
2139 if (sop)
2140 {
2141 set_value_range_to_varying (vr);
2142 return;
2143 }
2144
2145 /* Set MIN to the minimum of VAL[i] and MAX to the maximum
2146 of VAL[i]. */
2147 min = val[0];
2148 max = val[0];
2149 for (i = 1; i < 4; i++)
2150 {
2151 if (!is_gimple_min_invariant (min)
2152 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2153 || !is_gimple_min_invariant (max)
2154 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2155 break;
2156
2157 if (val[i])
2158 {
2159 if (!is_gimple_min_invariant (val[i])
2160 || (TREE_OVERFLOW (val[i])
2161 && !is_overflow_infinity (val[i])))
2162 {
2163 /* If we found an overflowed value, set MIN and MAX
2164 to it so that we set the resulting range to
2165 VARYING. */
2166 min = max = val[i];
2167 break;
2168 }
2169
2170 if (compare_values (val[i], min) == -1)
2171 min = val[i];
2172
2173 if (compare_values (val[i], max) == 1)
2174 max = val[i];
2175 }
2176 }
2177
2178 /* If either MIN or MAX overflowed, then set the resulting range to
2179 VARYING. But we do accept an overflow infinity
2180 representation. */
2181 if (min == NULL_TREE
2182 || !is_gimple_min_invariant (min)
2183 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
2184 || max == NULL_TREE
2185 || !is_gimple_min_invariant (max)
2186 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
2187 {
2188 set_value_range_to_varying (vr);
2189 return;
2190 }
2191
2192 /* We punt if:
2193 1) [-INF, +INF]
2194 2) [-INF, +-INF(OVF)]
2195 3) [+-INF(OVF), +INF]
2196 4) [+-INF(OVF), +-INF(OVF)]
2197 We learn nothing when we have INF and INF(OVF) on both sides.
2198 Note that we do accept [-INF, -INF] and [+INF, +INF] without
2199 overflow. */
2200 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
2201 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
2202 {
2203 set_value_range_to_varying (vr);
2204 return;
2205 }
2206
2207 cmp = compare_values (min, max);
2208 if (cmp == -2 || cmp == 1)
2209 {
2210 /* If the new range has its limits swapped around (MIN > MAX),
2211 then the operation caused one of them to wrap around, mark
2212 the new range VARYING. */
2213 set_value_range_to_varying (vr);
2214 }
2215 else
2216 set_value_range (vr, type, min, max, NULL);
2217 }
2218
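/* Editor's illustration, not part of the original file: the "four cross
   operations" strategy above, restated for plain int ranges and a
   multiplication that cannot overflow.  E.g. [2, 3] * [4, 5] produces
   the products {8, 10, 12, 15} and hence the range [8, 15].  The helper
   name is hypothetical.  */

static inline void
sketch_cross_mult_range (int min0, int max0, int min1, int max1,
			 int *res_min, int *res_max)
{
  int val[4] = { min0 * min1, min0 * max1, max0 * min1, max0 * max1 };
  size_t i;

  *res_min = *res_max = val[0];
  for (i = 1; i < 4; i++)
    {
      if (val[i] < *res_min)
	*res_min = val[i];
      if (val[i] > *res_max)
	*res_max = val[i];
    }
}
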
2219 /* Some quadruple precision helpers. */
2220 static int
2221 quad_int_cmp (double_int l0, double_int h0,
2222 double_int l1, double_int h1, bool uns)
2223 {
2224 int c = h0.cmp (h1, uns);
2225 if (c != 0) return c;
2226 return l0.ucmp (l1);
2227 }
2228
2229 static void
2230 quad_int_pair_sort (double_int *l0, double_int *h0,
2231 double_int *l1, double_int *h1, bool uns)
2232 {
2233 if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2234 {
2235 double_int tmp;
2236 tmp = *l0; *l0 = *l1; *l1 = tmp;
2237 tmp = *h0; *h0 = *h1; *h1 = tmp;
2238 }
2239 }
2240
2241 /* Extract range information from a binary operation CODE based on
2242 the ranges of each of its operands, *VR0 and *VR1 with resulting
2243 type EXPR_TYPE. The resulting range is stored in *VR. */
2244
2245 static void
2246 extract_range_from_binary_expr_1 (value_range_t *vr,
2247 enum tree_code code, tree expr_type,
2248 value_range_t *vr0_, value_range_t *vr1_)
2249 {
2250 value_range_t vr0 = *vr0_, vr1 = *vr1_;
2251 value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
2252 enum value_range_type type;
2253 tree min = NULL_TREE, max = NULL_TREE;
2254 int cmp;
2255
2256 if (!INTEGRAL_TYPE_P (expr_type)
2257 && !POINTER_TYPE_P (expr_type))
2258 {
2259 set_value_range_to_varying (vr);
2260 return;
2261 }
2262
2263 /* Not all binary expressions can be applied to ranges in a
2264 meaningful way. Handle only arithmetic operations. */
2265 if (code != PLUS_EXPR
2266 && code != MINUS_EXPR
2267 && code != POINTER_PLUS_EXPR
2268 && code != MULT_EXPR
2269 && code != TRUNC_DIV_EXPR
2270 && code != FLOOR_DIV_EXPR
2271 && code != CEIL_DIV_EXPR
2272 && code != EXACT_DIV_EXPR
2273 && code != ROUND_DIV_EXPR
2274 && code != TRUNC_MOD_EXPR
2275 && code != RSHIFT_EXPR
2276 && code != LSHIFT_EXPR
2277 && code != MIN_EXPR
2278 && code != MAX_EXPR
2279 && code != BIT_AND_EXPR
2280 && code != BIT_IOR_EXPR
2281 && code != BIT_XOR_EXPR)
2282 {
2283 set_value_range_to_varying (vr);
2284 return;
2285 }
2286
2287 /* If both ranges are UNDEFINED, so is the result. */
2288 if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
2289 {
2290 set_value_range_to_undefined (vr);
2291 return;
2292 }
2293 /* If one of the ranges is UNDEFINED drop it to VARYING for the following
2294 code. At some point we may want to special-case operations that
2295      have an UNDEFINED result for all or some value-ranges of the operand
2296      that is not UNDEFINED.  */
2297 else if (vr0.type == VR_UNDEFINED)
2298 set_value_range_to_varying (&vr0);
2299 else if (vr1.type == VR_UNDEFINED)
2300 set_value_range_to_varying (&vr1);
2301
2302 /* Now canonicalize anti-ranges to ranges when they are not symbolic
2303 and express ~[] op X as ([]' op X) U ([]'' op X). */
2304 if (vr0.type == VR_ANTI_RANGE
2305 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
2306 {
2307 extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
2308 if (vrtem1.type != VR_UNDEFINED)
2309 {
2310 value_range_t vrres = VR_INITIALIZER;
2311 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2312 &vrtem1, vr1_);
2313 vrp_meet (vr, &vrres);
2314 }
2315 return;
2316 }
2317 /* Likewise for X op ~[]. */
2318 if (vr1.type == VR_ANTI_RANGE
2319 && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
2320 {
2321 extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
2322 if (vrtem1.type != VR_UNDEFINED)
2323 {
2324 value_range_t vrres = VR_INITIALIZER;
2325 extract_range_from_binary_expr_1 (&vrres, code, expr_type,
2326 vr0_, &vrtem1);
2327 vrp_meet (vr, &vrres);
2328 }
2329 return;
2330 }
2331
2332 /* The type of the resulting value range defaults to VR0.TYPE. */
2333 type = vr0.type;
2334
2335 /* Refuse to operate on VARYING ranges, ranges of different kinds
2336      and symbolic ranges.  As an exception, we allow BIT_AND_EXPR,
2337      BIT_IOR_EXPR, the division codes, TRUNC_MOD_EXPR, MIN_EXPR and
2338      MAX_EXPR because we may be able to derive a useful range even if
2339      one of the operands is VR_VARYING or a symbolic range.  TODO: we
2340      may be able to derive anti-ranges in some cases.  */
2341 if (code != BIT_AND_EXPR
2342 && code != BIT_IOR_EXPR
2343 && code != TRUNC_DIV_EXPR
2344 && code != FLOOR_DIV_EXPR
2345 && code != CEIL_DIV_EXPR
2346 && code != EXACT_DIV_EXPR
2347 && code != ROUND_DIV_EXPR
2348 && code != TRUNC_MOD_EXPR
2349 && code != MIN_EXPR
2350 && code != MAX_EXPR
2351 && (vr0.type == VR_VARYING
2352 || vr1.type == VR_VARYING
2353 || vr0.type != vr1.type
2354 || symbolic_range_p (&vr0)
2355 || symbolic_range_p (&vr1)))
2356 {
2357 set_value_range_to_varying (vr);
2358 return;
2359 }
2360
2361 /* Now evaluate the expression to determine the new range. */
2362 if (POINTER_TYPE_P (expr_type))
2363 {
2364 if (code == MIN_EXPR || code == MAX_EXPR)
2365 {
2366 	  /* For MIN/MAX expressions with pointers, we only care about
2367 	     nullness: if both are non-null, then the result is non-null;
2368 	     if both are null, then the result is null; otherwise the
2369 	     result is varying.  */
2370 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2371 set_value_range_to_nonnull (vr, expr_type);
2372 else if (range_is_null (&vr0) && range_is_null (&vr1))
2373 set_value_range_to_null (vr, expr_type);
2374 else
2375 set_value_range_to_varying (vr);
2376 }
2377 else if (code == POINTER_PLUS_EXPR)
2378 {
2379 /* For pointer types, we are really only interested in asserting
2380 whether the expression evaluates to non-NULL. */
2381 if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
2382 set_value_range_to_nonnull (vr, expr_type);
2383 else if (range_is_null (&vr0) && range_is_null (&vr1))
2384 set_value_range_to_null (vr, expr_type);
2385 else
2386 set_value_range_to_varying (vr);
2387 }
2388 else if (code == BIT_AND_EXPR)
2389 {
2390 /* For pointer types, we are really only interested in asserting
2391 whether the expression evaluates to non-NULL. */
2392 if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
2393 set_value_range_to_nonnull (vr, expr_type);
2394 else if (range_is_null (&vr0) || range_is_null (&vr1))
2395 set_value_range_to_null (vr, expr_type);
2396 else
2397 set_value_range_to_varying (vr);
2398 }
2399 else
2400 set_value_range_to_varying (vr);
2401
2402 return;
2403 }
2404
2405 /* For integer ranges, apply the operation to each end of the
2406 range and see what we end up with. */
2407 if (code == PLUS_EXPR || code == MINUS_EXPR)
2408 {
2409       /* If we have a PLUS_EXPR or MINUS_EXPR with two VR_RANGE integer
2410 	 constant ranges, compute the precise range for such a case if possible.  */
2411 if (range_int_cst_p (&vr0)
2412 && range_int_cst_p (&vr1)
2413 /* We need as many bits as the possibly unsigned inputs. */
2414 && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
2415 {
2416 double_int min0 = tree_to_double_int (vr0.min);
2417 double_int max0 = tree_to_double_int (vr0.max);
2418 double_int min1 = tree_to_double_int (vr1.min);
2419 double_int max1 = tree_to_double_int (vr1.max);
2420 bool uns = TYPE_UNSIGNED (expr_type);
2421 double_int type_min
2422 = double_int::min_value (TYPE_PRECISION (expr_type), uns);
2423 double_int type_max
2424 = double_int::max_value (TYPE_PRECISION (expr_type), uns);
2425 double_int dmin, dmax;
2426 int min_ovf = 0;
2427 int max_ovf = 0;
2428
2429 if (code == PLUS_EXPR)
2430 {
2431 dmin = min0 + min1;
2432 dmax = max0 + max1;
2433
2434 /* Check for overflow in double_int. */
2435 if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
2436 min_ovf = min0.cmp (dmin, uns);
2437 if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
2438 max_ovf = max0.cmp (dmax, uns);
2439 }
2440 else /* if (code == MINUS_EXPR) */
2441 {
2442 dmin = min0 - max1;
2443 dmax = max0 - min1;
2444
2445 if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
2446 min_ovf = min0.cmp (max1, uns);
2447 if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
2448 max_ovf = max0.cmp (min1, uns);
2449 }
2450
2451 /* For non-wrapping arithmetic look at possibly smaller
2452 value-ranges of the type. */
2453 if (!TYPE_OVERFLOW_WRAPS (expr_type))
2454 {
2455 if (vrp_val_min (expr_type))
2456 type_min = tree_to_double_int (vrp_val_min (expr_type));
2457 if (vrp_val_max (expr_type))
2458 type_max = tree_to_double_int (vrp_val_max (expr_type));
2459 }
2460
2461 /* Check for type overflow. */
2462 if (min_ovf == 0)
2463 {
2464 if (dmin.cmp (type_min, uns) == -1)
2465 min_ovf = -1;
2466 else if (dmin.cmp (type_max, uns) == 1)
2467 min_ovf = 1;
2468 }
2469 if (max_ovf == 0)
2470 {
2471 if (dmax.cmp (type_min, uns) == -1)
2472 max_ovf = -1;
2473 else if (dmax.cmp (type_max, uns) == 1)
2474 max_ovf = 1;
2475 }
2476
2477 if (TYPE_OVERFLOW_WRAPS (expr_type))
2478 {
2479 /* If overflow wraps, truncate the values and adjust the
2480 range kind and bounds appropriately. */
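	      /* Editor's example (assuming a hypothetical 8-bit unsigned
		 type): [200, 250] + [10, 20] gives DMIN = 210 with no
		 overflow and DMAX = 270 with overflow, so the code below
		 produces the anti-range ~[15, 209], i.e. exactly
		 {210, ..., 255} U {0, ..., 14}.  */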
2481 double_int tmin
2482 = dmin.ext (TYPE_PRECISION (expr_type), uns);
2483 double_int tmax
2484 = dmax.ext (TYPE_PRECISION (expr_type), uns);
2485 if (min_ovf == max_ovf)
2486 {
2487 /* No overflow or both overflow or underflow. The
2488 range kind stays VR_RANGE. */
2489 min = double_int_to_tree (expr_type, tmin);
2490 max = double_int_to_tree (expr_type, tmax);
2491 }
2492 else if (min_ovf == -1
2493 && max_ovf == 1)
2494 {
2495 /* Underflow and overflow, drop to VR_VARYING. */
2496 set_value_range_to_varying (vr);
2497 return;
2498 }
2499 else
2500 {
2501 /* Min underflow or max overflow. The range kind
2502 changes to VR_ANTI_RANGE. */
2503 bool covers = false;
2504 double_int tem = tmin;
2505 gcc_assert ((min_ovf == -1 && max_ovf == 0)
2506 || (max_ovf == 1 && min_ovf == 0));
2507 type = VR_ANTI_RANGE;
2508 tmin = tmax + double_int_one;
2509 if (tmin.cmp (tmax, uns) < 0)
2510 covers = true;
2511 tmax = tem + double_int_minus_one;
2512 if (tmax.cmp (tem, uns) > 0)
2513 covers = true;
2514 /* If the anti-range would cover nothing, drop to varying.
2515 Likewise if the anti-range bounds are outside of the
2516 		     type's values.  */
2517 if (covers || tmin.cmp (tmax, uns) > 0)
2518 {
2519 set_value_range_to_varying (vr);
2520 return;
2521 }
2522 min = double_int_to_tree (expr_type, tmin);
2523 max = double_int_to_tree (expr_type, tmax);
2524 }
2525 }
2526 else
2527 {
2528 	      /* If overflow does not wrap, saturate to the type's min/max
2529 		 value.  */
2530 if (min_ovf == -1)
2531 {
2532 if (needs_overflow_infinity (expr_type)
2533 && supports_overflow_infinity (expr_type))
2534 min = negative_overflow_infinity (expr_type);
2535 else
2536 min = double_int_to_tree (expr_type, type_min);
2537 }
2538 else if (min_ovf == 1)
2539 {
2540 if (needs_overflow_infinity (expr_type)
2541 && supports_overflow_infinity (expr_type))
2542 min = positive_overflow_infinity (expr_type);
2543 else
2544 min = double_int_to_tree (expr_type, type_max);
2545 }
2546 else
2547 min = double_int_to_tree (expr_type, dmin);
2548
2549 if (max_ovf == -1)
2550 {
2551 if (needs_overflow_infinity (expr_type)
2552 && supports_overflow_infinity (expr_type))
2553 max = negative_overflow_infinity (expr_type);
2554 else
2555 max = double_int_to_tree (expr_type, type_min);
2556 }
2557 else if (max_ovf == 1)
2558 {
2559 if (needs_overflow_infinity (expr_type)
2560 && supports_overflow_infinity (expr_type))
2561 max = positive_overflow_infinity (expr_type);
2562 else
2563 max = double_int_to_tree (expr_type, type_max);
2564 }
2565 else
2566 max = double_int_to_tree (expr_type, dmax);
2567 }
2568 if (needs_overflow_infinity (expr_type)
2569 && supports_overflow_infinity (expr_type))
2570 {
2571 if (is_negative_overflow_infinity (vr0.min)
2572 || (code == PLUS_EXPR
2573 ? is_negative_overflow_infinity (vr1.min)
2574 : is_positive_overflow_infinity (vr1.max)))
2575 min = negative_overflow_infinity (expr_type);
2576 if (is_positive_overflow_infinity (vr0.max)
2577 || (code == PLUS_EXPR
2578 ? is_positive_overflow_infinity (vr1.max)
2579 : is_negative_overflow_infinity (vr1.min)))
2580 max = positive_overflow_infinity (expr_type);
2581 }
2582 }
2583 else
2584 {
2585 /* For other cases, for example if we have a PLUS_EXPR with two
2586 VR_ANTI_RANGEs, drop to VR_VARYING. It would take more effort
2587 to compute a precise range for such a case.
2588 ??? General even mixed range kind operations can be expressed
2589 by for example transforming ~[3, 5] + [1, 2] to range-only
2590 operations and a union primitive:
2591 [-INF, 2] + [1, 2] U [5, +INF] + [1, 2]
2592 [-INF+1, 4] U [6, +INF(OVF)]
2593 though usually the union is not exactly representable with
2594 a single range or anti-range as the above is
2595 [-INF+1, +INF(OVF)] intersected with ~[5, 5]
2596 but one could use a scheme similar to equivalences for this. */
2597 set_value_range_to_varying (vr);
2598 return;
2599 }
2600 }
2601 else if (code == MIN_EXPR
2602 || code == MAX_EXPR)
2603 {
2604 if (vr0.type == VR_RANGE
2605 && !symbolic_range_p (&vr0))
2606 {
2607 type = VR_RANGE;
2608 if (vr1.type == VR_RANGE
2609 && !symbolic_range_p (&vr1))
2610 {
2611 /* For operations that make the resulting range directly
2612 proportional to the original ranges, apply the operation to
2613 the same end of each range. */
2614 min = vrp_int_const_binop (code, vr0.min, vr1.min);
2615 max = vrp_int_const_binop (code, vr0.max, vr1.max);
2616 }
2617 else if (code == MIN_EXPR)
2618 {
2619 min = vrp_val_min (expr_type);
2620 max = vr0.max;
2621 }
2622 else if (code == MAX_EXPR)
2623 {
2624 min = vr0.min;
2625 max = vrp_val_max (expr_type);
2626 }
2627 }
2628 else if (vr1.type == VR_RANGE
2629 && !symbolic_range_p (&vr1))
2630 {
2631 type = VR_RANGE;
2632 if (code == MIN_EXPR)
2633 {
2634 min = vrp_val_min (expr_type);
2635 max = vr1.max;
2636 }
2637 else if (code == MAX_EXPR)
2638 {
2639 min = vr1.min;
2640 max = vrp_val_max (expr_type);
2641 }
2642 }
2643 else
2644 {
2645 set_value_range_to_varying (vr);
2646 return;
2647 }
2648 }
2649 else if (code == MULT_EXPR)
2650 {
2651 /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
2652 drop to varying. */
2653 if (range_int_cst_p (&vr0)
2654 && range_int_cst_p (&vr1)
2655 && TYPE_OVERFLOW_WRAPS (expr_type))
2656 {
2657 double_int min0, max0, min1, max1, sizem1, size;
2658 double_int prod0l, prod0h, prod1l, prod1h,
2659 prod2l, prod2h, prod3l, prod3h;
2660 bool uns0, uns1, uns;
2661
2662 sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
2663 size = sizem1 + double_int_one;
2664
2665 min0 = tree_to_double_int (vr0.min);
2666 max0 = tree_to_double_int (vr0.max);
2667 min1 = tree_to_double_int (vr1.min);
2668 max1 = tree_to_double_int (vr1.max);
2669
2670 uns0 = TYPE_UNSIGNED (expr_type);
2671 uns1 = uns0;
2672
2673 /* Canonicalize the intervals. */
2674 if (TYPE_UNSIGNED (expr_type))
2675 {
2676 double_int min2 = size - min0;
2677 if (!min2.is_zero () && min2.cmp (max0, true) < 0)
2678 {
2679 min0 = -min2;
2680 max0 -= size;
2681 uns0 = false;
2682 }
2683
2684 min2 = size - min1;
2685 if (!min2.is_zero () && min2.cmp (max1, true) < 0)
2686 {
2687 min1 = -min2;
2688 max1 -= size;
2689 uns1 = false;
2690 }
2691 }
2692 uns = uns0 & uns1;
2693
2694 bool overflow;
2695 prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
2696 if (!uns0 && min0.is_negative ())
2697 prod0h -= min1;
2698 if (!uns1 && min1.is_negative ())
2699 prod0h -= min0;
2700
2701 prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
2702 if (!uns0 && min0.is_negative ())
2703 prod1h -= max1;
2704 if (!uns1 && max1.is_negative ())
2705 prod1h -= min0;
2706
2707 prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
2708 if (!uns0 && max0.is_negative ())
2709 prod2h -= min1;
2710 if (!uns1 && min1.is_negative ())
2711 prod2h -= max0;
2712
2713 prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
2714 if (!uns0 && max0.is_negative ())
2715 prod3h -= max1;
2716 if (!uns1 && max1.is_negative ())
2717 prod3h -= max0;
2718
2719 /* Sort the 4 products. */
2720 quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
2721 quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
2722 quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
2723 quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
2724
2725 /* Max - min. */
2726 if (prod0l.is_zero ())
2727 {
2728 prod1l = double_int_zero;
2729 prod1h = -prod0h;
2730 }
2731 else
2732 {
2733 prod1l = -prod0l;
2734 prod1h = ~prod0h;
2735 }
2736 prod2l = prod3l + prod1l;
2737 prod2h = prod3h + prod1h;
2738 if (prod2l.ult (prod3l))
2739 prod2h += double_int_one; /* carry */
2740
2741 if (!prod2h.is_zero ()
2742 || prod2l.cmp (sizem1, true) >= 0)
2743 {
2744 	      /* The range covers all values.  */
2745 set_value_range_to_varying (vr);
2746 return;
2747 }
2748
2749 /* The following should handle the wrapping and selecting
2750 VR_ANTI_RANGE for us. */
2751 min = double_int_to_tree (expr_type, prod0l);
2752 max = double_int_to_tree (expr_type, prod3l);
2753 set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
2754 return;
2755 }
2756
2757 /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
2758 drop to VR_VARYING. It would take more effort to compute a
2759 precise range for such a case. For example, if we have
2760 op0 == 65536 and op1 == 65536 with their ranges both being
2761 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
2762 we cannot claim that the product is in ~[0,0]. Note that we
2763 are guaranteed to have vr0.type == vr1.type at this
2764 point. */
2765 if (vr0.type == VR_ANTI_RANGE
2766 && !TYPE_OVERFLOW_UNDEFINED (expr_type))
2767 {
2768 set_value_range_to_varying (vr);
2769 return;
2770 }
2771
2772 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2773 return;
2774 }
2775 else if (code == RSHIFT_EXPR
2776 || code == LSHIFT_EXPR)
2777 {
2778 /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
2779 then drop to VR_VARYING. Outside of this range we get undefined
2780 behavior from the shift operation. We cannot even trust
2781 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
2782 shifts, and the operation at the tree level may be widened. */
2783 if (range_int_cst_p (&vr1)
2784 && compare_tree_int (vr1.min, 0) >= 0
2785 && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
2786 {
2787 if (code == RSHIFT_EXPR)
2788 {
2789 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2790 return;
2791 }
2792 /* We can map lshifts by constants to MULT_EXPR handling. */
2793 else if (code == LSHIFT_EXPR
2794 && range_int_cst_singleton_p (&vr1))
2795 {
2796 bool saved_flag_wrapv;
2797 value_range_t vr1p = VR_INITIALIZER;
2798 vr1p.type = VR_RANGE;
2799 vr1p.min
2800 = double_int_to_tree (expr_type,
2801 double_int_one
2802 .llshift (TREE_INT_CST_LOW (vr1.min),
2803 TYPE_PRECISION (expr_type)));
2804 vr1p.max = vr1p.min;
2805 /* We have to use a wrapping multiply though as signed overflow
2806 on lshifts is implementation defined in C89. */
2807 saved_flag_wrapv = flag_wrapv;
2808 flag_wrapv = 1;
2809 extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
2810 &vr0, &vr1p);
2811 flag_wrapv = saved_flag_wrapv;
2812 return;
2813 }
2814 else if (code == LSHIFT_EXPR
2815 && range_int_cst_p (&vr0))
2816 {
2817 int prec = TYPE_PRECISION (expr_type);
2818 int overflow_pos = prec;
2819 int bound_shift;
2820 double_int bound, complement, low_bound, high_bound;
2821 bool uns = TYPE_UNSIGNED (expr_type);
2822 bool in_bounds = false;
2823
2824 if (!uns)
2825 overflow_pos -= 1;
2826
2827 bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
2828 /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
2829 overflow. However, for that to happen, vr1.max needs to be
2830 zero, which means vr1 is a singleton range of zero, which
2831 means it should be handled by the previous LSHIFT_EXPR
2832 if-clause. */
2833 bound = double_int_one.llshift (bound_shift, prec);
2834 complement = ~(bound - double_int_one);
2835
2836 if (uns)
2837 {
2838 low_bound = bound.zext (prec);
2839 high_bound = complement.zext (prec);
2840 if (tree_to_double_int (vr0.max).ult (low_bound))
2841 {
2842 /* [5, 6] << [1, 2] == [10, 24]. */
2843 /* We're shifting out only zeroes, the value increases
2844 monotonically. */
2845 in_bounds = true;
2846 }
2847 else if (high_bound.ult (tree_to_double_int (vr0.min)))
2848 {
2849 /* [0xffffff00, 0xffffffff] << [1, 2]
2850 == [0xfffffc00, 0xfffffffe]. */
2851 /* We're shifting out only ones, the value decreases
2852 monotonically. */
2853 in_bounds = true;
2854 }
2855 }
2856 else
2857 {
2858 /* [-1, 1] << [1, 2] == [-4, 4]. */
2859 low_bound = complement.sext (prec);
2860 high_bound = bound;
2861 if (tree_to_double_int (vr0.max).slt (high_bound)
2862 && low_bound.slt (tree_to_double_int (vr0.min)))
2863 {
2864 /* For non-negative numbers, we're shifting out only
2865 zeroes, the value increases monotonically.
2866 For negative numbers, we're shifting out only ones, the
2867 		     value decreases monotonically.  */
2868 in_bounds = true;
2869 }
2870 }
2871
2872 if (in_bounds)
2873 {
2874 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2875 return;
2876 }
2877 }
2878 }
2879 set_value_range_to_varying (vr);
2880 return;
2881 }
2882 else if (code == TRUNC_DIV_EXPR
2883 || code == FLOOR_DIV_EXPR
2884 || code == CEIL_DIV_EXPR
2885 || code == EXACT_DIV_EXPR
2886 || code == ROUND_DIV_EXPR)
2887 {
2888 if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
2889 {
2890 /* For division, if op1 has VR_RANGE but op0 does not, something
2891 	     can be deduced just from that range.  Say [min, max] / [4, max]
2892 	     gives the range [min / 4, max / 4].  */
2893 if (vr1.type == VR_RANGE
2894 && !symbolic_range_p (&vr1)
2895 && range_includes_zero_p (vr1.min, vr1.max) == 0)
2896 {
2897 vr0.type = type = VR_RANGE;
2898 vr0.min = vrp_val_min (expr_type);
2899 vr0.max = vrp_val_max (expr_type);
2900 }
2901 else
2902 {
2903 set_value_range_to_varying (vr);
2904 return;
2905 }
2906 }
2907
2908       /* For divisions, if the function can throw non-call exceptions, we
2909 	 must not eliminate a possible division by zero.  */
2910 if (cfun->can_throw_non_call_exceptions
2911 && (vr1.type != VR_RANGE
2912 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2913 {
2914 set_value_range_to_varying (vr);
2915 return;
2916 }
2917
2918 /* For divisions, if op0 is VR_RANGE, we can deduce a range
2919 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
2920 include 0. */
2921 if (vr0.type == VR_RANGE
2922 && (vr1.type != VR_RANGE
2923 || range_includes_zero_p (vr1.min, vr1.max) != 0))
2924 {
2925 tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
2926 int cmp;
2927
2928 min = NULL_TREE;
2929 max = NULL_TREE;
2930 if (TYPE_UNSIGNED (expr_type)
2931 || value_range_nonnegative_p (&vr1))
2932 {
2933 /* For unsigned division or when divisor is known
2934 to be non-negative, the range has to cover
2935 all numbers from 0 to max for positive max
2936 and all numbers from min to 0 for negative min. */
2937 cmp = compare_values (vr0.max, zero);
2938 if (cmp == -1)
2939 max = zero;
2940 else if (cmp == 0 || cmp == 1)
2941 max = vr0.max;
2942 else
2943 type = VR_VARYING;
2944 cmp = compare_values (vr0.min, zero);
2945 if (cmp == 1)
2946 min = zero;
2947 else if (cmp == 0 || cmp == -1)
2948 min = vr0.min;
2949 else
2950 type = VR_VARYING;
2951 }
2952 else
2953 {
2954 /* Otherwise the range is -max .. max or min .. -min
2955 depending on which bound is bigger in absolute value,
2956 as the division can change the sign. */
2957 abs_extent_range (vr, vr0.min, vr0.max);
2958 return;
2959 }
2960 if (type == VR_VARYING)
2961 {
2962 set_value_range_to_varying (vr);
2963 return;
2964 }
2965 }
2966 else
2967 {
2968 extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
2969 return;
2970 }
2971 }
2972 else if (code == TRUNC_MOD_EXPR)
2973 {
2974 if (vr1.type != VR_RANGE
2975 || range_includes_zero_p (vr1.min, vr1.max) != 0
2976 || vrp_val_is_min (vr1.min))
2977 {
2978 set_value_range_to_varying (vr);
2979 return;
2980 }
2981 type = VR_RANGE;
2982 /* Compute MAX <|vr1.min|, |vr1.max|> - 1. */
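      /* Editor's example: for X % [-7, 5] this computes
	 MAX (7, 5) - 1 = 6, so the result range is [-6, 6], or [0, 6]
	 when X is known to be non-negative.  */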
2983 max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
2984 if (tree_int_cst_lt (max, vr1.max))
2985 max = vr1.max;
2986 max = int_const_binop (MINUS_EXPR, max, integer_one_node);
2987 /* If the dividend is non-negative the modulus will be
2988 non-negative as well. */
2989 if (TYPE_UNSIGNED (expr_type)
2990 || value_range_nonnegative_p (&vr0))
2991 min = build_int_cst (TREE_TYPE (max), 0);
2992 else
2993 min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
2994 }
2995 else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
2996 {
2997 bool int_cst_range0, int_cst_range1;
2998 double_int may_be_nonzero0, may_be_nonzero1;
2999 double_int must_be_nonzero0, must_be_nonzero1;
3000
3001 int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
3002 &must_be_nonzero0);
3003 int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
3004 &must_be_nonzero1);
3005
3006 type = VR_RANGE;
3007 if (code == BIT_AND_EXPR)
3008 {
3009 double_int dmax;
3010 min = double_int_to_tree (expr_type,
3011 must_be_nonzero0 & must_be_nonzero1);
3012 dmax = may_be_nonzero0 & may_be_nonzero1;
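	  /* Editor's example: for X in [4, 7] and Y == 5 the bitmasks give
	     MUST0 = 0b100, MAY0 = 0b111 and MUST1 = MAY1 = 0b101, so
	     MIN = 4 and (after the truncations below) MAX = 5, matching
	     the exact result set {4, 5}.  */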
3013 /* If both input ranges contain only negative values we can
3014 truncate the result range maximum to the minimum of the
3015 input range maxima. */
3016 if (int_cst_range0 && int_cst_range1
3017 && tree_int_cst_sgn (vr0.max) < 0
3018 && tree_int_cst_sgn (vr1.max) < 0)
3019 {
3020 dmax = dmax.min (tree_to_double_int (vr0.max),
3021 TYPE_UNSIGNED (expr_type));
3022 dmax = dmax.min (tree_to_double_int (vr1.max),
3023 TYPE_UNSIGNED (expr_type));
3024 }
3025 /* If either input range contains only non-negative values
3026 we can truncate the result range maximum to the respective
3027 maximum of the input range. */
3028 if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
3029 dmax = dmax.min (tree_to_double_int (vr0.max),
3030 TYPE_UNSIGNED (expr_type));
3031 if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
3032 dmax = dmax.min (tree_to_double_int (vr1.max),
3033 TYPE_UNSIGNED (expr_type));
3034 max = double_int_to_tree (expr_type, dmax);
3035 }
3036 else if (code == BIT_IOR_EXPR)
3037 {
3038 double_int dmin;
3039 max = double_int_to_tree (expr_type,
3040 may_be_nonzero0 | may_be_nonzero1);
3041 dmin = must_be_nonzero0 | must_be_nonzero1;
3042 	  /* If both input ranges contain only non-negative values we can
3043 truncate the minimum of the result range to the maximum
3044 of the input range minima. */
3045 if (int_cst_range0 && int_cst_range1
3046 && tree_int_cst_sgn (vr0.min) >= 0
3047 && tree_int_cst_sgn (vr1.min) >= 0)
3048 {
3049 dmin = dmin.max (tree_to_double_int (vr0.min),
3050 TYPE_UNSIGNED (expr_type));
3051 dmin = dmin.max (tree_to_double_int (vr1.min),
3052 TYPE_UNSIGNED (expr_type));
3053 }
3054 /* If either input range contains only negative values
3055 we can truncate the minimum of the result range to the
3056 	     respective input range's minimum.  */
3057 if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
3058 dmin = dmin.max (tree_to_double_int (vr0.min),
3059 TYPE_UNSIGNED (expr_type));
3060 if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
3061 dmin = dmin.max (tree_to_double_int (vr1.min),
3062 TYPE_UNSIGNED (expr_type));
3063 min = double_int_to_tree (expr_type, dmin);
3064 }
3065 else if (code == BIT_XOR_EXPR)
3066 {
3067 double_int result_zero_bits, result_one_bits;
3068 result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
3069 | ~(may_be_nonzero0 | may_be_nonzero1);
3070 result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
3071 | must_be_nonzero1.and_not (may_be_nonzero0);
3072 max = double_int_to_tree (expr_type, ~result_zero_bits);
3073 min = double_int_to_tree (expr_type, result_one_bits);
3074 /* If the range has all positive or all negative values the
3075 result is better than VARYING. */
3076 if (tree_int_cst_sgn (min) < 0
3077 || tree_int_cst_sgn (max) >= 0)
3078 ;
3079 else
3080 max = min = NULL_TREE;
3081 }
3082 }
3083 else
3084 gcc_unreachable ();
3085
3086 /* If either MIN or MAX overflowed, then set the resulting range to
3087 VARYING. But we do accept an overflow infinity
3088 representation. */
3089 if (min == NULL_TREE
3090 || !is_gimple_min_invariant (min)
3091 || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
3092 || max == NULL_TREE
3093 || !is_gimple_min_invariant (max)
3094 || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
3095 {
3096 set_value_range_to_varying (vr);
3097 return;
3098 }
3099
3100 /* We punt if:
3101 1) [-INF, +INF]
3102 2) [-INF, +-INF(OVF)]
3103 3) [+-INF(OVF), +INF]
3104 4) [+-INF(OVF), +-INF(OVF)]
3105 We learn nothing when we have INF and INF(OVF) on both sides.
3106 Note that we do accept [-INF, -INF] and [+INF, +INF] without
3107 overflow. */
3108 if ((vrp_val_is_min (min) || is_overflow_infinity (min))
3109 && (vrp_val_is_max (max) || is_overflow_infinity (max)))
3110 {
3111 set_value_range_to_varying (vr);
3112 return;
3113 }
3114
3115 cmp = compare_values (min, max);
3116 if (cmp == -2 || cmp == 1)
3117 {
3118 /* If the new range has its limits swapped around (MIN > MAX),
3119 then the operation caused one of them to wrap around, mark
3120 the new range VARYING. */
3121 set_value_range_to_varying (vr);
3122 }
3123 else
3124 set_value_range (vr, type, min, max, NULL);
3125 }
3126
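/* Editor's illustration, not part of the original file: the saturating
   behaviour of the non-wrapping PLUS_EXPR branch above, restated for a
   hypothetical narrow signed type whose bounds fit comfortably in int,
   so the additions below cannot themselves overflow.  The helper name is
   hypothetical.  */

static inline void
sketch_sat_add_range (int min0, int max0, int min1, int max1,
		      int type_min, int type_max, int *rmin, int *rmax)
{
  int lo = min0 + min1;
  int hi = max0 + max1;

  /* Saturate each bound to the representable range, as the
     "overflow does not wrap" branch saturates to the type's
     min/max value (or to an overflow infinity).  */
  *rmin = lo < type_min ? type_min : (lo > type_max ? type_max : lo);
  *rmax = hi < type_min ? type_min : (hi > type_max ? type_max : hi);
}
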
3127 /* Extract range information from a binary expression OP0 CODE OP1 based on
3128 the ranges of each of its operands with resulting type EXPR_TYPE.
3129 The resulting range is stored in *VR. */
3130
3131 static void
3132 extract_range_from_binary_expr (value_range_t *vr,
3133 enum tree_code code,
3134 tree expr_type, tree op0, tree op1)
3135 {
3136 value_range_t vr0 = VR_INITIALIZER;
3137 value_range_t vr1 = VR_INITIALIZER;
3138
3139 /* Get value ranges for each operand. For constant operands, create
3140 a new value range with the operand to simplify processing. */
3141 if (TREE_CODE (op0) == SSA_NAME)
3142 vr0 = *(get_value_range (op0));
3143 else if (is_gimple_min_invariant (op0))
3144 set_value_range_to_value (&vr0, op0, NULL);
3145 else
3146 set_value_range_to_varying (&vr0);
3147
3148 if (TREE_CODE (op1) == SSA_NAME)
3149 vr1 = *(get_value_range (op1));
3150 else if (is_gimple_min_invariant (op1))
3151 set_value_range_to_value (&vr1, op1, NULL);
3152 else
3153 set_value_range_to_varying (&vr1);
3154
3155 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3156 }
3157
3158 /* Extract range information from a unary operation CODE based on
3159    the range of its operand *VR0, which has type OP0_TYPE, with resulting
3160    type TYPE.  The resulting range is stored in *VR.  */
3161
3162 static void
3163 extract_range_from_unary_expr_1 (value_range_t *vr,
3164 enum tree_code code, tree type,
3165 value_range_t *vr0_, tree op0_type)
3166 {
3167 value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
3168
3169 /* VRP only operates on integral and pointer types. */
3170 if (!(INTEGRAL_TYPE_P (op0_type)
3171 || POINTER_TYPE_P (op0_type))
3172 || !(INTEGRAL_TYPE_P (type)
3173 || POINTER_TYPE_P (type)))
3174 {
3175 set_value_range_to_varying (vr);
3176 return;
3177 }
3178
3179 /* If VR0 is UNDEFINED, so is the result. */
3180 if (vr0.type == VR_UNDEFINED)
3181 {
3182 set_value_range_to_undefined (vr);
3183 return;
3184 }
3185
3186 /* Handle operations that we express in terms of others. */
3187 if (code == PAREN_EXPR)
3188 {
3189 /* PAREN_EXPR is a simple copy. */
3190 copy_value_range (vr, &vr0);
3191 return;
3192 }
3193 else if (code == NEGATE_EXPR)
3194 {
3195 /* -X is simply 0 - X, so re-use existing code that also handles
3196 anti-ranges fine. */
3197 value_range_t zero = VR_INITIALIZER;
3198 set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
3199 extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
3200 return;
3201 }
3202 else if (code == BIT_NOT_EXPR)
3203 {
3204 /* ~X is simply -1 - X, so re-use existing code that also handles
3205 anti-ranges fine. */
3206 value_range_t minusone = VR_INITIALIZER;
3207 set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
3208 extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
3209 type, &minusone, &vr0);
3210 return;
3211 }
3212
3213 /* Now canonicalize anti-ranges to ranges when they are not symbolic
3214 and express op ~[] as (op []') U (op []''). */
3215 if (vr0.type == VR_ANTI_RANGE
3216 && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
3217 {
3218 extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
3219 if (vrtem1.type != VR_UNDEFINED)
3220 {
3221 value_range_t vrres = VR_INITIALIZER;
3222 extract_range_from_unary_expr_1 (&vrres, code, type,
3223 &vrtem1, op0_type);
3224 vrp_meet (vr, &vrres);
3225 }
3226 return;
3227 }
3228
3229 if (CONVERT_EXPR_CODE_P (code))
3230 {
3231 tree inner_type = op0_type;
3232 tree outer_type = type;
3233
3234 /* If the expression evaluates to a pointer, we are only interested in
3235 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]). */
3236 if (POINTER_TYPE_P (type))
3237 {
3238 if (range_is_nonnull (&vr0))
3239 set_value_range_to_nonnull (vr, type);
3240 else if (range_is_null (&vr0))
3241 set_value_range_to_null (vr, type);
3242 else
3243 set_value_range_to_varying (vr);
3244 return;
3245 }
3246
3247 /* If VR0 is varying and we increase the type precision, assume
3248 a full range for the following transformation. */
3249 if (vr0.type == VR_VARYING
3250 && INTEGRAL_TYPE_P (inner_type)
3251 && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
3252 {
3253 vr0.type = VR_RANGE;
3254 vr0.min = TYPE_MIN_VALUE (inner_type);
3255 vr0.max = TYPE_MAX_VALUE (inner_type);
3256 }
3257
3258 /* If VR0 is a constant range or anti-range and the conversion is
3259 not truncating we can convert the min and max values and
3260 canonicalize the resulting range. Otherwise we can do the
3261 conversion if the size of the range is less than what the
3262 precision of the target type can represent and the range is
3263 not an anti-range. */
3264 if ((vr0.type == VR_RANGE
3265 || vr0.type == VR_ANTI_RANGE)
3266 && TREE_CODE (vr0.min) == INTEGER_CST
3267 && TREE_CODE (vr0.max) == INTEGER_CST
3268 && (!is_overflow_infinity (vr0.min)
3269 || (vr0.type == VR_RANGE
3270 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3271 && needs_overflow_infinity (outer_type)
3272 && supports_overflow_infinity (outer_type)))
3273 && (!is_overflow_infinity (vr0.max)
3274 || (vr0.type == VR_RANGE
3275 && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
3276 && needs_overflow_infinity (outer_type)
3277 && supports_overflow_infinity (outer_type)))
3278 && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
3279 || (vr0.type == VR_RANGE
3280 && integer_zerop (int_const_binop (RSHIFT_EXPR,
3281 int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
3282 size_int (TYPE_PRECISION (outer_type)))))))
3283 {
3284 tree new_min, new_max;
3285 if (is_overflow_infinity (vr0.min))
3286 new_min = negative_overflow_infinity (outer_type);
3287 else
3288 new_min = force_fit_type_double (outer_type,
3289 tree_to_double_int (vr0.min),
3290 0, false);
3291 if (is_overflow_infinity (vr0.max))
3292 new_max = positive_overflow_infinity (outer_type);
3293 else
3294 new_max = force_fit_type_double (outer_type,
3295 tree_to_double_int (vr0.max),
3296 0, false);
3297 set_and_canonicalize_value_range (vr, vr0.type,
3298 new_min, new_max, NULL);
3299 return;
3300 }
3301
3302 set_value_range_to_varying (vr);
3303 return;
3304 }
3305 else if (code == ABS_EXPR)
3306 {
3307 tree min, max;
3308 int cmp;
3309
3310 /* Pass through vr0 in the easy cases. */
3311 if (TYPE_UNSIGNED (type)
3312 || value_range_nonnegative_p (&vr0))
3313 {
3314 copy_value_range (vr, &vr0);
3315 return;
3316 }
3317
3318 /* For the remaining varying or symbolic ranges we can't do anything
3319 useful. */
3320 if (vr0.type == VR_VARYING
3321 || symbolic_range_p (&vr0))
3322 {
3323 set_value_range_to_varying (vr);
3324 return;
3325 }
3326
3327 /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
3328 useful range. */
3329 if (!TYPE_OVERFLOW_UNDEFINED (type)
3330 && ((vr0.type == VR_RANGE
3331 && vrp_val_is_min (vr0.min))
3332 || (vr0.type == VR_ANTI_RANGE
3333 && !vrp_val_is_min (vr0.min))))
3334 {
3335 set_value_range_to_varying (vr);
3336 return;
3337 }
3338
3339       /* ABS_EXPR may flip the range around if the original range
3340 	 included negative values.  */
3341 if (is_overflow_infinity (vr0.min))
3342 min = positive_overflow_infinity (type);
3343 else if (!vrp_val_is_min (vr0.min))
3344 min = fold_unary_to_constant (code, type, vr0.min);
3345 else if (!needs_overflow_infinity (type))
3346 min = TYPE_MAX_VALUE (type);
3347 else if (supports_overflow_infinity (type))
3348 min = positive_overflow_infinity (type);
3349 else
3350 {
3351 set_value_range_to_varying (vr);
3352 return;
3353 }
3354
3355 if (is_overflow_infinity (vr0.max))
3356 max = positive_overflow_infinity (type);
3357 else if (!vrp_val_is_min (vr0.max))
3358 max = fold_unary_to_constant (code, type, vr0.max);
3359 else if (!needs_overflow_infinity (type))
3360 max = TYPE_MAX_VALUE (type);
3361 else if (supports_overflow_infinity (type)
3362 /* We shouldn't generate [+INF, +INF] as set_value_range
3363 doesn't like this and ICEs. */
3364 && !is_positive_overflow_infinity (min))
3365 max = positive_overflow_infinity (type);
3366 else
3367 {
3368 set_value_range_to_varying (vr);
3369 return;
3370 }
3371
3372 cmp = compare_values (min, max);
3373
3374       /* If a VR_ANTI_RANGE contains zero, then we have
3375 ~[-INF, min(MIN, MAX)]. */
3376 if (vr0.type == VR_ANTI_RANGE)
3377 {
3378 if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3379 {
3380 /* Take the lower of the two values. */
3381 if (cmp != 1)
3382 max = min;
3383
3384 /* Create ~[-INF, min (abs(MIN), abs(MAX))]
3385 or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
3386 flag_wrapv is set and the original anti-range doesn't include
3387 TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE. */
3388 if (TYPE_OVERFLOW_WRAPS (type))
3389 {
3390 tree type_min_value = TYPE_MIN_VALUE (type);
3391
3392 min = (vr0.min != type_min_value
3393 ? int_const_binop (PLUS_EXPR, type_min_value,
3394 integer_one_node)
3395 : type_min_value);
3396 }
3397 else
3398 {
3399 if (overflow_infinity_range_p (&vr0))
3400 min = negative_overflow_infinity (type);
3401 else
3402 min = TYPE_MIN_VALUE (type);
3403 }
3404 }
3405 else
3406 {
3407 /* All else has failed, so create the range [0, INF], even for
3408 flag_wrapv since TYPE_MIN_VALUE is in the original
3409 anti-range. */
3410 vr0.type = VR_RANGE;
3411 min = build_int_cst (type, 0);
3412 if (needs_overflow_infinity (type))
3413 {
3414 if (supports_overflow_infinity (type))
3415 max = positive_overflow_infinity (type);
3416 else
3417 {
3418 set_value_range_to_varying (vr);
3419 return;
3420 }
3421 }
3422 else
3423 max = TYPE_MAX_VALUE (type);
3424 }
3425 }
3426
3427 /* If the range contains zero then we know that the minimum value in the
3428 range will be zero. */
3429 else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
3430 {
3431 if (cmp == 1)
3432 max = min;
3433 min = build_int_cst (type, 0);
3434 }
3435 else
3436 {
3437 /* If the range was reversed, swap MIN and MAX. */
3438 if (cmp == 1)
3439 {
3440 tree t = min;
3441 min = max;
3442 max = t;
3443 }
3444 }
3445
3446 cmp = compare_values (min, max);
3447 if (cmp == -2 || cmp == 1)
3448 {
3449 /* If the new range has its limits swapped around (MIN > MAX),
3450 then the operation caused one of them to wrap around, mark
3451 the new range VARYING. */
3452 set_value_range_to_varying (vr);
3453 }
3454 else
3455 set_value_range (vr, vr0.type, min, max, NULL);
3456 return;
3457 }
3458
3459 /* For unhandled operations fall back to varying. */
3460 set_value_range_to_varying (vr);
3461 return;
3462 }
3463
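/* Editor's illustration, not part of the original file: the BIT_NOT_EXPR
   rewrite above on a plain int range.  Since ~X == -1 - X, the endpoints
   swap and shift by one: [3, 5] becomes [-6, -4] (assuming LO > INT_MIN
   so the negation cannot overflow).  The helper name is hypothetical.  */

static inline void
sketch_bit_not_range (int lo, int hi, int *rlo, int *rhi)
{
  *rlo = -1 - hi;
  *rhi = -1 - lo;
}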
3464
3465 /* Extract range information from a unary expression CODE OP0 based on
3466 the range of its operand with resulting type TYPE.
3467 The resulting range is stored in *VR. */
3468
3469 static void
3470 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3471 tree type, tree op0)
3472 {
3473 value_range_t vr0 = VR_INITIALIZER;
3474
3475 /* Get value ranges for the operand. For constant operands, create
3476 a new value range with the operand to simplify processing. */
3477 if (TREE_CODE (op0) == SSA_NAME)
3478 vr0 = *(get_value_range (op0));
3479 else if (is_gimple_min_invariant (op0))
3480 set_value_range_to_value (&vr0, op0, NULL);
3481 else
3482 set_value_range_to_varying (&vr0);
3483
3484 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3485 }
3486
3487
3488 /* Extract range information from a conditional expression STMT based on
3489 the ranges of each of its operands and the expression code. */
3490
3491 static void
3492 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3493 {
3494 tree op0, op1;
3495 value_range_t vr0 = VR_INITIALIZER;
3496 value_range_t vr1 = VR_INITIALIZER;
3497
3498 /* Get value ranges for each operand. For constant operands, create
3499 a new value range with the operand to simplify processing. */
3500 op0 = gimple_assign_rhs2 (stmt);
3501 if (TREE_CODE (op0) == SSA_NAME)
3502 vr0 = *(get_value_range (op0));
3503 else if (is_gimple_min_invariant (op0))
3504 set_value_range_to_value (&vr0, op0, NULL);
3505 else
3506 set_value_range_to_varying (&vr0);
3507
3508 op1 = gimple_assign_rhs3 (stmt);
3509 if (TREE_CODE (op1) == SSA_NAME)
3510 vr1 = *(get_value_range (op1));
3511 else if (is_gimple_min_invariant (op1))
3512 set_value_range_to_value (&vr1, op1, NULL);
3513 else
3514 set_value_range_to_varying (&vr1);
3515
3516   /* The resulting value range is the union of the operand ranges.  */
3517 copy_value_range (vr, &vr0);
3518 vrp_meet (vr, &vr1);
3519 }
3520
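/* Editor's note: for a COND_EXPR the exact set of possible values is the
   union of the operand ranges, e.g. [0, 5] U [10, 20]; vrp_meet records a
   conservative single range or anti-range covering that union (such as
   the hull [0, 20]).  */
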
3521
3522 /* Extract range information from a comparison expression EXPR based
3523    on the ranges of its operands and the expression code.  */
3524
3525 static void
3526 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3527 tree type, tree op0, tree op1)
3528 {
3529 bool sop = false;
3530 tree val;
3531
3532 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3533 NULL);
3534
3535 /* A disadvantage of using a special infinity as an overflow
3536 representation is that we lose the ability to record overflow
3537 when we don't have an infinity. So we have to ignore a result
3538 which relies on overflow. */
3539
3540 if (val && !is_overflow_infinity (val) && !sop)
3541 {
3542 /* Since this expression was found on the RHS of an assignment,
3543 its type may be different from _Bool. Convert VAL to EXPR's
3544 type. */
3545 val = fold_convert (type, val);
3546 if (is_gimple_min_invariant (val))
3547 set_value_range_to_value (vr, val, vr->equiv);
3548 else
3549 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3550 }
3551 else
3552 /* The result of a comparison is always true or false. */
3553 set_value_range_to_truthvalue (vr, type);
3554 }
3555
3556 /* Try to derive a nonnegative or nonzero range out of STMT relying
3557 primarily on generic routines in fold in conjunction with range data.
3558    Store the result in *VR.  */
3559
3560 static void
3561 extract_range_basic (value_range_t *vr, gimple stmt)
3562 {
3563 bool sop = false;
3564 tree type = gimple_expr_type (stmt);
3565
3566 if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
3567 {
3568 tree fndecl = gimple_call_fndecl (stmt), arg;
3569 int mini, maxi, zerov = 0, prec;
3570
3571 switch (DECL_FUNCTION_CODE (fndecl))
3572 {
3573 case BUILT_IN_CONSTANT_P:
3574 /* If the call is __builtin_constant_p and the argument is a
3575 function parameter resolve it to false. This avoids bogus
3576 array bound warnings.
3577 ??? We could do this as early as inlining is finished. */
3578 arg = gimple_call_arg (stmt, 0);
3579 if (TREE_CODE (arg) == SSA_NAME
3580 && SSA_NAME_IS_DEFAULT_DEF (arg)
3581 && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
3582 {
3583 set_value_range_to_null (vr, type);
3584 return;
3585 }
3586 break;
3587 /* Both __builtin_ffs* and __builtin_popcount return
3588 [0, prec]. */
3589 CASE_INT_FN (BUILT_IN_FFS):
3590 CASE_INT_FN (BUILT_IN_POPCOUNT):
3591 arg = gimple_call_arg (stmt, 0);
3592 prec = TYPE_PRECISION (TREE_TYPE (arg));
3593 mini = 0;
3594 maxi = prec;
3595 if (TREE_CODE (arg) == SSA_NAME)
3596 {
3597 value_range_t *vr0 = get_value_range (arg);
3598 /* If arg is non-zero, then ffs or popcount
3599 are non-zero. */
3600 if (((vr0->type == VR_RANGE
3601 && integer_nonzerop (vr0->min))
3602 || (vr0->type == VR_ANTI_RANGE
3603 && integer_zerop (vr0->min)))
3604 && !TREE_OVERFLOW (vr0->min))
3605 mini = 1;
3606 /* If some high bits are known to be zero,
3607 we can decrease the maximum. */
3608 if (vr0->type == VR_RANGE
3609 && TREE_CODE (vr0->max) == INTEGER_CST
3610 && !TREE_OVERFLOW (vr0->max))
3611 maxi = tree_floor_log2 (vr0->max) + 1;
3612 }
3613 goto bitop_builtin;
3614 /* __builtin_parity* returns [0, 1]. */
3615 CASE_INT_FN (BUILT_IN_PARITY):
3616 mini = 0;
3617 maxi = 1;
3618 goto bitop_builtin;
3619 /* __builtin_c[lt]z* return [0, prec-1], except for
3620 when the argument is 0, but that is undefined behavior.
3621 On many targets where the CLZ RTL or optab value is defined
3622 for 0 the value is prec, so include that in the range
3623 by default. */
3624 CASE_INT_FN (BUILT_IN_CLZ):
3625 arg = gimple_call_arg (stmt, 0);
3626 prec = TYPE_PRECISION (TREE_TYPE (arg));
3627 mini = 0;
3628 maxi = prec;
3629 if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
3630 != CODE_FOR_nothing
3631 && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3632 zerov)
3633 /* Handle only the single common value. */
3634 && zerov != prec)
3635 /* Magic value to give up, unless vr0 proves
3636 arg is non-zero. */
3637 mini = -2;
3638 if (TREE_CODE (arg) == SSA_NAME)
3639 {
3640 value_range_t *vr0 = get_value_range (arg);
3641 /* From clz of VR_RANGE minimum we can compute
3642 result maximum. */
3643 if (vr0->type == VR_RANGE
3644 && TREE_CODE (vr0->min) == INTEGER_CST
3645 && !TREE_OVERFLOW (vr0->min))
3646 {
3647 maxi = prec - 1 - tree_floor_log2 (vr0->min);
3648 if (maxi != prec)
3649 mini = 0;
3650 }
3651 else if (vr0->type == VR_ANTI_RANGE
3652 && integer_zerop (vr0->min)
3653 && !TREE_OVERFLOW (vr0->min))
3654 {
3655 maxi = prec - 1;
3656 mini = 0;
3657 }
3658 if (mini == -2)
3659 break;
3660 /* From clz of VR_RANGE maximum we can compute
3661 result minimum. */
3662 if (vr0->type == VR_RANGE
3663 && TREE_CODE (vr0->max) == INTEGER_CST
3664 && !TREE_OVERFLOW (vr0->max))
3665 {
3666 mini = prec - 1 - tree_floor_log2 (vr0->max);
3667 if (mini == prec)
3668 break;
3669 }
3670 }
3671 if (mini == -2)
3672 break;
3673 goto bitop_builtin;
3674 /* __builtin_ctz* return [0, prec-1], except when the argument
3675 is 0, but that is undefined behavior.
3676 If there is a ctz optab for this mode and
3677 CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
3678 otherwise just assume 0 won't be seen. */
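/* For example, for a 32-bit ARG known to be non-zero with a maximum of
   255, the code below computes mini == 0 and
   maxi == tree_floor_log2 (255) == 7, giving the ctz range [0, 7]. */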
3679 CASE_INT_FN (BUILT_IN_CTZ):
3680 arg = gimple_call_arg (stmt, 0);
3681 prec = TYPE_PRECISION (TREE_TYPE (arg));
3682 mini = 0;
3683 maxi = prec - 1;
3684 if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
3685 != CODE_FOR_nothing
3686 && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
3687 zerov))
3688 {
3689 /* Handle only the two common values. */
3690 if (zerov == -1)
3691 mini = -1;
3692 else if (zerov == prec)
3693 maxi = prec;
3694 else
3695 /* Magic value to give up, unless vr0 proves
3696 arg is non-zero. */
3697 mini = -2;
3698 }
3699 if (TREE_CODE (arg) == SSA_NAME)
3700 {
3701 value_range_t *vr0 = get_value_range (arg);
3702 /* If arg is non-zero, then use [0, prec - 1]. */
3703 if (((vr0->type == VR_RANGE
3704 && integer_nonzerop (vr0->min))
3705 || (vr0->type == VR_ANTI_RANGE
3706 && integer_zerop (vr0->min)))
3707 && !TREE_OVERFLOW (vr0->min))
3708 {
3709 mini = 0;
3710 maxi = prec - 1;
3711 }
3712 /* If some high bits are known to be zero,
3713 we can decrease the result maximum. */
3714 if (vr0->type == VR_RANGE
3715 && TREE_CODE (vr0->max) == INTEGER_CST
3716 && !TREE_OVERFLOW (vr0->max))
3717 {
3718 maxi = tree_floor_log2 (vr0->max);
3719 /* For vr0 [0, 0] give up. */
3720 if (maxi == -1)
3721 break;
3722 }
3723 }
3724 if (mini == -2)
3725 break;
3726 goto bitop_builtin;
3727 /* __builtin_clrsb* returns [0, prec-1]. */
3728 CASE_INT_FN (BUILT_IN_CLRSB):
3729 arg = gimple_call_arg (stmt, 0);
3730 prec = TYPE_PRECISION (TREE_TYPE (arg));
3731 mini = 0;
3732 maxi = prec - 1;
3733 goto bitop_builtin;
3734 bitop_builtin:
3735 set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
3736 build_int_cst (type, maxi), NULL);
3737 return;
3738 default:
3739 break;
3740 }
3741 }
3742 if (INTEGRAL_TYPE_P (type)
3743 && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
3744 set_value_range_to_nonnegative (vr, type,
3745 sop || stmt_overflow_infinity (stmt));
3746 else if (vrp_stmt_computes_nonzero (stmt, &sop)
3747 && !sop)
3748 set_value_range_to_nonnull (vr, type);
3749 else
3750 set_value_range_to_varying (vr);
3751 }
3752
3753
3754 /* Try to compute a useful range out of assignment STMT and store it
3755 in *VR. */
3756
3757 static void
3758 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3759 {
3760 enum tree_code code = gimple_assign_rhs_code (stmt);
3761
3762 if (code == ASSERT_EXPR)
3763 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3764 else if (code == SSA_NAME)
3765 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3766 else if (TREE_CODE_CLASS (code) == tcc_binary)
3767 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3768 gimple_expr_type (stmt),
3769 gimple_assign_rhs1 (stmt),
3770 gimple_assign_rhs2 (stmt));
3771 else if (TREE_CODE_CLASS (code) == tcc_unary)
3772 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3773 gimple_expr_type (stmt),
3774 gimple_assign_rhs1 (stmt));
3775 else if (code == COND_EXPR)
3776 extract_range_from_cond_expr (vr, stmt);
3777 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3778 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3779 gimple_expr_type (stmt),
3780 gimple_assign_rhs1 (stmt),
3781 gimple_assign_rhs2 (stmt));
3782 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3783 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3784 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3785 else
3786 set_value_range_to_varying (vr);
3787
3788 if (vr->type == VR_VARYING)
3789 extract_range_basic (vr, stmt);
3790 }
3791
3792 /* Given a range VR, a LOOP and a variable VAR, determine whether it
3793 would be profitable to adjust VR using scalar evolution information
3794 for VAR. If so, update VR with the new limits. */
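/* For example, for the header PHI of i in a loop such as
   for (i = 0; i < n; i++), the evolution of i is {0, +, 1}_1; if the
   loop latch is known to execute at most 99 times, the range of i can
   be tightened to [0, 99]. */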
3795
3796 static void
3797 adjust_range_with_scev (value_range_t *vr, struct loop *loop,
3798 gimple stmt, tree var)
3799 {
3800 tree init, step, chrec, tmin, tmax, min, max, type, tem;
3801 enum ev_direction dir;
3802
3803 /* TODO. Don't adjust anti-ranges. An anti-range may provide
3804 better opportunities than a regular range, but I'm not sure. */
3805 if (vr->type == VR_ANTI_RANGE)
3806 return;
3807
3808 chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
3809
3810 /* Like in PR19590, scev can return a constant function. */
3811 if (is_gimple_min_invariant (chrec))
3812 {
3813 set_value_range_to_value (vr, chrec, vr->equiv);
3814 return;
3815 }
3816
3817 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3818 return;
3819
3820 init = initial_condition_in_loop_num (chrec, loop->num);
3821 tem = op_with_constant_singleton_value_range (init);
3822 if (tem)
3823 init = tem;
3824 step = evolution_part_in_loop_num (chrec, loop->num);
3825 tem = op_with_constant_singleton_value_range (step);
3826 if (tem)
3827 step = tem;
3828
3829 /* If STEP is symbolic, we can't know whether INIT will be the
3830 minimum or maximum value in the range. Also, unless INIT is
3831 a simple expression, compare_values and possibly other functions
3832 in tree-vrp won't be able to handle it. */
3833 if (step == NULL_TREE
3834 || !is_gimple_min_invariant (step)
3835 || !valid_value_p (init))
3836 return;
3837
3838 dir = scev_direction (chrec);
3839 if (/* Do not adjust ranges if we do not know whether the iv increases
3840 or decreases, ... */
3841 dir == EV_DIR_UNKNOWN
3842 /* ... or if it may wrap. */
3843 || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
3844 true))
3845 return;
3846
3847 /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
3848 negative_overflow_infinity and positive_overflow_infinity,
3849 because we have concluded that the loop probably does not
3850 wrap. */
3851
3852 type = TREE_TYPE (var);
3853 if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
3854 tmin = lower_bound_in_type (type, type);
3855 else
3856 tmin = TYPE_MIN_VALUE (type);
3857 if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
3858 tmax = upper_bound_in_type (type, type);
3859 else
3860 tmax = TYPE_MAX_VALUE (type);
3861
3862 /* Try to use estimated number of iterations for the loop to constrain the
3863 final value in the evolution. */
3864 if (TREE_CODE (step) == INTEGER_CST
3865 && is_gimple_val (init)
3866 && (TREE_CODE (init) != SSA_NAME
3867 || get_value_range (init)->type == VR_RANGE))
3868 {
3869 double_int nit;
3870
3871 /* We only enter here for loop header PHI nodes, so the number
3872 of latch executions is the correct count to use. */
3873 if (max_loop_iterations (loop, &nit))
3874 {
3875 value_range_t maxvr = VR_INITIALIZER;
3876 double_int dtmp;
3877 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
3878 bool overflow = false;
3879
3880 dtmp = tree_to_double_int (step)
3881 .mul_with_sign (nit, unsigned_p, &overflow);
3882 /* If the multiplication overflowed we can't do a meaningful
3883 adjustment. Likewise if the result doesn't fit in the type
3884 of the induction variable. For a signed type we have to
3885 check whether the result has the expected signedness, which
3886 is that of the step, since the number of iterations is unsigned. */
3887 if (!overflow
3888 && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
3889 && (unsigned_p
3890 || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
3891 {
3892 tem = double_int_to_tree (TREE_TYPE (init), dtmp);
3893 extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
3894 TREE_TYPE (init), init, tem);
3895 /* Likewise if the addition overflowed. */
3896 if (maxvr.type == VR_RANGE)
3897 {
3898 tmin = maxvr.min;
3899 tmax = maxvr.max;
3900 }
3901 }
3902 }
3903 }
3904
3905 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
3906 {
3907 min = tmin;
3908 max = tmax;
3909
3910 /* For VARYING or UNDEFINED ranges, just about anything we get
3911 from scalar evolutions should be better. */
3912
3913 if (dir == EV_DIR_DECREASES)
3914 max = init;
3915 else
3916 min = init;
3917
3918 /* If we would create an invalid range, then just assume we
3919 know absolutely nothing. This may be over-conservative,
3920 but it's clearly safe, and should happen only in unreachable
3921 parts of code, or for invalid programs. */
3922 if (compare_values (min, max) == 1)
3923 return;
3924
3925 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3926 }
3927 else if (vr->type == VR_RANGE)
3928 {
3929 min = vr->min;
3930 max = vr->max;
3931
3932 if (dir == EV_DIR_DECREASES)
3933 {
3934 /* INIT is the maximum value. If INIT is lower than VR->MAX
3935 but no smaller than VR->MIN, set VR->MAX to INIT. */
3936 if (compare_values (init, max) == -1)
3937 max = init;
3938
3939 /* According to the loop information, the variable does not
3940 overflow. If we think it does, it is probably because of an
3941 overflow due to arithmetic on a different INF value;
3942 reset now. */
3943 if (is_negative_overflow_infinity (min)
3944 || compare_values (min, tmin) == -1)
3945 min = tmin;
3946
3947 }
3948 else
3949 {
3950 /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
3951 if (compare_values (init, min) == 1)
3952 min = init;
3953
3954 if (is_positive_overflow_infinity (max)
3955 || compare_values (tmax, max) == -1)
3956 max = tmax;
3957 }
3958
3959 /* If we just created an invalid range with the minimum
3960 greater than the maximum, we fail conservatively.
3961 This should happen only in unreachable
3962 parts of code, or for invalid programs. */
3963 if (compare_values (min, max) == 1)
3964 return;
3965
3966 set_value_range (vr, VR_RANGE, min, max, vr->equiv);
3967 }
3968 }
3969
3970 /* Return true if VAR may overflow at STMT. This checks any available
3971 loop information to see if we can determine that VAR does not
3972 overflow. */
3973
3974 static bool
3975 vrp_var_may_overflow (tree var, gimple stmt)
3976 {
3977 struct loop *l;
3978 tree chrec, init, step;
3979
3980 if (current_loops == NULL)
3981 return true;
3982
3983 l = loop_containing_stmt (stmt);
3984 if (l == NULL
3985 || !loop_outer (l))
3986 return true;
3987
3988 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3989 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3990 return true;
3991
3992 init = initial_condition_in_loop_num (chrec, l->num);
3993 step = evolution_part_in_loop_num (chrec, l->num);
3994
3995 if (step == NULL_TREE
3996 || !is_gimple_min_invariant (step)
3997 || !valid_value_p (init))
3998 return true;
3999
4000 /* If we get here, we know something useful about VAR based on the
4001 loop information. If it wraps, it may overflow. */
4002
4003 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4004 true))
4005 return true;
4006
4007 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
4008 {
4009 print_generic_expr (dump_file, var, 0);
4010 fprintf (dump_file, ": loop information indicates it does not overflow\n");
4011 }
4012
4013 return false;
4014 }
4015
4016
4017 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4018
4019 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4020 all the values in the ranges.
4021
4022 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4023
4024 - Return NULL_TREE if it cannot be determined whether the comparison
4025 is always true or always false.
4026
4027 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4028 overflow infinity was used in the test. */
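/* For example, [1, 5] LT_EXPR [10, 20] yields boolean_true_node
   because the maximum of the first range is smaller than the minimum
   of the second, while [1, 5] LT_EXPR [3, 20] yields NULL_TREE because
   the two ranges overlap. */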
4029
4030
4031 static tree
4032 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
4033 bool *strict_overflow_p)
4034 {
4035 /* VARYING or UNDEFINED ranges cannot be compared. */
4036 if (vr0->type == VR_VARYING
4037 || vr0->type == VR_UNDEFINED
4038 || vr1->type == VR_VARYING
4039 || vr1->type == VR_UNDEFINED)
4040 return NULL_TREE;
4041
4042 /* Anti-ranges need to be handled separately. */
4043 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4044 {
4045 /* If both are anti-ranges, then we cannot compute any
4046 comparison. */
4047 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4048 return NULL_TREE;
4049
4050 /* These comparisons are never statically computable. */
4051 if (comp == GT_EXPR
4052 || comp == GE_EXPR
4053 || comp == LT_EXPR
4054 || comp == LE_EXPR)
4055 return NULL_TREE;
4056
4057 /* Equality can be computed only between a range and an
4058 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4059 if (vr0->type == VR_RANGE)
4060 {
4061 /* To simplify processing, make VR0 the anti-range. */
4062 value_range_t *tmp = vr0;
4063 vr0 = vr1;
4064 vr1 = tmp;
4065 }
4066
4067 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4068
4069 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4070 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4071 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4072
4073 return NULL_TREE;
4074 }
4075
4076 if (!usable_range_p (vr0, strict_overflow_p)
4077 || !usable_range_p (vr1, strict_overflow_p))
4078 return NULL_TREE;
4079
4080 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4081 operands around and change the comparison code. */
4082 if (comp == GT_EXPR || comp == GE_EXPR)
4083 {
4084 value_range_t *tmp;
4085 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4086 tmp = vr0;
4087 vr0 = vr1;
4088 vr1 = tmp;
4089 }
4090
4091 if (comp == EQ_EXPR)
4092 {
4093 /* Equality may only be computed if both ranges represent
4094 exactly one value. */
4095 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4096 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4097 {
4098 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4099 strict_overflow_p);
4100 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4101 strict_overflow_p);
4102 if (cmp_min == 0 && cmp_max == 0)
4103 return boolean_true_node;
4104 else if (cmp_min != -2 && cmp_max != -2)
4105 return boolean_false_node;
4106 }
4107 /* If the ranges [V0_MIN, V0_MAX] and [V1_MIN, V1_MAX] do not overlap, then V0 != V1. */
4108 else if (compare_values_warnv (vr0->min, vr1->max,
4109 strict_overflow_p) == 1
4110 || compare_values_warnv (vr1->min, vr0->max,
4111 strict_overflow_p) == 1)
4112 return boolean_false_node;
4113
4114 return NULL_TREE;
4115 }
4116 else if (comp == NE_EXPR)
4117 {
4118 int cmp1, cmp2;
4119
4120 /* If VR0 is completely to the left or completely to the right
4121 of VR1, they are always different. Notice that we need to
4122 make sure that both comparisons yield similar results to
4123 avoid comparing values that cannot be compared at
4124 compile-time. */
4125 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4126 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4127 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4128 return boolean_true_node;
4129
4130 /* If VR0 and VR1 represent a single value and are identical,
4131 return false. */
4132 else if (compare_values_warnv (vr0->min, vr0->max,
4133 strict_overflow_p) == 0
4134 && compare_values_warnv (vr1->min, vr1->max,
4135 strict_overflow_p) == 0
4136 && compare_values_warnv (vr0->min, vr1->min,
4137 strict_overflow_p) == 0
4138 && compare_values_warnv (vr0->max, vr1->max,
4139 strict_overflow_p) == 0)
4140 return boolean_false_node;
4141
4142 /* Otherwise, they may or may not be different. */
4143 else
4144 return NULL_TREE;
4145 }
4146 else if (comp == LT_EXPR || comp == LE_EXPR)
4147 {
4148 int tst;
4149
4150 /* If VR0 is to the left of VR1, return true. */
4151 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4152 if ((comp == LT_EXPR && tst == -1)
4153 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4154 {
4155 if (overflow_infinity_range_p (vr0)
4156 || overflow_infinity_range_p (vr1))
4157 *strict_overflow_p = true;
4158 return boolean_true_node;
4159 }
4160
4161 /* If VR0 is to the right of VR1, return false. */
4162 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4163 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4164 || (comp == LE_EXPR && tst == 1))
4165 {
4166 if (overflow_infinity_range_p (vr0)
4167 || overflow_infinity_range_p (vr1))
4168 *strict_overflow_p = true;
4169 return boolean_false_node;
4170 }
4171
4172 /* Otherwise, we don't know. */
4173 return NULL_TREE;
4174 }
4175
4176 gcc_unreachable ();
4177 }
4178
4179
4180 /* Given a value range VR, a value VAL and a comparison code COMP, return
4181 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4182 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4183 always returns false. Return NULL_TREE if it is not always
4184 possible to determine the value of the comparison. Also set
4185 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4186 infinity was used in the test. */
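/* For example, [10, 20] GT_EXPR 5 yields boolean_true_node, while
   [10, 20] EQ_EXPR 15 yields NULL_TREE because the range contains
   values both equal and not equal to 15. */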
4187
4188 static tree
4189 compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
4190 bool *strict_overflow_p)
4191 {
4192 if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
4193 return NULL_TREE;
4194
4195 /* Anti-ranges need to be handled separately. */
4196 if (vr->type == VR_ANTI_RANGE)
4197 {
4198 /* For anti-ranges, the only predicates that we can compute at
4199 compile time are equality and inequality. */
4200 if (comp == GT_EXPR
4201 || comp == GE_EXPR
4202 || comp == LT_EXPR
4203 || comp == LE_EXPR)
4204 return NULL_TREE;
4205
4206 /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
4207 if (value_inside_range (val, vr->min, vr->max) == 1)
4208 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4209
4210 return NULL_TREE;
4211 }
4212
4213 if (!usable_range_p (vr, strict_overflow_p))
4214 return NULL_TREE;
4215
4216 if (comp == EQ_EXPR)
4217 {
4218 /* EQ_EXPR may only be computed if VR represents exactly
4219 one value. */
4220 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
4221 {
4222 int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
4223 if (cmp == 0)
4224 return boolean_true_node;
4225 else if (cmp == -1 || cmp == 1 || cmp == 2)
4226 return boolean_false_node;
4227 }
4228 else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
4229 || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
4230 return boolean_false_node;
4231
4232 return NULL_TREE;
4233 }
4234 else if (comp == NE_EXPR)
4235 {
4236 /* If VAL is not inside VR, then they are always different. */
4237 if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
4238 || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
4239 return boolean_true_node;
4240
4241 /* If VR represents exactly one value equal to VAL, then return
4242 false. */
4243 if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
4244 && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
4245 return boolean_false_node;
4246
4247 /* Otherwise, they may or may not be different. */
4248 return NULL_TREE;
4249 }
4250 else if (comp == LT_EXPR || comp == LE_EXPR)
4251 {
4252 int tst;
4253
4254 /* If VR is to the left of VAL, return true. */
4255 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4256 if ((comp == LT_EXPR && tst == -1)
4257 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4258 {
4259 if (overflow_infinity_range_p (vr))
4260 *strict_overflow_p = true;
4261 return boolean_true_node;
4262 }
4263
4264 /* If VR is to the right of VAL, return false. */
4265 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4266 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4267 || (comp == LE_EXPR && tst == 1))
4268 {
4269 if (overflow_infinity_range_p (vr))
4270 *strict_overflow_p = true;
4271 return boolean_false_node;
4272 }
4273
4274 /* Otherwise, we don't know. */
4275 return NULL_TREE;
4276 }
4277 else if (comp == GT_EXPR || comp == GE_EXPR)
4278 {
4279 int tst;
4280
4281 /* If VR is to the right of VAL, return true. */
4282 tst = compare_values_warnv (vr->min, val, strict_overflow_p);
4283 if ((comp == GT_EXPR && tst == 1)
4284 || (comp == GE_EXPR && (tst == 0 || tst == 1)))
4285 {
4286 if (overflow_infinity_range_p (vr))
4287 *strict_overflow_p = true;
4288 return boolean_true_node;
4289 }
4290
4291 /* If VR is to the left of VAL, return false. */
4292 tst = compare_values_warnv (vr->max, val, strict_overflow_p);
4293 if ((comp == GT_EXPR && (tst == -1 || tst == 0))
4294 || (comp == GE_EXPR && tst == -1))
4295 {
4296 if (overflow_infinity_range_p (vr))
4297 *strict_overflow_p = true;
4298 return boolean_false_node;
4299 }
4300
4301 /* Otherwise, we don't know. */
4302 return NULL_TREE;
4303 }
4304
4305 gcc_unreachable ();
4306 }
4307
4308
4309 /* Debugging dumps. */
4310
4311 void dump_value_range (FILE *, value_range_t *);
4312 void debug_value_range (value_range_t *);
4313 void dump_all_value_ranges (FILE *);
4314 void debug_all_value_ranges (void);
4315 void dump_vr_equiv (FILE *, bitmap);
4316 void debug_vr_equiv (bitmap);
4317
4318
4319 /* Dump value range VR to FILE. */
4320
4321 void
4322 dump_value_range (FILE *file, value_range_t *vr)
4323 {
4324 if (vr == NULL)
4325 fprintf (file, "[]");
4326 else if (vr->type == VR_UNDEFINED)
4327 fprintf (file, "UNDEFINED");
4328 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4329 {
4330 tree type = TREE_TYPE (vr->min);
4331
4332 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4333
4334 if (is_negative_overflow_infinity (vr->min))
4335 fprintf (file, "-INF(OVF)");
4336 else if (INTEGRAL_TYPE_P (type)
4337 && !TYPE_UNSIGNED (type)
4338 && vrp_val_is_min (vr->min))
4339 fprintf (file, "-INF");
4340 else
4341 print_generic_expr (file, vr->min, 0);
4342
4343 fprintf (file, ", ");
4344
4345 if (is_positive_overflow_infinity (vr->max))
4346 fprintf (file, "+INF(OVF)");
4347 else if (INTEGRAL_TYPE_P (type)
4348 && vrp_val_is_max (vr->max))
4349 fprintf (file, "+INF");
4350 else
4351 print_generic_expr (file, vr->max, 0);
4352
4353 fprintf (file, "]");
4354
4355 if (vr->equiv)
4356 {
4357 bitmap_iterator bi;
4358 unsigned i, c = 0;
4359
4360 fprintf (file, " EQUIVALENCES: { ");
4361
4362 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4363 {
4364 print_generic_expr (file, ssa_name (i), 0);
4365 fprintf (file, " ");
4366 c++;
4367 }
4368
4369 fprintf (file, "} (%u elements)", c);
4370 }
4371 }
4372 else if (vr->type == VR_VARYING)
4373 fprintf (file, "VARYING");
4374 else
4375 fprintf (file, "INVALID RANGE");
4376 }
4377
4378
4379 /* Dump value range VR to stderr. */
4380
4381 DEBUG_FUNCTION void
4382 debug_value_range (value_range_t *vr)
4383 {
4384 dump_value_range (stderr, vr);
4385 fprintf (stderr, "\n");
4386 }
4387
4388
4389 /* Dump value ranges of all SSA_NAMEs to FILE. */
4390
4391 void
4392 dump_all_value_ranges (FILE *file)
4393 {
4394 size_t i;
4395
4396 for (i = 0; i < num_vr_values; i++)
4397 {
4398 if (vr_value[i])
4399 {
4400 print_generic_expr (file, ssa_name (i), 0);
4401 fprintf (file, ": ");
4402 dump_value_range (file, vr_value[i]);
4403 fprintf (file, "\n");
4404 }
4405 }
4406
4407 fprintf (file, "\n");
4408 }
4409
4410
4411 /* Dump all value ranges to stderr. */
4412
4413 DEBUG_FUNCTION void
4414 debug_all_value_ranges (void)
4415 {
4416 dump_all_value_ranges (stderr);
4417 }
4418
4419
4420 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4421 create a new SSA name N and return the assertion assignment
4422 'V = ASSERT_EXPR <V, V OP W>'. */
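/* For example, given COND 'x_3 > 10' and V x_3, this builds
   'x_4 = ASSERT_EXPR <x_3, x_3 > 10>', where x_4 is the new SSA name
   created by create_new_def_for to replace the dominated uses of
   x_3. */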
4423
4424 static gimple
4425 build_assert_expr_for (tree cond, tree v)
4426 {
4427 tree a;
4428 gimple assertion;
4429
4430 gcc_assert (TREE_CODE (v) == SSA_NAME
4431 && COMPARISON_CLASS_P (cond));
4432
4433 a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
4434 assertion = gimple_build_assign (NULL_TREE, a);
4435
4436 /* The new ASSERT_EXPR creates a new SSA name that replaces the
4437 operand of the ASSERT_EXPR. Create it so that the new name and the old
4438 one are registered in the replacement table, which lets us fix the SSA
4439 web after adding all the ASSERT_EXPRs. */
4440 create_new_def_for (v, assertion, NULL);
4441
4442 return assertion;
4443 }
4444
4445
4446 /* Return true if STMT is a conditional whose comparison involves
4447 floating point values. */
4448
4449 static inline bool
4450 fp_predicate (gimple stmt)
4451 {
4452 GIMPLE_CHECK (stmt, GIMPLE_COND);
4453
4454 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4455 }
4456
4457
4458 /* If the range of values taken by OP can be inferred after STMT executes,
4459 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4460 describe the inferred range. Return true if a range could be
4461 inferred. */
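/* For example, after a statement such as '*p_1 = 0' executes, p_1 must
   be non-NULL (when -fdelete-null-pointer-checks is in effect), so
   *COMP_CODE_P is set to NE_EXPR and *VAL_P to a zero pointer
   constant. */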
4462
4463 static bool
4464 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4465 {
4466 *val_p = NULL_TREE;
4467 *comp_code_p = ERROR_MARK;
4468
4469 /* Do not attempt to infer anything in names that flow through
4470 abnormal edges. */
4471 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4472 return false;
4473
4474 /* Similarly, don't infer anything from statements that may throw
4475 exceptions. */
4476 if (stmt_could_throw_p (stmt))
4477 return false;
4478
4479 /* If STMT is the last statement of a basic block with no
4480 successors, there is no point inferring anything about any of its
4481 operands. We would not be able to find a proper insertion point
4482 for the assertion, anyway. */
4483 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4484 return false;
4485
4486 /* We can only assume that a pointer dereference will yield
4487 non-NULL if -fdelete-null-pointer-checks is enabled. */
4488 if (flag_delete_null_pointer_checks
4489 && POINTER_TYPE_P (TREE_TYPE (op))
4490 && gimple_code (stmt) != GIMPLE_ASM)
4491 {
4492 unsigned num_uses, num_loads, num_stores;
4493
4494 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4495 if (num_loads + num_stores > 0)
4496 {
4497 *val_p = build_int_cst (TREE_TYPE (op), 0);
4498 *comp_code_p = NE_EXPR;
4499 return true;
4500 }
4501 }
4502
4503 return false;
4504 }
4505
4506
4507 void dump_asserts_for (FILE *, tree);
4508 void debug_asserts_for (tree);
4509 void dump_all_asserts (FILE *);
4510 void debug_all_asserts (void);
4511
4512 /* Dump all the registered assertions for NAME to FILE. */
4513
4514 void
4515 dump_asserts_for (FILE *file, tree name)
4516 {
4517 assert_locus_t loc;
4518
4519 fprintf (file, "Assertions to be inserted for ");
4520 print_generic_expr (file, name, 0);
4521 fprintf (file, "\n");
4522
4523 loc = asserts_for[SSA_NAME_VERSION (name)];
4524 while (loc)
4525 {
4526 fprintf (file, "\t");
4527 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4528 fprintf (file, "\n\tBB #%d", loc->bb->index);
4529 if (loc->e)
4530 {
4531 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4532 loc->e->dest->index);
4533 dump_edge_info (file, loc->e, dump_flags, 0);
4534 }
4535 fprintf (file, "\n\tPREDICATE: ");
4536 print_generic_expr (file, name, 0);
4537 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4538 print_generic_expr (file, loc->val, 0);
4539 fprintf (file, "\n\n");
4540 loc = loc->next;
4541 }
4542
4543 fprintf (file, "\n");
4544 }
4545
4546
4547 /* Dump all the registered assertions for NAME to stderr. */
4548
4549 DEBUG_FUNCTION void
4550 debug_asserts_for (tree name)
4551 {
4552 dump_asserts_for (stderr, name);
4553 }
4554
4555
4556 /* Dump all the registered assertions for all the names to FILE. */
4557
4558 void
4559 dump_all_asserts (FILE *file)
4560 {
4561 unsigned i;
4562 bitmap_iterator bi;
4563
4564 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4565 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4566 dump_asserts_for (file, ssa_name (i));
4567 fprintf (file, "\n");
4568 }
4569
4570
4571 /* Dump all the registered assertions for all the names to stderr. */
4572
4573 DEBUG_FUNCTION void
4574 debug_all_asserts (void)
4575 {
4576 dump_all_asserts (stderr);
4577 }
4578
4579
4580 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4581 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4582 E->DEST, then register this location as a possible insertion point
4583 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4584
4585 BB, E and SI provide the exact insertion point for the new
4586 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4587 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4588 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4589 must not be NULL. */
4590
4591 static void
4592 register_new_assert_for (tree name, tree expr,
4593 enum tree_code comp_code,
4594 tree val,
4595 basic_block bb,
4596 edge e,
4597 gimple_stmt_iterator si)
4598 {
4599 assert_locus_t n, loc, last_loc;
4600 basic_block dest_bb;
4601
4602 gcc_checking_assert (bb == NULL || e == NULL);
4603
4604 if (e == NULL)
4605 gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
4606 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
4607
4608 /* Never build an assert comparing against an integer constant with
4609 TREE_OVERFLOW set. This confuses our undefined overflow warning
4610 machinery. */
4611 if (TREE_CODE (val) == INTEGER_CST
4612 && TREE_OVERFLOW (val))
4613 val = build_int_cst_wide (TREE_TYPE (val),
4614 TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));
4615
4616 /* The new assertion A will be inserted at BB or E. We need to
4617 determine if the new location is dominated by a previously
4618 registered location for A. If we are doing an edge insertion,
4619 assume that A will be inserted at E->DEST. Note that this is not
4620 necessarily true.
4621
4622 If E is a critical edge, it will be split. But even if E is
4623 split, the new block will dominate the same set of blocks that
4624 E->DEST dominates.
4625
4626 The reverse, however, is not true: blocks dominated by E->DEST
4627 will not be dominated by the new block created to split E. So,
4628 if the insertion location is on a critical edge, we will not use
4629 the new location to move another assertion previously registered
4630 at a block dominated by E->DEST. */
4631 dest_bb = (bb) ? bb : e->dest;
4632
4633 /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
4634 VAL at a block dominating DEST_BB, then we don't need to insert a new
4635 one. Similarly, if the same assertion already exists at a block
4636 dominated by DEST_BB and the new location is not on a critical
4637 edge, then update the existing location for the assertion (i.e.,
4638 move the assertion up in the dominance tree).
4639
4640 Note, this is implemented as a simple linked list because there
4641 should not be more than a handful of assertions registered per
4642 name. If this becomes a performance problem, a table hashed by
4643 COMP_CODE and VAL could be implemented. */
4644 loc = asserts_for[SSA_NAME_VERSION (name)];
4645 last_loc = loc;
4646 while (loc)
4647 {
4648 if (loc->comp_code == comp_code
4649 && (loc->val == val
4650 || operand_equal_p (loc->val, val, 0))
4651 && (loc->expr == expr
4652 || operand_equal_p (loc->expr, expr, 0)))
4653 {
4654 /* If E is not a critical edge and DEST_BB
4655 dominates the existing location for the assertion, move
4656 the assertion up in the dominance tree by updating its
4657 location information. */
4658 if ((e == NULL || !EDGE_CRITICAL_P (e))
4659 && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
4660 {
4661 loc->bb = dest_bb;
4662 loc->e = e;
4663 loc->si = si;
4664 return;
4665 }
4666 }
4667
4668 /* Update the last node of the list and move to the next one. */
4669 last_loc = loc;
4670 loc = loc->next;
4671 }
4672
4673 /* If we didn't find an assertion already registered for
4674 NAME COMP_CODE VAL, add a new one at the end of the list of
4675 assertions associated with NAME. */
4676 n = XNEW (struct assert_locus_d);
4677 n->bb = dest_bb;
4678 n->e = e;
4679 n->si = si;
4680 n->comp_code = comp_code;
4681 n->val = val;
4682 n->expr = expr;
4683 n->next = NULL;
4684
4685 if (last_loc)
4686 last_loc->next = n;
4687 else
4688 asserts_for[SSA_NAME_VERSION (name)] = n;
4689
4690 bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
4691 }
4692
4693 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4694 Extract a suitable test code and value and store them into *CODE_P and
4695 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4696
4697 If no extraction was possible, return FALSE, otherwise return TRUE.
4698
4699 If INVERT is true, then we invert the result stored into *CODE_P. */
4700
4701 static bool
4702 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4703 tree cond_op0, tree cond_op1,
4704 bool invert, enum tree_code *code_p,
4705 tree *val_p)
4706 {
4707 enum tree_code comp_code;
4708 tree val;
4709
4710 /* We have a comparison of the form NAME COMP VAL
4711 or VAL COMP NAME. */
4712 if (name == cond_op1)
4713 {
4714 /* If the predicate is of the form VAL COMP NAME, flip
4715 COMP around because we need to register NAME as the
4716 first operand in the predicate. */
4717 comp_code = swap_tree_comparison (cond_code);
4718 val = cond_op0;
4719 }
4720 else
4721 {
4722 /* The comparison is of the form NAME COMP VAL, so the
4723 comparison code remains unchanged. */
4724 comp_code = cond_code;
4725 val = cond_op1;
4726 }
4727
4728 /* Invert the comparison code as necessary. */
4729 if (invert)
4730 comp_code = invert_tree_comparison (comp_code, 0);
4731
4732 /* VRP does not handle float types. */
4733 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4734 return false;
4735
4736 /* Do not register always-false predicates.
4737 FIXME: this works around a limitation in fold() when dealing with
4738 enumerations. Given 'enum { N1, N2 } x;', fold will not
4739 fold 'if (x > N2)' to 'if (0)'. */
4740 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4741 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4742 {
4743 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4744 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4745
4746 if (comp_code == GT_EXPR
4747 && (!max
4748 || compare_values (val, max) == 0))
4749 return false;
4750
4751 if (comp_code == LT_EXPR
4752 && (!min
4753 || compare_values (val, min) == 0))
4754 return false;
4755 }
4756 *code_p = comp_code;
4757 *val_p = val;
4758 return true;
4759 }
4760
4761 /* Find the smallest RES where RES > VAL && (RES & MASK) == RES, if any
4762 (otherwise return VAL). VAL and MASK must be zero-extended for
4763 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4764 (to transform signed values into unsigned) and at the end xor
4765 SGNBIT back. */
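/* For example, with PREC 8, VAL 0x15, MASK 0x34 and SGNBIT 0, the
   values satisfying (RES & MASK) == RES are 0x00, 0x04, 0x10, 0x14,
   0x20, 0x24, 0x30 and 0x34; the smallest one greater than 0x15 is
   0x20, which is what gets returned. */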
4766
4767 static double_int
4768 masked_increment (double_int val, double_int mask, double_int sgnbit,
4769 unsigned int prec)
4770 {
4771 double_int bit = double_int_one, res;
4772 unsigned int i;
4773
4774 val ^= sgnbit;
4775 for (i = 0; i < prec; i++, bit += bit)
4776 {
4777 res = mask;
4778 if ((res & bit).is_zero ())
4779 continue;
4780 res = bit - double_int_one;
4781 res = (val + bit).and_not (res);
4782 res &= mask;
4783 if (res.ugt (val))
4784 return res ^ sgnbit;
4785 }
4786 return val ^ sgnbit;
4787 }
4788
4789 /* Try to register an edge assertion for SSA name NAME on edge E for
4790 the condition COND contributing to the conditional jump pointed to by BSI.
4791 Invert the condition COND if INVERT is true.
4792 Return true if an assertion for NAME could be registered. */
4793
4794 static bool
4795 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4796 enum tree_code cond_code,
4797 tree cond_op0, tree cond_op1, bool invert)
4798 {
4799 tree val;
4800 enum tree_code comp_code;
4801 bool retval = false;
4802
4803 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4804 cond_op0,
4805 cond_op1,
4806 invert, &comp_code, &val))
4807 return false;
4808
4809 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4810 reachable from E. */
4811 if (live_on_edge (e, name)
4812 && !has_single_use (name))
4813 {
4814 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4815 retval = true;
4816 }
4817
4818 /* In the case of NAME <= CST and NAME being defined as
4819 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4820 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4821 This catches range and anti-range tests. */
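/* For example, a test such as '(unsigned) i_2 + 5 <= 9' constrains
   i_2 to [-5, 4]; the code below registers an assertion of that form
   for i_2 on this edge. */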
4822 if ((comp_code == LE_EXPR
4823 || comp_code == GT_EXPR)
4824 && TREE_CODE (val) == INTEGER_CST
4825 && TYPE_UNSIGNED (TREE_TYPE (val)))
4826 {
4827 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4828 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4829
4830 /* Extract CST2 from the (optional) addition. */
4831 if (is_gimple_assign (def_stmt)
4832 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4833 {
4834 name2 = gimple_assign_rhs1 (def_stmt);
4835 cst2 = gimple_assign_rhs2 (def_stmt);
4836 if (TREE_CODE (name2) == SSA_NAME
4837 && TREE_CODE (cst2) == INTEGER_CST)
4838 def_stmt = SSA_NAME_DEF_STMT (name2);
4839 }
4840
4841 /* Extract NAME3 from the (optional) sign-changing cast. */
4842 if (gimple_assign_cast_p (def_stmt))
4843 {
4844 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4845 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4846 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4847 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4848 name3 = gimple_assign_rhs1 (def_stmt);
4849 }
4850
4851 /* If name3 is used later, create an ASSERT_EXPR for it. */
4852 if (name3 != NULL_TREE
4853 && TREE_CODE (name3) == SSA_NAME
4854 && (cst2 == NULL_TREE
4855 || TREE_CODE (cst2) == INTEGER_CST)
4856 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4857 && live_on_edge (e, name3)
4858 && !has_single_use (name3))
4859 {
4860 tree tmp;
4861
4862 /* Build an expression for the range test. */
4863 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4864 if (cst2 != NULL_TREE)
4865 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4866
4867 if (dump_file)
4868 {
4869 fprintf (dump_file, "Adding assert for ");
4870 print_generic_expr (dump_file, name3, 0);
4871 fprintf (dump_file, " from ");
4872 print_generic_expr (dump_file, tmp, 0);
4873 fprintf (dump_file, "\n");
4874 }
4875
4876 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4877
4878 retval = true;
4879 }
4880
4881 /* If name2 is used later, create an ASSERT_EXPR for it. */
4882 if (name2 != NULL_TREE
4883 && TREE_CODE (name2) == SSA_NAME
4884 && TREE_CODE (cst2) == INTEGER_CST
4885 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4886 && live_on_edge (e, name2)
4887 && !has_single_use (name2))
4888 {
4889 tree tmp;
4890
4891 /* Build an expression for the range test. */
4892 tmp = name2;
4893 if (TREE_TYPE (name) != TREE_TYPE (name2))
4894 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4895 if (cst2 != NULL_TREE)
4896 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4897
4898 if (dump_file)
4899 {
4900 fprintf (dump_file, "Adding assert for ");
4901 print_generic_expr (dump_file, name2, 0);
4902 fprintf (dump_file, " from ");
4903 print_generic_expr (dump_file, tmp, 0);
4904 fprintf (dump_file, "\n");
4905 }
4906
4907 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4908
4909 retval = true;
4910 }
4911 }
4912
4913 /* In the case of post-in/decrement tests like if (i++) ... and uses
4914 of the in/decremented value on the edge, the extra name we want to
4915 assert for is not on the def chain of the name compared. Instead
4916 it is in the set of use stmts. */
4917 if ((comp_code == NE_EXPR
4918 || comp_code == EQ_EXPR)
4919 && TREE_CODE (val) == INTEGER_CST)
4920 {
4921 imm_use_iterator ui;
4922 gimple use_stmt;
4923 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4924 {
4925 /* Cut off to use-stmts that are in the predecessor. */
4926 if (gimple_bb (use_stmt) != e->src)
4927 continue;
4928
4929 if (!is_gimple_assign (use_stmt))
4930 continue;
4931
4932 enum tree_code code = gimple_assign_rhs_code (use_stmt);
4933 if (code != PLUS_EXPR
4934 && code != MINUS_EXPR)
4935 continue;
4936
4937 tree cst = gimple_assign_rhs2 (use_stmt);
4938 if (TREE_CODE (cst) != INTEGER_CST)
4939 continue;
4940
4941 tree name2 = gimple_assign_lhs (use_stmt);
4942 if (live_on_edge (e, name2))
4943 {
4944 cst = int_const_binop (code, val, cst);
4945 register_new_assert_for (name2, name2, comp_code, cst,
4946 NULL, e, bsi);
4947 retval = true;
4948 }
4949 }
4950 }
4951
4952 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4953 && TREE_CODE (val) == INTEGER_CST)
4954 {
4955 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4956 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4957 tree val2 = NULL_TREE;
4958 double_int mask = double_int_zero;
4959 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4960 unsigned int nprec = prec;
4961 enum tree_code rhs_code = ERROR_MARK;
4962
4963 if (is_gimple_assign (def_stmt))
4964 rhs_code = gimple_assign_rhs_code (def_stmt);
4965
4966 /* Add asserts for NAME cmp CST and NAME being defined
4967 as NAME = (int) NAME2. */
4968 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4969 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4970 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4971 && gimple_assign_cast_p (def_stmt))
4972 {
4973 name2 = gimple_assign_rhs1 (def_stmt);
4974 if (CONVERT_EXPR_CODE_P (rhs_code)
4975 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4976 && TYPE_UNSIGNED (TREE_TYPE (name2))
4977 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4978 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4979 || !tree_int_cst_equal (val,
4980 TYPE_MIN_VALUE (TREE_TYPE (val))))
4981 && live_on_edge (e, name2)
4982 && !has_single_use (name2))
4983 {
4984 tree tmp, cst;
4985 enum tree_code new_comp_code = comp_code;
4986
4987 cst = fold_convert (TREE_TYPE (name2),
4988 TYPE_MIN_VALUE (TREE_TYPE (val)));
4989 /* Build an expression for the range test. */
4990 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4991 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4992 fold_convert (TREE_TYPE (name2), val));
4993 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4994 {
4995 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4996 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4997 build_int_cst (TREE_TYPE (name2), 1));
4998 }
4999
5000 if (dump_file)
5001 {
5002 fprintf (dump_file, "Adding assert for ");
5003 print_generic_expr (dump_file, name2, 0);
5004 fprintf (dump_file, " from ");
5005 print_generic_expr (dump_file, tmp, 0);
5006 fprintf (dump_file, "\n");
5007 }
5008
5009 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5010 e, bsi);
5011
5012 retval = true;
5013 }
5014 }
5015
5016 /* Add asserts for NAME cmp CST and NAME being defined as
5017 NAME = NAME2 >> CST2.
5018
5019 Extract CST2 from the right shift. */
5020 if (rhs_code == RSHIFT_EXPR)
5021 {
5022 name2 = gimple_assign_rhs1 (def_stmt);
5023 cst2 = gimple_assign_rhs2 (def_stmt);
5024 if (TREE_CODE (name2) == SSA_NAME
5025 && host_integerp (cst2, 1)
5026 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5027 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
5028 && prec <= HOST_BITS_PER_DOUBLE_INT
5029 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5030 && live_on_edge (e, name2)
5031 && !has_single_use (name2))
5032 {
5033 mask = double_int::mask (tree_low_cst (cst2, 1));
5034 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5035 }
5036 }
5037 if (val2 != NULL_TREE
5038 && TREE_CODE (val2) == INTEGER_CST
5039 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5040 TREE_TYPE (val),
5041 val2, cst2), val))
5042 {
5043 enum tree_code new_comp_code = comp_code;
5044 tree tmp, new_val;
5045
5046 tmp = name2;
5047 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5048 {
5049 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5050 {
5051 tree type = build_nonstandard_integer_type (prec, 1);
5052 tmp = build1 (NOP_EXPR, type, name2);
5053 val2 = fold_convert (type, val2);
5054 }
5055 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5056 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
5057 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5058 }
5059 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5060 {
5061 double_int minval
5062 = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5063 new_val = val2;
5064 if (minval == tree_to_double_int (new_val))
5065 new_val = NULL_TREE;
5066 }
5067 else
5068 {
5069 double_int maxval
5070 = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5071 mask |= tree_to_double_int (val2);
5072 if (mask == maxval)
5073 new_val = NULL_TREE;
5074 else
5075 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
5076 }
5077
5078 if (new_val)
5079 {
5080 if (dump_file)
5081 {
5082 fprintf (dump_file, "Adding assert for ");
5083 print_generic_expr (dump_file, name2, 0);
5084 fprintf (dump_file, " from ");
5085 print_generic_expr (dump_file, tmp, 0);
5086 fprintf (dump_file, "\n");
5087 }
5088
5089 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5090 NULL, e, bsi);
5091 retval = true;
5092 }
5093 }
5094
5095 /* Add asserts for NAME cmp CST and NAME being defined as
5096 NAME = NAME2 & CST2.
5097
5098 Extract CST2 from the BIT_AND_EXPR.
5099
5100 Also handle
5101 NAME = (unsigned) NAME2;
5102 casts where NAME's type is unsigned and has smaller precision
5103 than NAME2's type as if it were NAME = NAME2 & MASK. */
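/* For example, for a 32-bit x_1, a test such as
   '(x_1 & 0xffffff00) == 0x1200' yields minv 0x1200 and maxv 0x12ff,
   and the code below registers the assertion
   'x_1 - 0x1200 <= 0xff' (computed in the unsigned type), i.e.
   x_1 is in [0x1200, 0x12ff]. */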
5104 names[0] = NULL_TREE;
5105 names[1] = NULL_TREE;
5106 cst2 = NULL_TREE;
5107 if (rhs_code == BIT_AND_EXPR
5108 || (CONVERT_EXPR_CODE_P (rhs_code)
5109 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5110 && TYPE_UNSIGNED (TREE_TYPE (val))
5111 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5112 > prec
5113 && !retval))
5114 {
5115 name2 = gimple_assign_rhs1 (def_stmt);
5116 if (rhs_code == BIT_AND_EXPR)
5117 cst2 = gimple_assign_rhs2 (def_stmt);
5118 else
5119 {
5120 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5121 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5122 }
5123 if (TREE_CODE (name2) == SSA_NAME
5124 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5125 && TREE_CODE (cst2) == INTEGER_CST
5126 && !integer_zerop (cst2)
5127 && nprec <= HOST_BITS_PER_DOUBLE_INT
5128 && (nprec > 1
5129 || TYPE_UNSIGNED (TREE_TYPE (val))))
5130 {
5131 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
5132 if (gimple_assign_cast_p (def_stmt2))
5133 {
5134 names[1] = gimple_assign_rhs1 (def_stmt2);
5135 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5136 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5137 || (TYPE_PRECISION (TREE_TYPE (name2))
5138 != TYPE_PRECISION (TREE_TYPE (names[1])))
5139 || !live_on_edge (e, names[1])
5140 || has_single_use (names[1]))
5141 names[1] = NULL_TREE;
5142 }
5143 if (live_on_edge (e, name2)
5144 && !has_single_use (name2))
5145 names[0] = name2;
5146 }
5147 }
5148 if (names[0] || names[1])
5149 {
5150 double_int minv, maxv = double_int_zero, valv, cst2v;
5151 double_int tem, sgnbit;
5152 bool valid_p = false, valn = false, cst2n = false;
5153 enum tree_code ccode = comp_code;
5154
5155 valv = tree_to_double_int (val).zext (nprec);
5156 cst2v = tree_to_double_int (cst2).zext (nprec);
5157 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5158 {
5159 valn = valv.sext (nprec).is_negative ();
5160 cst2n = cst2v.sext (nprec).is_negative ();
5161 }
5162 /* If CST2 doesn't have the most significant bit set,
5163 but VAL is negative, we have a comparison like
5164 if ((x & 0x123) > -4) (always true). Just give up. */
5165 if (!cst2n && valn)
5166 ccode = ERROR_MARK;
5167 if (cst2n)
5168 sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5169 else
5170 sgnbit = double_int_zero;
5171 minv = valv & cst2v;
5172 switch (ccode)
5173 {
5174 case EQ_EXPR:
5175 /* Minimum unsigned value for equality is VAL & CST2
5176 (should be equal to VAL, otherwise we probably should
5177 have folded the comparison into false) and
5178 maximum unsigned value is VAL | ~CST2. */
5179 maxv = valv | ~cst2v;
5180 maxv = maxv.zext (nprec);
5181 valid_p = true;
5182 break;
5183 case NE_EXPR:
5184 tem = valv | ~cst2v;
5185 tem = tem.zext (nprec);
5186 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5187 if (valv.is_zero ())
5188 {
5189 cst2n = false;
5190 sgnbit = double_int_zero;
5191 goto gt_expr;
5192 }
5193 /* If (VAL | ~CST2) is all ones, handle it as
5194 (X & CST2) < VAL. */
5195 if (tem == double_int::mask (nprec))
5196 {
5197 cst2n = false;
5198 valn = false;
5199 sgnbit = double_int_zero;
5200 goto lt_expr;
5201 }
5202 if (!cst2n
5203 && cst2v.sext (nprec).is_negative ())
5204 sgnbit
5205 = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5206 if (!sgnbit.is_zero ())
5207 {
5208 if (valv == sgnbit)
5209 {
5210 cst2n = true;
5211 valn = true;
5212 goto gt_expr;
5213 }
5214 if (tem == double_int::mask (nprec - 1))
5215 {
5216 cst2n = true;
5217 goto lt_expr;
5218 }
5219 if (!cst2n)
5220 sgnbit = double_int_zero;
5221 }
5222 break;
5223 case GE_EXPR:
5224 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5225 is VAL and maximum unsigned value is ~0. For signed
5226 comparison, if CST2 doesn't have the most significant bit
5227 set, handle it similarly. If CST2 has MSB set,
5228 the minimum is the same, and maximum is ~0U/2. */
5229 if (minv != valv)
5230 {
5231 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5232 VAL. */
5233 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5234 if (minv == valv)
5235 break;
5236 }
5237 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5238 valid_p = true;
5239 break;
5240 case GT_EXPR:
5241 gt_expr:
5242 /* Find out smallest MINV where MINV > VAL
5243 && (MINV & CST2) == MINV, if any. If VAL is signed and
5244 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5245 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5246 if (minv == valv)
5247 break;
5248 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5249 valid_p = true;
5250 break;
5251 case LE_EXPR:
5252 /* Minimum unsigned value for <= is 0 and maximum
5253 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5254 Otherwise, find smallest VAL2 where VAL2 > VAL
5255 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5256 as maximum.
5257 For signed comparison, if CST2 doesn't have the most
5258 significant bit set, handle it similarly. If CST2 has
5259 MSB set, the maximum is the same and minimum is INT_MIN. */
5260 if (minv == valv)
5261 maxv = valv;
5262 else
5263 {
5264 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5265 if (maxv == valv)
5266 break;
5267 maxv -= double_int_one;
5268 }
5269 maxv |= ~cst2v;
5270 maxv = maxv.zext (nprec);
5271 minv = sgnbit;
5272 valid_p = true;
5273 break;
5274 case LT_EXPR:
5275 lt_expr:
5276 /* Minimum unsigned value for < is 0 and maximum
5277 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5278 Otherwise, find smallest VAL2 where VAL2 > VAL
5279 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5280 as maximum.
5281 For signed comparison, if CST2 doesn't have the most
5282 significant bit set, handle it similarly. If CST2 has
5283 MSB set, the maximum is the same and minimum is INT_MIN. */
5284 if (minv == valv)
5285 {
5286 if (valv == sgnbit)
5287 break;
5288 maxv = valv;
5289 }
5290 else
5291 {
5292 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5293 if (maxv == valv)
5294 break;
5295 }
5296 maxv -= double_int_one;
5297 maxv |= ~cst2v;
5298 maxv = maxv.zext (nprec);
5299 minv = sgnbit;
5300 valid_p = true;
5301 break;
5302 default:
5303 break;
5304 }
5305 if (valid_p
5306 && (maxv - minv).zext (nprec) != double_int::mask (nprec))
5307 {
5308 tree tmp, new_val, type;
5309 int i;
5310
5311 for (i = 0; i < 2; i++)
5312 if (names[i])
5313 {
5314 double_int maxv2 = maxv;
5315 tmp = names[i];
5316 type = TREE_TYPE (names[i]);
5317 if (!TYPE_UNSIGNED (type))
5318 {
5319 type = build_nonstandard_integer_type (nprec, 1);
5320 tmp = build1 (NOP_EXPR, type, names[i]);
5321 }
5322 if (!minv.is_zero ())
5323 {
5324 tmp = build2 (PLUS_EXPR, type, tmp,
5325 double_int_to_tree (type, -minv));
5326 maxv2 = maxv - minv;
5327 }
5328 new_val = double_int_to_tree (type, maxv2);
5329
5330 if (dump_file)
5331 {
5332 fprintf (dump_file, "Adding assert for ");
5333 print_generic_expr (dump_file, names[i], 0);
5334 fprintf (dump_file, " from ");
5335 print_generic_expr (dump_file, tmp, 0);
5336 fprintf (dump_file, "\n");
5337 }
5338
5339 register_new_assert_for (names[i], tmp, LE_EXPR,
5340 new_val, NULL, e, bsi);
5341 retval = true;
5342 }
5343 }
5344 }
5345 }
5346
5347 return retval;
5348 }
5349
5350 /* OP is an operand of a truth value expression which is known to have
5351 a particular value. Register any asserts for OP and for any
5352 operands in OP's defining statement.
5353
5354 If CODE is EQ_EXPR, then we want to register OP is zero (false);
5355 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
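/* For example, if OP is defined as 'OP = a_1 & b_2' and CODE is
   NE_EXPR (OP is known to be true), the code below recurses on a_1
   and b_2 so that assertions about them, and about the names feeding
   their definitions, can be registered as well. */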
5356
5357 static bool
5358 register_edge_assert_for_1 (tree op, enum tree_code code,
5359 edge e, gimple_stmt_iterator bsi)
5360 {
5361 bool retval = false;
5362 gimple op_def;
5363 tree val;
5364 enum tree_code rhs_code;
5365
5366 /* We only care about SSA_NAMEs. */
5367 if (TREE_CODE (op) != SSA_NAME)
5368 return false;
5369
5370 /* We know that OP will have a zero or nonzero value. If OP is used
5371 more than once, go ahead and register an assert for OP.
5372
5373 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5374 it will always be set for OP (because OP is used in a COND_EXPR in
5375 the subgraph). */
5376 if (!has_single_use (op))
5377 {
5378 val = build_int_cst (TREE_TYPE (op), 0);
5379 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5380 retval = true;
5381 }
5382
5383 /* Now look at how OP is set. If it's set from a comparison,
5384 a truth operation or some bit operations, then we may be able
5385 to register information about the operands of that assignment. */
5386 op_def = SSA_NAME_DEF_STMT (op);
5387 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5388 return retval;
5389
5390 rhs_code = gimple_assign_rhs_code (op_def);
5391
5392 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5393 {
5394 bool invert = (code == EQ_EXPR ? true : false);
5395 tree op0 = gimple_assign_rhs1 (op_def);
5396 tree op1 = gimple_assign_rhs2 (op_def);
5397
5398 if (TREE_CODE (op0) == SSA_NAME)
5399 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5400 invert);
5401 if (TREE_CODE (op1) == SSA_NAME)
5402 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5403 invert);
5404 }
5405 else if ((code == NE_EXPR
5406 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5407 || (code == EQ_EXPR
5408 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5409 {
5410 /* Recurse on each operand. */
5411 tree op0 = gimple_assign_rhs1 (op_def);
5412 tree op1 = gimple_assign_rhs2 (op_def);
5413 if (TREE_CODE (op0) == SSA_NAME
5414 && has_single_use (op0))
5415 retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5416 if (TREE_CODE (op1) == SSA_NAME
5417 && has_single_use (op1))
5418 retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5419 }
5420 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5421 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5422 {
5423 /* Recurse, flipping CODE. */
5424 code = invert_tree_comparison (code, false);
5425 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5426 code, e, bsi);
5427 }
5428 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5429 {
5430 /* Recurse through the copy. */
5431 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5432 code, e, bsi);
5433 }
5434 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5435 {
5436 /* Recurse through the type conversion. */
5437 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5438 code, e, bsi);
5439 }
5440
5441 return retval;
5442 }
5443
5444 /* Try to register an edge assertion for SSA name NAME on edge E for
5445 the condition COND contributing to the conditional jump pointed to by SI.
5446 Return true if an assertion for NAME could be registered. */
5447
5448 static bool
5449 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5450 enum tree_code cond_code, tree cond_op0,
5451 tree cond_op1)
5452 {
5453 tree val;
5454 enum tree_code comp_code;
5455 bool retval = false;
5456 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5457
5458 /* Do not attempt to infer anything in names that flow through
5459 abnormal edges. */
5460 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5461 return false;
5462
5463 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5464 cond_op0, cond_op1,
5465 is_else_edge,
5466 &comp_code, &val))
5467 return false;
5468
5469 /* Register ASSERT_EXPRs for name. */
5470 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5471 cond_op1, is_else_edge);
5472
5473
5474 /* If COND is effectively an equality test of an SSA_NAME against
5475 the value zero or one, then we may be able to assert values
5476 for SSA_NAMEs which flow into COND. */
5477
5478 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5479 statement of NAME we can assert both operands of the BIT_AND_EXPR
5480 have nonzero value. */
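/* For instance, with illustrative SSA names, given
     x_3 = a_1 & b_2;  if (x_3 != 0) ...
   the true edge lets us register a_1 != 0 and b_2 != 0, since a
   bitwise AND is nonzero only if both of its operands are nonzero.  */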
5481 if (((comp_code == EQ_EXPR && integer_onep (val))
5482 || (comp_code == NE_EXPR && integer_zerop (val))))
5483 {
5484 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5485
5486 if (is_gimple_assign (def_stmt)
5487 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5488 {
5489 tree op0 = gimple_assign_rhs1 (def_stmt);
5490 tree op1 = gimple_assign_rhs2 (def_stmt);
5491 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5492 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5493 }
5494 }
5495
5496 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5497 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5498 have zero value. */
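/* For instance, with illustrative SSA names, given
     x_3 = a_1 | b_2;  if (x_3 == 0) ...
   the true edge lets us register a_1 == 0 and b_2 == 0, since a
   bitwise IOR is zero only if both of its operands are zero.  */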
5499 if (((comp_code == EQ_EXPR && integer_zerop (val))
5500 || (comp_code == NE_EXPR && integer_onep (val))))
5501 {
5502 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5503
5504 /* For a BIT_IOR_EXPR defining NAME, both operands necessarily have
5505 zero value only if NAME == 0, or if the type precision is one. */
5506 if (is_gimple_assign (def_stmt)
5507 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5508 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5509 || comp_code == EQ_EXPR)))
5510 {
5511 tree op0 = gimple_assign_rhs1 (def_stmt);
5512 tree op1 = gimple_assign_rhs2 (def_stmt);
5513 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5514 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5515 }
5516 }
5517
5518 return retval;
5519 }
5520
5521
5522 /* Determine whether the outgoing edges of BB should receive an
5523 ASSERT_EXPR for each of the operands of BB's LAST statement.
5524 The last statement of BB must be a COND_EXPR.
5525
5526 If any of the sub-graphs rooted at BB have an interesting use of
5527 the predicate operands, an assert location node is added to the
5528 list of assertions for the corresponding operands. */
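/* For instance, with an illustrative predicate
     if (x_1 > 10)
   the outgoing true edge can receive ASSERT_EXPR <x_1, x_1 > 10> and
   the false edge ASSERT_EXPR <x_1, x_1 <= 10>.  */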
5529
5530 static bool
5531 find_conditional_asserts (basic_block bb, gimple last)
5532 {
5533 bool need_assert;
5534 gimple_stmt_iterator bsi;
5535 tree op;
5536 edge_iterator ei;
5537 edge e;
5538 ssa_op_iter iter;
5539
5540 need_assert = false;
5541 bsi = gsi_for_stmt (last);
5542
5543 /* Look for uses of the operands in each of the sub-graphs
5544 rooted at BB. We need to check each of the outgoing edges
5545 separately, so that we know what kind of ASSERT_EXPR to
5546 insert. */
5547 FOR_EACH_EDGE (e, ei, bb->succs)
5548 {
5549 if (e->dest == bb)
5550 continue;
5551
5552 /* Register the necessary assertions for each operand in the
5553 conditional predicate. */
5554 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5555 {
5556 need_assert |= register_edge_assert_for (op, e, bsi,
5557 gimple_cond_code (last),
5558 gimple_cond_lhs (last),
5559 gimple_cond_rhs (last));
5560 }
5561 }
5562
5563 return need_assert;
5564 }
5565
5566 struct case_info
5567 {
5568 tree expr;
5569 basic_block bb;
5570 };
5571
5572 /* Compare two case labels, sorting first by the destination bb index
5573 and then by the case value. */
5574
5575 static int
5576 compare_case_labels (const void *p1, const void *p2)
5577 {
5578 const struct case_info *ci1 = (const struct case_info *) p1;
5579 const struct case_info *ci2 = (const struct case_info *) p2;
5580 int idx1 = ci1->bb->index;
5581 int idx2 = ci2->bb->index;
5582
5583 if (idx1 < idx2)
5584 return -1;
5585 else if (idx1 == idx2)
5586 {
5587 /* Make sure the default label is first in a group. */
5588 if (!CASE_LOW (ci1->expr))
5589 return -1;
5590 else if (!CASE_LOW (ci2->expr))
5591 return 1;
5592 else
5593 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5594 CASE_LOW (ci2->expr));
5595 }
5596 else
5597 return 1;
5598 }
5599
5600 /* Determine whether the outgoing edges of BB should receive an
5601 ASSERT_EXPR for each of the operands of BB's LAST statement.
5602 The last statement of BB must be a SWITCH_EXPR.
5603
5604 If any of the sub-graphs rooted at BB have an interesting use of
5605 the predicate operands, an assert location node is added to the
5606 list of assertions for the corresponding operands. */
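/* For instance, for an illustrative label
     case 3 ... 5:
   reached on the switch operand i_2, the edge to that label can
   receive the assertions i_2 >= 3 and i_2 <= 5; a label with a
   single value N instead receives i_2 == N.  */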
5607
5608 static bool
5609 find_switch_asserts (basic_block bb, gimple last)
5610 {
5611 bool need_assert;
5612 gimple_stmt_iterator bsi;
5613 tree op;
5614 edge e;
5615 struct case_info *ci;
5616 size_t n = gimple_switch_num_labels (last);
5617 #if GCC_VERSION >= 4000
5618 unsigned int idx;
5619 #else
5620 /* Work around GCC 3.4 bug (PR 37086). */
5621 volatile unsigned int idx;
5622 #endif
5623
5624 need_assert = false;
5625 bsi = gsi_for_stmt (last);
5626 op = gimple_switch_index (last);
5627 if (TREE_CODE (op) != SSA_NAME)
5628 return false;
5629
5630 /* Build a vector of case labels sorted by destination label. */
5631 ci = XNEWVEC (struct case_info, n);
5632 for (idx = 0; idx < n; ++idx)
5633 {
5634 ci[idx].expr = gimple_switch_label (last, idx);
5635 ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
5636 }
5637 qsort (ci, n, sizeof (struct case_info), compare_case_labels);
5638
5639 for (idx = 0; idx < n; ++idx)
5640 {
5641 tree min, max;
5642 tree cl = ci[idx].expr;
5643 basic_block cbb = ci[idx].bb;
5644
5645 min = CASE_LOW (cl);
5646 max = CASE_HIGH (cl);
5647
5648 /* If there are multiple case labels with the same destination
5649 we need to combine them to a single value range for the edge. */
5650 if (idx + 1 < n && cbb == ci[idx + 1].bb)
5651 {
5652 /* Skip labels until the last of the group. */
5653 do {
5654 ++idx;
5655 } while (idx < n && cbb == ci[idx].bb);
5656 --idx;
5657
5658 /* Pick up the maximum of the case label range. */
5659 if (CASE_HIGH (ci[idx].expr))
5660 max = CASE_HIGH (ci[idx].expr);
5661 else
5662 max = CASE_LOW (ci[idx].expr);
5663 }
5664
5665 /* Nothing to do if the range includes the default label until we
5666 can register anti-ranges. */
5667 if (min == NULL_TREE)
5668 continue;
5669
5670 /* Find the edge to register the assert expr on. */
5671 e = find_edge (bb, cbb);
5672
5673 /* Register the necessary assertions for the operand in the
5674 SWITCH_EXPR. */
5675 need_assert |= register_edge_assert_for (op, e, bsi,
5676 max ? GE_EXPR : EQ_EXPR,
5677 op,
5678 fold_convert (TREE_TYPE (op),
5679 min));
5680 if (max)
5681 {
5682 need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
5683 op,
5684 fold_convert (TREE_TYPE (op),
5685 max));
5686 }
5687 }
5688
5689 XDELETEVEC (ci);
5690 return need_assert;
5691 }
5692
5693
5694 /* Traverse all the statements in block BB looking for statements that
5695 may generate useful assertions for the SSA names in their operands.
5696 If a statement produces a useful assertion A for name N_i, then the
5697 list of assertions already generated for N_i is scanned to
5698 determine if A is actually needed.
5699
5700 If N_i already had the assertion A at a location dominating the
5701 current location, then nothing needs to be done. Otherwise, the
5702 new location for A is recorded instead.
5703
5704 1- For every statement S in BB, all the variables used by S are
5705 added to bitmap FOUND_IN_SUBGRAPH.
5706
5707 2- If statement S uses an operand N in a way that exposes a known
5708 value range for N, then if N was not already generated by an
5709 ASSERT_EXPR, create a new assert location for N. For instance,
5710 if N is a pointer and the statement dereferences it, we can
5711 assume that N is not NULL.
5712
5713 3- COND_EXPRs are a special case of #2. We can derive range
5714 information from the predicate but need to insert different
5715 ASSERT_EXPRs for each of the sub-graphs rooted at the
5716 conditional block. If the last statement of BB is a conditional
5717 expression of the form 'X op Y', then
5718
5719 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5720
5721 b) If the conditional is the only entry point to the sub-graph
5722 corresponding to the THEN_CLAUSE, recurse into it. On
5723 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5724 an ASSERT_EXPR is added for the corresponding variable.
5725
5726 c) Repeat step (b) on the ELSE_CLAUSE.
5727
5728 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5729
5730 For instance,
5731
5732 if (a == 9)
5733 b = a;
5734 else
5735 b = c + 1;
5736
5737 In this case, an assertion on the THEN clause is useful to
5738 determine that 'a' is always 9 on that edge. However, an assertion
5739 on the ELSE clause would be unnecessary.
5740
5741 4- If BB does not end in a conditional expression, then we recurse
5742 into BB's dominator children.
5743
5744 At the end of the recursive traversal, every SSA name will have a
5745 list of locations where ASSERT_EXPRs should be added. When a new
5746 location for name N is found, it is registered by calling
5747 register_new_assert_for. That function keeps track of all the
5748 registered assertions to prevent adding unnecessary assertions.
5749 For instance, if a pointer P_4 is dereferenced more than once in a
5750 dominator tree, only the location dominating all the dereferences of
5751 P_4 will receive an ASSERT_EXPR.
5752
5753 If this function returns true, then it means that there are names
5754 for which we need to generate ASSERT_EXPRs. Those assertions are
5755 inserted by process_assert_insertions. */
5756
5757 static bool
5758 find_assert_locations_1 (basic_block bb, sbitmap live)
5759 {
5760 gimple_stmt_iterator si;
5761 gimple last;
5762 bool need_assert;
5763
5764 need_assert = false;
5765 last = last_stmt (bb);
5766
5767 /* If BB's last statement is a conditional statement involving integer
5768 operands, determine if we need to add ASSERT_EXPRs. */
5769 if (last
5770 && gimple_code (last) == GIMPLE_COND
5771 && !fp_predicate (last)
5772 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5773 need_assert |= find_conditional_asserts (bb, last);
5774
5775 /* If BB's last statement is a switch statement involving integer
5776 operands, determine if we need to add ASSERT_EXPRs. */
5777 if (last
5778 && gimple_code (last) == GIMPLE_SWITCH
5779 && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
5780 need_assert |= find_switch_asserts (bb, last);
5781
5782 /* Traverse all the statements in BB marking used names and looking
5783 for statements that may infer assertions for their used operands. */
5784 for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
5785 {
5786 gimple stmt;
5787 tree op;
5788 ssa_op_iter i;
5789
5790 stmt = gsi_stmt (si);
5791
5792 if (is_gimple_debug (stmt))
5793 continue;
5794
5795 /* See if we can derive an assertion for any of STMT's operands. */
5796 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5797 {
5798 tree value;
5799 enum tree_code comp_code;
5800
5801 /* If op is not live beyond this stmt, do not bother to insert
5802 asserts for it. */
5803 if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
5804 continue;
5805
5806 /* If OP is used in such a way that we can infer a value
5807 range for it, and we don't find a previous assertion for
5808 it, create a new assertion location node for OP. */
5809 if (infer_value_range (stmt, op, &comp_code, &value))
5810 {
5811 /* If we are able to infer a nonzero value range for OP,
5812 then walk backwards through the use-def chain to see if OP
5813 was set via a typecast.
5814
5815 If so, then we can also infer a nonzero value range
5816 for the operand of the NOP_EXPR. */
5817 if (comp_code == NE_EXPR && integer_zerop (value))
5818 {
5819 tree t = op;
5820 gimple def_stmt = SSA_NAME_DEF_STMT (t);
5821
5822 while (is_gimple_assign (def_stmt)
5823 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
5824 && TREE_CODE
5825 (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
5826 && POINTER_TYPE_P
5827 (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
5828 {
5829 t = gimple_assign_rhs1 (def_stmt);
5830 def_stmt = SSA_NAME_DEF_STMT (t);
5831
5832 /* Note we want to register the assert for the
5833 operand of the NOP_EXPR after SI, not after the
5834 conversion. */
5835 if (! has_single_use (t))
5836 {
5837 register_new_assert_for (t, t, comp_code, value,
5838 bb, NULL, si);
5839 need_assert = true;
5840 }
5841 }
5842 }
5843
5844 register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
5845 need_assert = true;
5846 }
5847 }
5848
5849 /* Update live. */
5850 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5851 bitmap_set_bit (live, SSA_NAME_VERSION (op));
5852 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
5853 bitmap_clear_bit (live, SSA_NAME_VERSION (op));
5854 }
5855
5856 /* Traverse all PHI nodes in BB, updating live. */
5857 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5858 {
5859 use_operand_p arg_p;
5860 ssa_op_iter i;
5861 gimple phi = gsi_stmt (si);
5862 tree res = gimple_phi_result (phi);
5863
5864 if (virtual_operand_p (res))
5865 continue;
5866
5867 FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
5868 {
5869 tree arg = USE_FROM_PTR (arg_p);
5870 if (TREE_CODE (arg) == SSA_NAME)
5871 bitmap_set_bit (live, SSA_NAME_VERSION (arg));
5872 }
5873
5874 bitmap_clear_bit (live, SSA_NAME_VERSION (res));
5875 }
5876
5877 return need_assert;
5878 }
5879
5880 /* Do an RPO walk over the function computing SSA name liveness
5881 on-the-fly and deciding on assert expressions to insert.
5882 Returns true if there are assert expressions to be inserted. */
5883
5884 static bool
5885 find_assert_locations (void)
5886 {
5887 int *rpo = XNEWVEC (int, last_basic_block);
5888 int *bb_rpo = XNEWVEC (int, last_basic_block);
5889 int *last_rpo = XCNEWVEC (int, last_basic_block);
5890 int rpo_cnt, i;
5891 bool need_asserts;
5892
5893 live = XCNEWVEC (sbitmap, last_basic_block);
5894 rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
5895 for (i = 0; i < rpo_cnt; ++i)
5896 bb_rpo[rpo[i]] = i;
5897
5898 need_asserts = false;
5899 for (i = rpo_cnt - 1; i >= 0; --i)
5900 {
5901 basic_block bb = BASIC_BLOCK (rpo[i]);
5902 edge e;
5903 edge_iterator ei;
5904
5905 if (!live[rpo[i]])
5906 {
5907 live[rpo[i]] = sbitmap_alloc (num_ssa_names);
5908 bitmap_clear (live[rpo[i]]);
5909 }
5910
5911 /* Process BB and update the live information with uses in
5912 this block. */
5913 need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);
5914
5915 /* Merge liveness into the predecessor blocks and free it. */
5916 if (!bitmap_empty_p (live[rpo[i]]))
5917 {
5918 int pred_rpo = i;
5919 FOR_EACH_EDGE (e, ei, bb->preds)
5920 {
5921 int pred = e->src->index;
5922 if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
5923 continue;
5924
5925 if (!live[pred])
5926 {
5927 live[pred] = sbitmap_alloc (num_ssa_names);
5928 bitmap_clear (live[pred]);
5929 }
5930 bitmap_ior (live[pred], live[pred], live[rpo[i]]);
5931
5932 if (bb_rpo[pred] < pred_rpo)
5933 pred_rpo = bb_rpo[pred];
5934 }
5935
5936 /* Record the RPO number of the last visited block that needs
5937 live information from this block. */
5938 last_rpo[rpo[i]] = pred_rpo;
5939 }
5940 else
5941 {
5942 sbitmap_free (live[rpo[i]]);
5943 live[rpo[i]] = NULL;
5944 }
5945
5946 /* We can free all successors' live bitmaps if all of their
5947 predecessors have been visited already. */
5948 FOR_EACH_EDGE (e, ei, bb->succs)
5949 if (last_rpo[e->dest->index] == i
5950 && live[e->dest->index])
5951 {
5952 sbitmap_free (live[e->dest->index]);
5953 live[e->dest->index] = NULL;
5954 }
5955 }
5956
5957 XDELETEVEC (rpo);
5958 XDELETEVEC (bb_rpo);
5959 XDELETEVEC (last_rpo);
5960 for (i = 0; i < last_basic_block; ++i)
5961 if (live[i])
5962 sbitmap_free (live[i]);
5963 XDELETEVEC (live);
5964
5965 return need_asserts;
5966 }
5967
5968 /* Create an ASSERT_EXPR for NAME and insert it in the location
5969 indicated by LOC. Return true if we made any edge insertions. */
5970
5971 static bool
5972 process_assert_insertions_for (tree name, assert_locus_t loc)
5973 {
5974 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5975 gimple stmt;
5976 tree cond;
5977 gimple assert_stmt;
5978 edge_iterator ei;
5979 edge e;
5980
5981 /* If we have X <=> X do not insert an assert expr for that. */
5982 if (loc->expr == loc->val)
5983 return false;
5984
5985 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5986 assert_stmt = build_assert_expr_for (cond, name);
5987 if (loc->e)
5988 {
5989 /* We have been asked to insert the assertion on an edge. This
5990 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5991 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5992 || (gimple_code (gsi_stmt (loc->si))
5993 == GIMPLE_SWITCH));
5994
5995 gsi_insert_on_edge (loc->e, assert_stmt);
5996 return true;
5997 }
5998
5999 /* Otherwise, we can insert right after LOC->SI as long as the
6000 statement is not the last statement in the block. */
6001 stmt = gsi_stmt (loc->si);
6002 if (!stmt_ends_bb_p (stmt))
6003 {
6004 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6005 return false;
6006 }
6007
6008 /* If STMT must be the last statement in BB, we can only insert new
6009 assertions on the non-abnormal edge out of BB. Note that since
6010 STMT is not control flow, there may only be one non-abnormal edge
6011 out of BB. */
6012 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6013 if (!(e->flags & EDGE_ABNORMAL))
6014 {
6015 gsi_insert_on_edge (e, assert_stmt);
6016 return true;
6017 }
6018
6019 gcc_unreachable ();
6020 }
6021
6022
6023 /* Process all the insertions registered for every name N_i in
6024 NEED_ASSERT_FOR. The list of assertions to be inserted for N_i is
6025 found in ASSERTS_FOR[i]. */
6026
6027 static void
6028 process_assert_insertions (void)
6029 {
6030 unsigned i;
6031 bitmap_iterator bi;
6032 bool update_edges_p = false;
6033 int num_asserts = 0;
6034
6035 if (dump_file && (dump_flags & TDF_DETAILS))
6036 dump_all_asserts (dump_file);
6037
6038 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6039 {
6040 assert_locus_t loc = asserts_for[i];
6041 gcc_assert (loc);
6042
6043 while (loc)
6044 {
6045 assert_locus_t next = loc->next;
6046 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6047 free (loc);
6048 loc = next;
6049 num_asserts++;
6050 }
6051 }
6052
6053 if (update_edges_p)
6054 gsi_commit_edge_inserts ();
6055
6056 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6057 num_asserts);
6058 }
6059
6060
6061 /* Traverse the flowgraph looking for conditional jumps to insert range
6062 expressions. These range expressions are meant to provide information
6063 to optimizations that need to reason in terms of value ranges. They
6064 will not be expanded into RTL. For instance, given:
6065
6066 x = ...
6067 y = ...
6068 if (x < y)
6069 y = x - 2;
6070 else
6071 x = y + 3;
6072
6073 this pass will transform the code into:
6074
6075 x = ...
6076 y = ...
6077 if (x < y)
6078 {
6079 x = ASSERT_EXPR <x, x < y>
6080 y = x - 2
6081 }
6082 else
6083 {
6084 y = ASSERT_EXPR <y, x <= y>
6085 x = y + 3
6086 }
6087
6088 The idea is that once copy and constant propagation have run, other
6089 optimizations will be able to determine what ranges of values 'x'
6090 can take in different paths of the code, simply by checking the
6091 reaching definition of 'x'. */
6092
6093 static void
6094 insert_range_assertions (void)
6095 {
6096 need_assert_for = BITMAP_ALLOC (NULL);
6097 asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);
6098
6099 calculate_dominance_info (CDI_DOMINATORS);
6100
6101 if (find_assert_locations ())
6102 {
6103 process_assert_insertions ();
6104 update_ssa (TODO_update_ssa_no_phi);
6105 }
6106
6107 if (dump_file && (dump_flags & TDF_DETAILS))
6108 {
6109 fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
6110 dump_function_to_file (current_function_decl, dump_file, dump_flags);
6111 }
6112
6113 free (asserts_for);
6114 BITMAP_FREE (need_assert_for);
6115 }
6116
6117 /* Check one ARRAY_REF in REF, located at LOCATION. Ignore flexible
6118 arrays and "struct" hacks. If VRP can determine that the array
6119 subscript is a constant, check whether it is outside the valid
6120 range. If the array subscript is a RANGE, warn if it does not
6121 overlap the valid range.
6122 IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR. */
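/* For instance, for an illustrative declaration
     int a[10];
   a constant subscript of 10 is diagnosed as above the bounds and a
   constant subscript of -1 as below them, while &a[10] is accepted
   because IGNORE_OFF_BY_ONE is true for ADDR_EXPRs.  */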
6123
6124 static void
6125 check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
6126 {
6127 value_range_t* vr = NULL;
6128 tree low_sub, up_sub;
6129 tree low_bound, up_bound, up_bound_p1;
6130 tree base;
6131
6132 if (TREE_NO_WARNING (ref))
6133 return;
6134
6135 low_sub = up_sub = TREE_OPERAND (ref, 1);
6136 up_bound = array_ref_up_bound (ref);
6137
6138 /* Cannot check flexible arrays. */
6139 if (!up_bound
6140 || TREE_CODE (up_bound) != INTEGER_CST)
6141 return;
6142
6143 /* Accesses to trailing arrays via pointers may access storage
6144 beyond the type's array bounds. */
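/* E.g., for an illustrative struct
     struct S { int n; int data[1]; };
   an access such as p->data[5] through a pointer p is not diagnosed,
   since DATA is the last field and the object may have been
   allocated with trailing storage.  */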
6145 base = get_base_address (ref);
6146 if (base && TREE_CODE (base) == MEM_REF)
6147 {
6148 tree cref, next = NULL_TREE;
6149
6150 if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
6151 return;
6152
6153 cref = TREE_OPERAND (ref, 0);
6154 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
6155 for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
6156 next && TREE_CODE (next) != FIELD_DECL;
6157 next = DECL_CHAIN (next))
6158 ;
6159
6160 /* If this is the last field in a struct type or a field in a
6161 union type, do not warn. */
6162 if (!next)
6163 return;
6164 }
6165
6166 low_bound = array_ref_low_bound (ref);
6167 up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
6168
6169 if (TREE_CODE (low_sub) == SSA_NAME)
6170 {
6171 vr = get_value_range (low_sub);
6172 if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
6173 {
6174 low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
6175 up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
6176 }
6177 }
6178
6179 if (vr && vr->type == VR_ANTI_RANGE)
6180 {
6181 if (TREE_CODE (up_sub) == INTEGER_CST
6182 && tree_int_cst_lt (up_bound, up_sub)
6183 && TREE_CODE (low_sub) == INTEGER_CST
6184 && tree_int_cst_lt (low_sub, low_bound))
6185 {
6186 warning_at (location, OPT_Warray_bounds,
6187 "array subscript is outside array bounds");
6188 TREE_NO_WARNING (ref) = 1;
6189 }
6190 }
6191 else if (TREE_CODE (up_sub) == INTEGER_CST
6192 && (ignore_off_by_one
6193 ? (tree_int_cst_lt (up_bound, up_sub)
6194 && !tree_int_cst_equal (up_bound_p1, up_sub))
6195 : (tree_int_cst_lt (up_bound, up_sub)
6196 || tree_int_cst_equal (up_bound_p1, up_sub))))
6197 {
6198 if (dump_file && (dump_flags & TDF_DETAILS))
6199 {
6200 fprintf (dump_file, "Array bound warning for ");
6201 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6202 fprintf (dump_file, "\n");
6203 }
6204 warning_at (location, OPT_Warray_bounds,
6205 "array subscript is above array bounds");
6206 TREE_NO_WARNING (ref) = 1;
6207 }
6208 else if (TREE_CODE (low_sub) == INTEGER_CST
6209 && tree_int_cst_lt (low_sub, low_bound))
6210 {
6211 if (dump_file && (dump_flags & TDF_DETAILS))
6212 {
6213 fprintf (dump_file, "Array bound warning for ");
6214 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
6215 fprintf (dump_file, "\n");
6216 }
6217 warning_at (location, OPT_Warray_bounds,
6218 "array subscript is below array bounds");
6219 TREE_NO_WARNING (ref) = 1;
6220 }
6221 }
6222
6223 /* Search whether the expression T, located at LOCATION, computes
6224 the address of an ARRAY_REF, and call check_array_ref on it. */
6225
6226 static void
6227 search_for_addr_array (tree t, location_t location)
6228 {
6229 while (TREE_CODE (t) == SSA_NAME)
6230 {
6231 gimple g = SSA_NAME_DEF_STMT (t);
6232
6233 if (gimple_code (g) != GIMPLE_ASSIGN)
6234 return;
6235
6236 if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
6237 != GIMPLE_SINGLE_RHS)
6238 return;
6239
6240 t = gimple_assign_rhs1 (g);
6241 }
6242
6243
6244 /* We are only interested in addresses of ARRAY_REFs. */
6245 if (TREE_CODE (t) != ADDR_EXPR)
6246 return;
6247
6248 /* Check each ARRAY_REF in the reference chain. */
6249 do
6250 {
6251 if (TREE_CODE (t) == ARRAY_REF)
6252 check_array_ref (location, t, true /*ignore_off_by_one*/);
6253
6254 t = TREE_OPERAND (t, 0);
6255 }
6256 while (handled_component_p (t));
6257
6258 if (TREE_CODE (t) == MEM_REF
6259 && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
6260 && !TREE_NO_WARNING (t))
6261 {
6262 tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
6263 tree low_bound, up_bound, el_sz;
6264 double_int idx;
6265 if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
6266 || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
6267 || !TYPE_DOMAIN (TREE_TYPE (tem)))
6268 return;
6269
6270 low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6271 up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
6272 el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
6273 if (!low_bound
6274 || TREE_CODE (low_bound) != INTEGER_CST
6275 || !up_bound
6276 || TREE_CODE (up_bound) != INTEGER_CST
6277 || !el_sz
6278 || TREE_CODE (el_sz) != INTEGER_CST)
6279 return;
6280
6281 idx = mem_ref_offset (t);
6282 idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
6283 if (idx.slt (double_int_zero))
6284 {
6285 if (dump_file && (dump_flags & TDF_DETAILS))
6286 {
6287 fprintf (dump_file, "Array bound warning for ");
6288 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6289 fprintf (dump_file, "\n");
6290 }
6291 warning_at (location, OPT_Warray_bounds,
6292 "array subscript is below array bounds");
6293 TREE_NO_WARNING (t) = 1;
6294 }
6295 else if (idx.sgt (tree_to_double_int (up_bound)
6296 - tree_to_double_int (low_bound)
6297 + double_int_one))
6298 {
6299 if (dump_file && (dump_flags & TDF_DETAILS))
6300 {
6301 fprintf (dump_file, "Array bound warning for ");
6302 dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
6303 fprintf (dump_file, "\n");
6304 }
6305 warning_at (location, OPT_Warray_bounds,
6306 "array subscript is above array bounds");
6307 TREE_NO_WARNING (t) = 1;
6308 }
6309 }
6310 }
6311
6312 /* walk_tree() callback that checks if *TP is
6313 an ARRAY_REF inside an ADDR_EXPR (in which case an array
6314 subscript one past the valid range is allowed). Call
6315 check_array_ref for each ARRAY_REF found. The location is
6316 passed in DATA. */
6317
6318 static tree
6319 check_array_bounds (tree *tp, int *walk_subtree, void *data)
6320 {
6321 tree t = *tp;
6322 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
6323 location_t location;
6324
6325 if (EXPR_HAS_LOCATION (t))
6326 location = EXPR_LOCATION (t);
6327 else
6328 {
6329 location_t *locp = (location_t *) wi->info;
6330 location = *locp;
6331 }
6332
6333 *walk_subtree = TRUE;
6334
6335 if (TREE_CODE (t) == ARRAY_REF)
6336 check_array_ref (location, t, false /*ignore_off_by_one*/);
6337
6338 if (TREE_CODE (t) == MEM_REF
6339 || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
6340 search_for_addr_array (TREE_OPERAND (t, 0), location);
6341
6342 if (TREE_CODE (t) == ADDR_EXPR)
6343 *walk_subtree = FALSE;
6344
6345 return NULL_TREE;
6346 }
6347
6348 /* Walk over all statements of all reachable BBs and call check_array_bounds
6349 on them. */
6350
6351 static void
6352 check_all_array_refs (void)
6353 {
6354 basic_block bb;
6355 gimple_stmt_iterator si;
6356
6357 FOR_EACH_BB (bb)
6358 {
6359 edge_iterator ei;
6360 edge e;
6361 bool executable = false;
6362
6363 /* Skip blocks that were found to be unreachable. */
6364 FOR_EACH_EDGE (e, ei, bb->preds)
6365 executable |= !!(e->flags & EDGE_EXECUTABLE);
6366 if (!executable)
6367 continue;
6368
6369 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6370 {
6371 gimple stmt = gsi_stmt (si);
6372 struct walk_stmt_info wi;
6373 if (!gimple_has_location (stmt))
6374 continue;
6375
6376 if (is_gimple_call (stmt))
6377 {
6378 size_t i;
6379 size_t n = gimple_call_num_args (stmt);
6380 for (i = 0; i < n; i++)
6381 {
6382 tree arg = gimple_call_arg (stmt, i);
6383 search_for_addr_array (arg, gimple_location (stmt));
6384 }
6385 }
6386 else
6387 {
6388 memset (&wi, 0, sizeof (wi));
6389 wi.info = CONST_CAST (void *, (const void *)
6390 gimple_location_ptr (stmt));
6391
6392 walk_gimple_op (gsi_stmt (si),
6393 check_array_bounds,
6394 &wi);
6395 }
6396 }
6397 }
6398 }
6399
6400 /* Convert range assertion expressions into the implied copies and
6401 copy propagate away the copies. Doing the trivial copy propagation
6402 here avoids the need to run the full copy propagation pass after
6403 VRP.
6404
6405 FIXME, this will eventually lead to copy propagation removing the
6406 names that had useful range information attached to them. For
6407 instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
6408 then N_i will have the range [3, +INF].
6409
6410 However, by converting the assertion into the implied copy
6411 operation N_i = N_j, we will then copy-propagate N_j into the uses
6412 of N_i and lose the range information. We may want to hold on to
6413 ASSERT_EXPRs a little while longer as the ranges could be used in
6414 things like jump threading.
6415
6416 The problem with keeping ASSERT_EXPRs around is that passes after
6417 VRP need to handle them appropriately.
6418
6419 Another approach would be to make the range information a first
6420 class property of the SSA_NAME so that it can be queried from
6421 any pass. This is made somewhat more complex by the need for
6422 multiple ranges to be associated with one SSA_NAME. */
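/* For instance, an illustrative assertion
     x_4 = ASSERT_EXPR <x_3, x_3 > 3>
   is treated as the copy x_4 = x_3: every use of x_4 is replaced
   with x_3 and the statement is removed.  */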
6423
6424 static void
6425 remove_range_assertions (void)
6426 {
6427 basic_block bb;
6428 gimple_stmt_iterator si;
6429
6430 /* Note that the BSI iterator bump happens at the bottom of the
6431 loop and no bump is necessary if we're removing the statement
6432 referenced by the current BSI. */
6433 FOR_EACH_BB (bb)
6434 for (si = gsi_start_bb (bb); !gsi_end_p (si);)
6435 {
6436 gimple stmt = gsi_stmt (si);
6437 gimple use_stmt;
6438
6439 if (is_gimple_assign (stmt)
6440 && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
6441 {
6442 tree rhs = gimple_assign_rhs1 (stmt);
6443 tree var;
6444 tree cond = fold (ASSERT_EXPR_COND (rhs));
6445 use_operand_p use_p;
6446 imm_use_iterator iter;
6447
6448 gcc_assert (cond != boolean_false_node);
6449
6450 /* Propagate the RHS into every use of the LHS. */
6451 var = ASSERT_EXPR_VAR (rhs);
6452 FOR_EACH_IMM_USE_STMT (use_stmt, iter,
6453 gimple_assign_lhs (stmt))
6454 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
6455 {
6456 SET_USE (use_p, var);
6457 gcc_assert (TREE_CODE (var) == SSA_NAME);
6458 }
6459
6460 /* And finally, remove the copy, it is not needed. */
6461 gsi_remove (&si, true);
6462 release_defs (stmt);
6463 }
6464 else
6465 gsi_next (&si);
6466 }
6467 }
6468
6469
6470 /* Return true if STMT is interesting for VRP. */
6471
6472 static bool
6473 stmt_interesting_for_vrp (gimple stmt)
6474 {
6475 if (gimple_code (stmt) == GIMPLE_PHI)
6476 {
6477 tree res = gimple_phi_result (stmt);
6478 return (!virtual_operand_p (res)
6479 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6480 || POINTER_TYPE_P (TREE_TYPE (res))));
6481 }
6482 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6483 {
6484 tree lhs = gimple_get_lhs (stmt);
6485
6486 /* In general, assignments with virtual operands are not useful
6487 for deriving ranges, with the obvious exception of calls to
6488 builtin functions. */
6489 if (lhs && TREE_CODE (lhs) == SSA_NAME
6490 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6491 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6492 && ((is_gimple_call (stmt)
6493 && gimple_call_fndecl (stmt) != NULL_TREE
6494 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6495 || !gimple_vuse (stmt)))
6496 return true;
6497 }
6498 else if (gimple_code (stmt) == GIMPLE_COND
6499 || gimple_code (stmt) == GIMPLE_SWITCH)
6500 return true;
6501
6502 return false;
6503 }
6504
6505
6506 /* Initialize local data structures for VRP. */
6507
6508 static void
6509 vrp_initialize (void)
6510 {
6511 basic_block bb;
6512
6513 values_propagated = false;
6514 num_vr_values = num_ssa_names;
6515 vr_value = XCNEWVEC (value_range_t *, num_vr_values);
6516 vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
6517
6518 FOR_EACH_BB (bb)
6519 {
6520 gimple_stmt_iterator si;
6521
6522 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
6523 {
6524 gimple phi = gsi_stmt (si);
6525 if (!stmt_interesting_for_vrp (phi))
6526 {
6527 tree lhs = PHI_RESULT (phi);
6528 set_value_range_to_varying (get_value_range (lhs));
6529 prop_set_simulate_again (phi, false);
6530 }
6531 else
6532 prop_set_simulate_again (phi, true);
6533 }
6534
6535 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
6536 {
6537 gimple stmt = gsi_stmt (si);
6538
6539 /* If the statement is a control insn, then we must
6540 simulate it at least once. Failing to do so means
6541 that its outgoing edges will never get added. */
6542 if (stmt_ends_bb_p (stmt))
6543 prop_set_simulate_again (stmt, true);
6544 else if (!stmt_interesting_for_vrp (stmt))
6545 {
6546 ssa_op_iter i;
6547 tree def;
6548 FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
6549 set_value_range_to_varying (get_value_range (def));
6550 prop_set_simulate_again (stmt, false);
6551 }
6552 else
6553 prop_set_simulate_again (stmt, true);
6554 }
6555 }
6556 }
6557
6558 /* Return the singleton value-range for NAME, or NAME itself if its range is not a singleton. */
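/* E.g., if an illustrative name x_2 has the range [7, 7], this
   returns 7, which lets gimple_fold_stmt_to_constant fold uses of
   x_2 to constants; otherwise x_2 is returned unchanged.  */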
6559
6560 static inline tree
6561 vrp_valueize (tree name)
6562 {
6563 if (TREE_CODE (name) == SSA_NAME)
6564 {
6565 value_range_t *vr = get_value_range (name);
6566 if (vr->type == VR_RANGE
6567 && (vr->min == vr->max
6568 || operand_equal_p (vr->min, vr->max, 0)))
6569 return vr->min;
6570 }
6571 return name;
6572 }
6573
6574 /* Visit assignment STMT. If it produces an interesting range, record
6575 the SSA name in *OUTPUT_P. */
6576
6577 static enum ssa_prop_result
6578 vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
6579 {
6580 tree def, lhs;
6581 ssa_op_iter iter;
6582 enum gimple_code code = gimple_code (stmt);
6583 lhs = gimple_get_lhs (stmt);
6584
6585 /* We only keep track of ranges in integral and pointer types. */
6586 if (TREE_CODE (lhs) == SSA_NAME
6587 && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6588 /* It is valid to have NULL MIN/MAX values on a type. See
6589 build_range_type. */
6590 && TYPE_MIN_VALUE (TREE_TYPE (lhs))
6591 && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
6592 || POINTER_TYPE_P (TREE_TYPE (lhs))))
6593 {
6594 value_range_t new_vr = VR_INITIALIZER;
6595
6596 /* Try folding the statement to a constant first. */
6597 tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
6598 if (tem && !is_overflow_infinity (tem))
6599 set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
6600 /* Then dispatch to value-range extracting functions. */
6601 else if (code == GIMPLE_CALL)
6602 extract_range_basic (&new_vr, stmt);
6603 else
6604 extract_range_from_assignment (&new_vr, stmt);
6605
6606 if (update_value_range (lhs, &new_vr))
6607 {
6608 *output_p = lhs;
6609
6610 if (dump_file && (dump_flags & TDF_DETAILS))
6611 {
6612 fprintf (dump_file, "Found new range for ");
6613 print_generic_expr (dump_file, lhs, 0);
6614 fprintf (dump_file, ": ");
6615 dump_value_range (dump_file, &new_vr);
6616 fprintf (dump_file, "\n\n");
6617 }
6618
6619 if (new_vr.type == VR_VARYING)
6620 return SSA_PROP_VARYING;
6621
6622 return SSA_PROP_INTERESTING;
6623 }
6624
6625 return SSA_PROP_NOT_INTERESTING;
6626 }
6627
6628 /* Every other statement produces no useful ranges. */
6629 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
6630 set_value_range_to_varying (get_value_range (def));
6631
6632 return SSA_PROP_VARYING;
6633 }
6634
6635 /* Helper that gets the value range of the SSA_NAME with version I,
6636 or, if that value range is varying or undefined, a symbolic range
6637 containing just the SSA_NAME itself. */
6638
6639 static inline value_range_t
6640 get_vr_for_comparison (int i)
6641 {
6642 value_range_t vr = *get_value_range (ssa_name (i));
6643
6644 /* If name N_i does not have a valid range, use N_i as its own
6645 range. This allows us to compare against names that may
6646 have N_i in their ranges. */
6647 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6648 {
6649 vr.type = VR_RANGE;
6650 vr.min = ssa_name (i);
6651 vr.max = ssa_name (i);
6652 }
6653
6654 return vr;
6655 }
6656
6657 /* Compare all the value ranges for names equivalent to VAR with VAL
6658 using comparison code COMP. Return the same value returned by
6659 compare_range_with_value, including the setting of
6660 *STRICT_OVERFLOW_P. */
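/* For instance, if an illustrative name x_1 has the range [5, 10]
   and is compared as x_1 < 20, the comparison evaluates to true; if
   some member of x_1's equivalence set disagrees with the others,
   NULL_TREE is returned instead.  */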
6661
6662 static tree
6663 compare_name_with_value (enum tree_code comp, tree var, tree val,
6664 bool *strict_overflow_p)
6665 {
6666 bitmap_iterator bi;
6667 unsigned i;
6668 bitmap e;
6669 tree retval, t;
6670 int used_strict_overflow;
6671 bool sop;
6672 value_range_t equiv_vr;
6673
6674 /* Get the set of equivalences for VAR. */
6675 e = get_value_range (var)->equiv;
6676
6677 /* Start at -1. Set it to 0 if we do a comparison without relying
6678 on overflow, or 1 if all comparisons rely on overflow. */
6679 used_strict_overflow = -1;
6680
6681 /* Compare vars' value range with val. */
6682 equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
6683 sop = false;
6684 retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
6685 if (retval)
6686 used_strict_overflow = sop ? 1 : 0;
6687
6688 /* If the equiv set is empty we have done all work we need to do. */
6689 if (e == NULL)
6690 {
6691 if (retval
6692 && used_strict_overflow > 0)
6693 *strict_overflow_p = true;
6694 return retval;
6695 }
6696
6697 EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
6698 {
6699 equiv_vr = get_vr_for_comparison (i);
6700 sop = false;
6701 t = compare_range_with_value (comp, &equiv_vr, val, &sop);
6702 if (t)
6703 {
6704 /* If we get different answers from different members
6705 of the equivalence set this check must be in a dead
6706 code region. Folding it to a trap representation
6707 would be correct here. For now just return don't-know. */
6708 if (retval != NULL
6709 && t != retval)
6710 {
6711 retval = NULL_TREE;
6712 break;
6713 }
6714 retval = t;
6715
6716 if (!sop)
6717 used_strict_overflow = 0;
6718 else if (used_strict_overflow < 0)
6719 used_strict_overflow = 1;
6720 }
6721 }
6722
6723 if (retval
6724 && used_strict_overflow > 0)
6725 *strict_overflow_p = true;
6726
6727 return retval;
6728 }
6729
6730
6731 /* Given a comparison code COMP and names N1 and N2, compare all the
6732 ranges equivalent to N1 against all the ranges equivalent to N2
6733 to determine the value of N1 COMP N2. Return the same value
6734 returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
6735 whether we relied on an overflow infinity in the comparison. */
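/* For instance, with illustrative names, if i_2 has range [0, 9] and
   j_5 has range [10, 20], then i_2 < j_5 evaluates to true; if the
   two equivalence sets intersect, the names are known to be equal
   and the comparison is decided without looking at the ranges.  */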
6736
6737
6738 static tree
6739 compare_names (enum tree_code comp, tree n1, tree n2,
6740 bool *strict_overflow_p)
6741 {
6742 tree t, retval;
6743 bitmap e1, e2;
6744 bitmap_iterator bi1, bi2;
6745 unsigned i1, i2;
6746 int used_strict_overflow;
6747 static bitmap_obstack *s_obstack = NULL;
6748 static bitmap s_e1 = NULL, s_e2 = NULL;
6749
6750 /* Compare the ranges of every name equivalent to N1 against the
6751 ranges of every name equivalent to N2. */
6752 e1 = get_value_range (n1)->equiv;
6753 e2 = get_value_range (n2)->equiv;
6754
6755 /* Use the fake bitmaps if e1 or e2 are not available. */
6756 if (s_obstack == NULL)
6757 {
6758 s_obstack = XNEW (bitmap_obstack);
6759 bitmap_obstack_initialize (s_obstack);
6760 s_e1 = BITMAP_ALLOC (s_obstack);
6761 s_e2 = BITMAP_ALLOC (s_obstack);
6762 }
6763 if (e1 == NULL)
6764 e1 = s_e1;
6765 if (e2 == NULL)
6766 e2 = s_e2;
6767
6768 /* Add N1 and N2 to their own set of equivalences to avoid
6769 duplicating the body of the loop just to check N1 and N2
6770 ranges. */
6771 bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
6772 bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
6773
6774 /* If the equivalence sets have a common intersection, then the two
6775 names can be compared without checking their ranges. */
6776 if (bitmap_intersect_p (e1, e2))
6777 {
6778 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6779 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6780
6781 return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
6782 ? boolean_true_node
6783 : boolean_false_node;
6784 }
6785
6786 /* Start at -1. Set it to 0 if we do a comparison without relying
6787 on overflow, or 1 if all comparisons rely on overflow. */
6788 used_strict_overflow = -1;
6789
6790 /* Otherwise, compare all the equivalent ranges; N1 and N2 were
6791 added to their own equivalence sets above, so the loop below also
6792 checks their own ranges directly. */
6793 EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
6794 {
6795 value_range_t vr1 = get_vr_for_comparison (i1);
6796
6797 t = retval = NULL_TREE;
6798 EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
6799 {
6800 bool sop = false;
6801
6802 value_range_t vr2 = get_vr_for_comparison (i2);
6803
6804 t = compare_ranges (comp, &vr1, &vr2, &sop);
6805 if (t)
6806 {
6807 /* If we get different answers from different members
6808 of the equivalence set this check must be in a dead
6809 code region. Folding it to a trap representation
6810 would be correct here. For now just return don't-know. */
6811 if (retval != NULL
6812 && t != retval)
6813 {
6814 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6815 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6816 return NULL_TREE;
6817 }
6818 retval = t;
6819
6820 if (!sop)
6821 used_strict_overflow = 0;
6822 else if (used_strict_overflow < 0)
6823 used_strict_overflow = 1;
6824 }
6825 }
6826
6827 if (retval)
6828 {
6829 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6830 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6831 if (used_strict_overflow > 0)
6832 *strict_overflow_p = true;
6833 return retval;
6834 }
6835 }
6836
6837 /* None of the equivalent ranges are useful in computing this
6838 comparison. */
6839 bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
6840 bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
6841 return NULL_TREE;
6842 }
6843
6844 /* Helper function for vrp_evaluate_conditional_warnv. */
6845
6846 static tree
6847 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6848 tree op0, tree op1,
6849 bool * strict_overflow_p)
6850 {
6851 value_range_t *vr0, *vr1;
6852
6853 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6854 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6855
6856 if (vr0 && vr1)
6857 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6858 else if (vr0 && vr1 == NULL)
6859 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6860 else if (vr0 == NULL && vr1)
6861 return (compare_range_with_value
6862 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6863 return NULL;
6864 }
6865
6866 /* Helper function for vrp_evaluate_conditional_warnv. */
6867
6868 static tree
6869 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6870 tree op1, bool use_equiv_p,
6871 bool *strict_overflow_p, bool *only_ranges)
6872 {
6873 tree ret;
6874 if (only_ranges)
6875 *only_ranges = true;
6876
6877 /* We only deal with integral and pointer types. */
6878 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6879 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6880 return NULL_TREE;
6881
6882 if (use_equiv_p)
6883 {
6884 if (only_ranges
6885 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6886 (code, op0, op1, strict_overflow_p)))
6887 return ret;
6888 *only_ranges = false;
6889 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6890 return compare_names (code, op0, op1, strict_overflow_p);
6891 else if (TREE_CODE (op0) == SSA_NAME)
6892 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6893 else if (TREE_CODE (op1) == SSA_NAME)
6894 return (compare_name_with_value
6895 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6896 }
6897 else
6898 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6899 strict_overflow_p);
6900 return NULL_TREE;
6901 }
6902
6903 /* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
6904 information. Return NULL if the conditional cannot be evaluated.
6905 The ranges of all the names equivalent with the operands in COND
6906 will be used when trying to compute the value. If the result is
6907 based on undefined signed overflow, issue a warning if
6908 appropriate. */
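/* For instance, if an illustrative operand x_2 is known to be in
   [20, 30], the predicate x_2 > 10 folds to true.  A result that
   relies on signed overflow being undefined triggers
   -Wstrict-overflow, and one that holds only because of the limited
   range of the operand's type triggers -Wtype-limits.  */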
6909
6910 static tree
6911 vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
6912 {
6913 bool sop;
6914 tree ret;
6915 bool only_ranges;
6916
6917 /* Some passes and foldings leak constants with overflow flag set
6918 into the IL. Avoid doing wrong things with these and bail out. */
6919 if ((TREE_CODE (op0) == INTEGER_CST
6920 && TREE_OVERFLOW (op0))
6921 || (TREE_CODE (op1) == INTEGER_CST
6922 && TREE_OVERFLOW (op1)))
6923 return NULL_TREE;
6924
6925 sop = false;
6926 ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
6927 &only_ranges);
6928
6929 if (ret && sop)
6930 {
6931 enum warn_strict_overflow_code wc;
6932 const char* warnmsg;
6933
6934 if (is_gimple_min_invariant (ret))
6935 {
6936 wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
6937 warnmsg = G_("assuming signed overflow does not occur when "
6938 "simplifying conditional to constant");
6939 }
6940 else
6941 {
6942 wc = WARN_STRICT_OVERFLOW_COMPARISON;
6943 warnmsg = G_("assuming signed overflow does not occur when "
6944 "simplifying conditional");
6945 }
6946
6947 if (issue_strict_overflow_warning (wc))
6948 {
6949 location_t location;
6950
6951 if (!gimple_has_location (stmt))
6952 location = input_location;
6953 else
6954 location = gimple_location (stmt);
6955 warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
6956 }
6957 }
6958
6959 if (warn_type_limits
6960 && ret && only_ranges
6961 && TREE_CODE_CLASS (code) == tcc_comparison
6962 && TREE_CODE (op0) == SSA_NAME)
6963 {
6964 /* If the comparison is being folded and the operand on the LHS
6965 is being compared against a constant value that is outside of
6966 the natural range of OP0's type, then the predicate will
6967 always fold regardless of the value of OP0. If -Wtype-limits
6968 was specified, emit a warning. */
6969 tree type = TREE_TYPE (op0);
6970 value_range_t *vr0 = get_value_range (op0);
6971
6972 if (vr0->type != VR_VARYING
6973 && INTEGRAL_TYPE_P (type)
6974 && vrp_val_is_min (vr0->min)
6975 && vrp_val_is_max (vr0->max)
6976 && is_gimple_min_invariant (op1))
6977 {
6978 location_t location;
6979
6980 if (!gimple_has_location (stmt))
6981 location = input_location;
6982 else
6983 location = gimple_location (stmt);
6984
6985 warning_at (location, OPT_Wtype_limits,
6986 integer_zerop (ret)
6987 ? G_("comparison always false "
6988 "due to limited range of data type")
6989 : G_("comparison always true "
6990 "due to limited range of data type"));
6991 }
6992 }
6993
6994 return ret;
6995 }
6996
6997
6998 /* Visit conditional statement STMT. If we can determine which edge
6999 will be taken out of STMT's basic block, record it in
7000 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7001 SSA_PROP_VARYING. */
7002
7003 static enum ssa_prop_result
7004 vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
7005 {
7006 tree val;
7007 bool sop;
7008
7009 *taken_edge_p = NULL;
7010
7011 if (dump_file && (dump_flags & TDF_DETAILS))
7012 {
7013 tree use;
7014 ssa_op_iter i;
7015
7016 fprintf (dump_file, "\nVisiting conditional with predicate: ");
7017 print_gimple_stmt (dump_file, stmt, 0, 0);
7018 fprintf (dump_file, "\nWith known ranges\n");
7019
7020 FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
7021 {
7022 fprintf (dump_file, "\t");
7023 print_generic_expr (dump_file, use, 0);
7024 fprintf (dump_file, ": ");
7025 dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
7026 }
7027
7028 fprintf (dump_file, "\n");
7029 }
7030
7031 /* Compute the value of the predicate COND by checking the known
7032 ranges of each of its operands.
7033
7034 Note that we cannot evaluate all the equivalent ranges here
7035 because those ranges may not yet be final and with the current
7036 propagation strategy, we cannot determine when the value ranges
7037 of the names in the equivalence set have changed.
7038
7039 For instance, given the following code fragment
7040
7041 i_5 = PHI <8, i_13>
7042 ...
7043 i_14 = ASSERT_EXPR <i_5, i_5 != 0>
7044 if (i_14 == 1)
7045 ...
7046
7047 Assume that on the first visit to i_14, i_5 has the temporary
7048 range [8, 8] because the second argument to the PHI function is
7049 not yet executable. We derive the range ~[0, 0] for i_14 and the
7050 equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
7051 the first time, since i_14 is equivalent to the range [8, 8], we
7052 determine that the predicate is always false.
7053
7054 On the next round of propagation, i_13 is determined to be
7055 VARYING, which causes i_5 to drop down to VARYING. So, another
7056 visit to i_14 is scheduled. In this second visit, we compute the
7057 exact same range and equivalence set for i_14, namely ~[0, 0] and
7058 { i_5 }. But we did not have the previous range for i_5
7059 registered, so vrp_visit_assignment thinks that the range for
7060 i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
7061 is not visited again, which stops propagation from visiting
7062 statements in the THEN clause of that if().
7063
7064 To properly fix this we would need to keep the previous range
7065 value for the names in the equivalence set. This way we would've
7066 discovered that from one visit to the other i_5 changed from
7067 range [8, 8] to VR_VARYING.
7068
7069 However, fixing this apparent limitation may not be worth the
7070 additional checking. Testing on several code bases (GCC, DLV,
7071 MICO, TRAMP3D and SPEC2000) showed that doing this results in
7072 4 more predicates folded in SPEC. */
7073 sop = false;
7074
7075 val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
7076 gimple_cond_lhs (stmt),
7077 gimple_cond_rhs (stmt),
7078 false, &sop, NULL);
7079 if (val)
7080 {
7081 if (!sop)
7082 *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
7083 else
7084 {
7085 if (dump_file && (dump_flags & TDF_DETAILS))
7086 fprintf (dump_file,
7087 "\nIgnoring predicate evaluation because "
7088 "it assumes that signed overflow is undefined");
7089 val = NULL_TREE;
7090 }
7091 }
7092
7093 if (dump_file && (dump_flags & TDF_DETAILS))
7094 {
7095 fprintf (dump_file, "\nPredicate evaluates to: ");
7096 if (val == NULL_TREE)
7097 fprintf (dump_file, "DON'T KNOW\n");
7098 else
7099 print_generic_stmt (dump_file, val, 0);
7100 }
7101
7102 return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
7103 }
7104
7105 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
7106 that includes the value VAL. The search is restricted to the range
7107 [START_IDX, n - 1] where n is the size of VEC.
7108
7109 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
7110 returned.
7111
7112 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
7113 it is placed in IDX and false is returned.
7114
7115 If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
7116 returned. */
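/* For instance, for illustrative labels
     case 1:  case 5 ... 7:  case 10:
   searching for the value 6 finds the "case 5 ... 7" label and
   returns true, while searching for 8 stores the index of "case 10"
   in IDX and returns false.  */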
7117
7118 static bool
7119 find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
7120 {
7121 size_t n = gimple_switch_num_labels (stmt);
7122 size_t low, high;
7123
7124 /* Find case label for minimum of the value range or the next one.
7125 At each iteration we are searching in [low, high - 1]. */
7126
7127 for (low = start_idx, high = n; high != low; )
7128 {
7129 tree t;
7130 int cmp;
7131 /* Note that i != high, so we never ask for n. */
7132 size_t i = (high + low) / 2;
7133 t = gimple_switch_label (stmt, i);
7134
7135 /* Cache the result of comparing CASE_LOW and val. */
7136 cmp = tree_int_cst_compare (CASE_LOW (t), val);
7137
7138 if (cmp == 0)
7139 {
7140 /* Ranges cannot be empty. */
7141 *idx = i;
7142 return true;
7143 }
7144 else if (cmp > 0)
7145 high = i;
7146 else
7147 {
7148 low = i + 1;
7149 if (CASE_HIGH (t) != NULL
7150 && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
7151 {
7152 *idx = i;
7153 return true;
7154 }
7155 }
7156 }
7157
7158 *idx = high;
7159 return false;
7160 }
7161
7162 /* Searches the case label vector VEC for the range of CASE_LABELs that is used
7163 for values between MIN and MAX. The first index is placed in MIN_IDX. The
7164 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
7165 then MAX_IDX < MIN_IDX.
7166 Returns true if the default label is not needed. */
7167
7168 static bool
7169 find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
7170 size_t *max_idx)
7171 {
7172 size_t i, j;
7173 bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
7174 bool max_take_default = !find_case_label_index (stmt, i, max, &j);
7175
7176 if (i == j
7177 && min_take_default
7178 && max_take_default)
7179 {
7180 /* Only the default case label reached.
7181 Return an empty range. */
7182 *min_idx = 1;
7183 *max_idx = 0;
7184 return false;
7185 }
7186 else
7187 {
7188 bool take_default = min_take_default || max_take_default;
7189 tree low, high;
7190 size_t k;
7191
7192 if (max_take_default)
7193 j--;
7194
7195 /* If the case label range is continuous, we do not need
7196 the default case label. Verify that. */
7197 high = CASE_LOW (gimple_switch_label (stmt, i));
7198 if (CASE_HIGH (gimple_switch_label (stmt, i)))
7199 high = CASE_HIGH (gimple_switch_label (stmt, i));
7200 for (k = i + 1; k <= j; ++k)
7201 {
7202 low = CASE_LOW (gimple_switch_label (stmt, k));
7203 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
7204 {
7205 take_default = true;
7206 break;
7207 }
7208 high = low;
7209 if (CASE_HIGH (gimple_switch_label (stmt, k)))
7210 high = CASE_HIGH (gimple_switch_label (stmt, k));
7211 }
7212
7213 *min_idx = i;
7214 *max_idx = j;
7215 return !take_default;
7216 }
7217 }
7218
7219 /* Searches the case label vector VEC for the ranges of CASE_LABELs that are
7220 used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
7221 MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
7222 Returns true if the default label is not needed. */
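/* For instance, for an anti-range ~[4, 6] over illustrative labels
     case 1:  case 5:  case 9:
   the first index range covers "case 1" and the second covers
   "case 9", while "case 5" is excluded; a plain range produces a
   single index range and leaves the second one empty.  */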
7223
7224 static bool
7225 find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
7226 size_t *max_idx1, size_t *min_idx2,
7227 size_t *max_idx2)
7228 {
7229 size_t i, j, k, l;
7230 unsigned int n = gimple_switch_num_labels (stmt);
7231 bool take_default;
7232 tree case_low, case_high;
7233 tree min = vr->min, max = vr->max;
7234
7235 gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
7236
7237 take_default = !find_case_label_range (stmt, min, max, &i, &j);
7238
7239 /* Set the second range to empty. */
7240 *min_idx2 = 1;
7241 *max_idx2 = 0;
7242
7243 if (vr->type == VR_RANGE)
7244 {
7245 *min_idx1 = i;
7246 *max_idx1 = j;
7247 return !take_default;
7248 }
7249
7250 /* Set first range to all case labels. */
7251 *min_idx1 = 1;
7252 *max_idx1 = n - 1;
7253
7254 if (i > j)
7255 return false;
7256
7257 /* Make sure all the values of case labels [i , j] are contained in
7258 range [MIN, MAX]. */
7259 case_low = CASE_LOW (gimple_switch_label (stmt, i));
7260 case_high = CASE_HIGH (gimple_switch_label (stmt, j));
7261 if (tree_int_cst_compare (case_low, min) < 0)
7262 i += 1;
7263 if (case_high != NULL_TREE
7264 && tree_int_cst_compare (max, case_high) < 0)
7265 j -= 1;
7266
7267 if (i > j)
7268 return false;
7269
7270 /* If the range spans case labels [i, j], the corresponding anti-range spans
7271 the labels [1, i - 1] and [j + 1, n - 1]. */
7272 k = j + 1;
7273 l = n - 1;
7274 if (k > l)
7275 {
7276 k = 1;
7277 l = 0;
7278 }
7279
7280 j = i - 1;
7281 i = 1;
7282 if (i > j)
7283 {
7284 i = k;
7285 j = l;
7286 k = 1;
7287 l = 0;
7288 }
7289
7290 *min_idx1 = i;
7291 *max_idx1 = j;
7292 *min_idx2 = k;
7293 *max_idx2 = l;
7294 return false;
7295 }
7296
7297 /* Visit switch statement STMT. If we can determine which edge
7298 will be taken out of STMT's basic block, record it in
7299 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7300 SSA_PROP_VARYING. */
7301
7302 static enum ssa_prop_result
7303 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
7304 {
7305 tree op, val;
7306 value_range_t *vr;
7307 size_t i = 0, j = 0, k, l;
7308 bool take_default;
7309
7310 *taken_edge_p = NULL;
7311 op = gimple_switch_index (stmt);
7312 if (TREE_CODE (op) != SSA_NAME)
7313 return SSA_PROP_VARYING;
7314
7315 vr = get_value_range (op);
7316 if (dump_file && (dump_flags & TDF_DETAILS))
7317 {
7318 fprintf (dump_file, "\nVisiting switch expression with operand ");
7319 print_generic_expr (dump_file, op, 0);
7320 fprintf (dump_file, " with known range ");
7321 dump_value_range (dump_file, vr);
7322 fprintf (dump_file, "\n");
7323 }
7324
7325 if ((vr->type != VR_RANGE
7326 && vr->type != VR_ANTI_RANGE)
7327 || symbolic_range_p (vr))
7328 return SSA_PROP_VARYING;
7329
7330 /* Find the single edge that is taken from the switch expression. */
7331 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7332
7333 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7334 label. */
7335 if (j < i)
7336 {
7337 gcc_assert (take_default);
7338 val = gimple_switch_default_label (stmt);
7339 }
7340 else
7341 {
7342 /* Check if labels with index i to j and maybe the default label
7343 all reach the same label. */
7344
7345 val = gimple_switch_label (stmt, i);
7346 if (take_default
7347 && CASE_LABEL (gimple_switch_default_label (stmt))
7348 != CASE_LABEL (val))
7349 {
7350 if (dump_file && (dump_flags & TDF_DETAILS))
7351 fprintf (dump_file, " not a single destination for this "
7352 "range\n");
7353 return SSA_PROP_VARYING;
7354 }
7355 for (++i; i <= j; ++i)
7356 {
7357 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7358 {
7359 if (dump_file && (dump_flags & TDF_DETAILS))
7360 fprintf (dump_file, " not a single destination for this "
7361 "range\n");
7362 return SSA_PROP_VARYING;
7363 }
7364 }
7365 for (; k <= l; ++k)
7366 {
7367 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7368 {
7369 if (dump_file && (dump_flags & TDF_DETAILS))
7370 fprintf (dump_file, " not a single destination for this "
7371 "range\n");
7372 return SSA_PROP_VARYING;
7373 }
7374 }
7375 }
7376
7377 *taken_edge_p = find_edge (gimple_bb (stmt),
7378 label_to_block (CASE_LABEL (val)));
7379
7380 if (dump_file && (dump_flags & TDF_DETAILS))
7381 {
7382 fprintf (dump_file, " will take edge to ");
7383 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7384 }
7385
7386 return SSA_PROP_INTERESTING;
7387 }
7388
7389
7390 /* Evaluate statement STMT. If the statement produces a useful range,
7391 return SSA_PROP_INTERESTING and record the SSA name with the
7392 interesting range into *OUTPUT_P.
7393
7394 If STMT is a conditional branch and we can determine its truth
7395 value, the taken edge is recorded in *TAKEN_EDGE_P.
7396
7397 If STMT produces a varying value, return SSA_PROP_VARYING. */
7398
7399 static enum ssa_prop_result
7400 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7401 {
7402 tree def;
7403 ssa_op_iter iter;
7404
7405 if (dump_file && (dump_flags & TDF_DETAILS))
7406 {
7407 fprintf (dump_file, "\nVisiting statement:\n");
7408 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7409 fprintf (dump_file, "\n");
7410 }
7411
7412 if (!stmt_interesting_for_vrp (stmt))
7413 gcc_assert (stmt_ends_bb_p (stmt));
7414 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7415 {
7416 /* In general, assignments with virtual operands are not useful
7417 for deriving ranges, with the obvious exception of calls to
7418 builtin functions. */
7419 if ((is_gimple_call (stmt)
7420 && gimple_call_fndecl (stmt) != NULL_TREE
7421 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
7422 || !gimple_vuse (stmt))
7423 return vrp_visit_assignment_or_call (stmt, output_p);
7424 }
7425 else if (gimple_code (stmt) == GIMPLE_COND)
7426 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7427 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7428 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7429
7430 /* All other statements produce nothing of interest for VRP, so mark
7431 their outputs varying and prevent further simulation. */
7432 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7433 set_value_range_to_varying (get_value_range (def));
7434
7435 return SSA_PROP_VARYING;
7436 }
7437
7438 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7439 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7440 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7441 possible such range. The resulting range is not canonicalized. */
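/* A sketch of the expected behavior (values chosen arbitrarily): the union
   of [1, 5] and [3, 10] is the convex hull [1, 10], while the union of
   [TYPE_MIN, 3] and [8, TYPE_MAX] can be represented exactly as the
   anti-range ~[4, 7].  */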
7442
7443 static void
7444 union_ranges (enum value_range_type *vr0type,
7445 tree *vr0min, tree *vr0max,
7446 enum value_range_type vr1type,
7447 tree vr1min, tree vr1max)
7448 {
7449 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7450 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7451
7452 /* [] is vr0, () is vr1 in the following classification comments. */
7453 if (mineq && maxeq)
7454 {
7455 /* [( )] */
7456 if (*vr0type == vr1type)
7457 /* Nothing to do for equal ranges. */
7458 ;
7459 else if ((*vr0type == VR_RANGE
7460 && vr1type == VR_ANTI_RANGE)
7461 || (*vr0type == VR_ANTI_RANGE
7462 && vr1type == VR_RANGE))
7463 {
7464 /* The union of a range and the matching anti-range is varying. */
7465 goto give_up;
7466 }
7467 else
7468 gcc_unreachable ();
7469 }
7470 else if (operand_less_p (*vr0max, vr1min) == 1
7471 || operand_less_p (vr1max, *vr0min) == 1)
7472 {
7473 /* [ ] ( ) or ( ) [ ]
7474 If the ranges have an empty intersection, the result of the union
7475 operation is the anti-range if one of them is an anti-range, or it
7476 covers everything if both are anti-ranges. */
7477 if (*vr0type == VR_ANTI_RANGE
7478 && vr1type == VR_ANTI_RANGE)
7479 goto give_up;
7480 else if (*vr0type == VR_ANTI_RANGE
7481 && vr1type == VR_RANGE)
7482 ;
7483 else if (*vr0type == VR_RANGE
7484 && vr1type == VR_ANTI_RANGE)
7485 {
7486 *vr0type = vr1type;
7487 *vr0min = vr1min;
7488 *vr0max = vr1max;
7489 }
7490 else if (*vr0type == VR_RANGE
7491 && vr1type == VR_RANGE)
7492 {
7493 /* The result is the convex hull of both ranges. */
7494 if (operand_less_p (*vr0max, vr1min) == 1)
7495 {
7496 /* If the result can be an anti-range, create one. */
7497 if (TREE_CODE (*vr0max) == INTEGER_CST
7498 && TREE_CODE (vr1min) == INTEGER_CST
7499 && vrp_val_is_min (*vr0min)
7500 && vrp_val_is_max (vr1max))
7501 {
7502 tree min = int_const_binop (PLUS_EXPR,
7503 *vr0max, integer_one_node);
7504 tree max = int_const_binop (MINUS_EXPR,
7505 vr1min, integer_one_node);
7506 if (!operand_less_p (max, min))
7507 {
7508 *vr0type = VR_ANTI_RANGE;
7509 *vr0min = min;
7510 *vr0max = max;
7511 }
7512 else
7513 *vr0max = vr1max;
7514 }
7515 else
7516 *vr0max = vr1max;
7517 }
7518 else
7519 {
7520 /* If the result can be an anti-range, create one. */
7521 if (TREE_CODE (vr1max) == INTEGER_CST
7522 && TREE_CODE (*vr0min) == INTEGER_CST
7523 && vrp_val_is_min (vr1min)
7524 && vrp_val_is_max (*vr0max))
7525 {
7526 tree min = int_const_binop (PLUS_EXPR,
7527 vr1max, integer_one_node);
7528 tree max = int_const_binop (MINUS_EXPR,
7529 *vr0min, integer_one_node);
7530 if (!operand_less_p (max, min))
7531 {
7532 *vr0type = VR_ANTI_RANGE;
7533 *vr0min = min;
7534 *vr0max = max;
7535 }
7536 else
7537 *vr0min = vr1min;
7538 }
7539 else
7540 *vr0min = vr1min;
7541 }
7542 }
7543 else
7544 gcc_unreachable ();
7545 }
7546 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7547 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7548 {
7549 /* [ ( ) ] or [( ) ] or [ ( )] */
7550 if (*vr0type == VR_RANGE
7551 && vr1type == VR_RANGE)
7552 ;
7553 else if (*vr0type == VR_ANTI_RANGE
7554 && vr1type == VR_ANTI_RANGE)
7555 {
7556 *vr0type = vr1type;
7557 *vr0min = vr1min;
7558 *vr0max = vr1max;
7559 }
7560 else if (*vr0type == VR_ANTI_RANGE
7561 && vr1type == VR_RANGE)
7562 {
7563 /* Arbitrarily choose the right or left gap. */
7564 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7565 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7566 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7567 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7568 else
7569 goto give_up;
7570 }
7571 else if (*vr0type == VR_RANGE
7572 && vr1type == VR_ANTI_RANGE)
7573 /* The result covers everything. */
7574 goto give_up;
7575 else
7576 gcc_unreachable ();
7577 }
7578 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7579 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7580 {
7581 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7582 if (*vr0type == VR_RANGE
7583 && vr1type == VR_RANGE)
7584 {
7585 *vr0type = vr1type;
7586 *vr0min = vr1min;
7587 *vr0max = vr1max;
7588 }
7589 else if (*vr0type == VR_ANTI_RANGE
7590 && vr1type == VR_ANTI_RANGE)
7591 ;
7592 else if (*vr0type == VR_RANGE
7593 && vr1type == VR_ANTI_RANGE)
7594 {
7595 *vr0type = VR_ANTI_RANGE;
7596 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7597 {
7598 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7599 *vr0min = vr1min;
7600 }
7601 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7602 {
7603 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7604 *vr0max = vr1max;
7605 }
7606 else
7607 goto give_up;
7608 }
7609 else if (*vr0type == VR_ANTI_RANGE
7610 && vr1type == VR_RANGE)
7611 /* The result covers everything. */
7612 goto give_up;
7613 else
7614 gcc_unreachable ();
7615 }
7616 else if ((operand_less_p (vr1min, *vr0max) == 1
7617 || operand_equal_p (vr1min, *vr0max, 0))
7618 && operand_less_p (*vr0min, vr1min) == 1)
7619 {
7620 /* [ ( ] ) or [ ]( ) */
7621 if (*vr0type == VR_RANGE
7622 && vr1type == VR_RANGE)
7623 *vr0max = vr1max;
7624 else if (*vr0type == VR_ANTI_RANGE
7625 && vr1type == VR_ANTI_RANGE)
7626 *vr0min = vr1min;
7627 else if (*vr0type == VR_ANTI_RANGE
7628 && vr1type == VR_RANGE)
7629 {
7630 if (TREE_CODE (vr1min) == INTEGER_CST)
7631 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7632 else
7633 goto give_up;
7634 }
7635 else if (*vr0type == VR_RANGE
7636 && vr1type == VR_ANTI_RANGE)
7637 {
7638 if (TREE_CODE (*vr0max) == INTEGER_CST)
7639 {
7640 *vr0type = vr1type;
7641 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7642 *vr0max = vr1max;
7643 }
7644 else
7645 goto give_up;
7646 }
7647 else
7648 gcc_unreachable ();
7649 }
7650 else if ((operand_less_p (*vr0min, vr1max) == 1
7651 || operand_equal_p (*vr0min, vr1max, 0))
7652 && operand_less_p (vr1min, *vr0min) == 1)
7653 {
7654 /* ( [ ) ] or ( )[ ] */
7655 if (*vr0type == VR_RANGE
7656 && vr1type == VR_RANGE)
7657 *vr0min = vr1min;
7658 else if (*vr0type == VR_ANTI_RANGE
7659 && vr1type == VR_ANTI_RANGE)
7660 *vr0max = vr1max;
7661 else if (*vr0type == VR_ANTI_RANGE
7662 && vr1type == VR_RANGE)
7663 {
7664 if (TREE_CODE (vr1max) == INTEGER_CST)
7665 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7666 else
7667 goto give_up;
7668 }
7669 else if (*vr0type == VR_RANGE
7670 && vr1type == VR_ANTI_RANGE)
7671 {
7672 if (TREE_CODE (*vr0min) == INTEGER_CST)
7673 {
7674 *vr0type = vr1type;
7675 *vr0min = vr1min;
7676 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7677 }
7678 else
7679 goto give_up;
7680 }
7681 else
7682 gcc_unreachable ();
7683 }
7684 else
7685 goto give_up;
7686
7687 return;
7688
7689 give_up:
7690 *vr0type = VR_VARYING;
7691 *vr0min = NULL_TREE;
7692 *vr0max = NULL_TREE;
7693 }
7694
7695 /* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7696 { VR1TYPE, VR1MIN, VR1MAX } and store the result
7697 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7698 possible such range. The resulting range is not canonicalized. */
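/* A sketch of the expected behavior (values chosen arbitrarily): [1, 10]
   intersected with [5, 20] yields [5, 10], [1, 10] intersected with
   ~[3, 30] yields [1, 2], and two ranges with an empty intersection
   yield VR_UNDEFINED.  */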
7699
7700 static void
7701 intersect_ranges (enum value_range_type *vr0type,
7702 tree *vr0min, tree *vr0max,
7703 enum value_range_type vr1type,
7704 tree vr1min, tree vr1max)
7705 {
7706 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7707 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7708
7709 /* [] is vr0, () is vr1 in the following classification comments. */
7710 if (mineq && maxeq)
7711 {
7712 /* [( )] */
7713 if (*vr0type == vr1type)
7714 /* Nothing to do for equal ranges. */
7715 ;
7716 else if ((*vr0type == VR_RANGE
7717 && vr1type == VR_ANTI_RANGE)
7718 || (*vr0type == VR_ANTI_RANGE
7719 && vr1type == VR_RANGE))
7720 {
7721 /* The intersection of a range and the matching anti-range is empty. */
7722 *vr0type = VR_UNDEFINED;
7723 *vr0min = NULL_TREE;
7724 *vr0max = NULL_TREE;
7725 }
7726 else
7727 gcc_unreachable ();
7728 }
7729 else if (operand_less_p (*vr0max, vr1min) == 1
7730 || operand_less_p (vr1max, *vr0min) == 1)
7731 {
7732 /* [ ] ( ) or ( ) [ ]
7733 If the ranges have an empty intersection, the result of the
7734 intersect operation is the range for intersecting an
7735 anti-range with a range or empty when intersecting two ranges. */
7736 if (*vr0type == VR_RANGE
7737 && vr1type == VR_ANTI_RANGE)
7738 ;
7739 else if (*vr0type == VR_ANTI_RANGE
7740 && vr1type == VR_RANGE)
7741 {
7742 *vr0type = vr1type;
7743 *vr0min = vr1min;
7744 *vr0max = vr1max;
7745 }
7746 else if (*vr0type == VR_RANGE
7747 && vr1type == VR_RANGE)
7748 {
7749 *vr0type = VR_UNDEFINED;
7750 *vr0min = NULL_TREE;
7751 *vr0max = NULL_TREE;
7752 }
7753 else if (*vr0type == VR_ANTI_RANGE
7754 && vr1type == VR_ANTI_RANGE)
7755 {
7756 /* If the anti-ranges are adjacent to each other merge them. */
7757 if (TREE_CODE (*vr0max) == INTEGER_CST
7758 && TREE_CODE (vr1min) == INTEGER_CST
7759 && operand_less_p (*vr0max, vr1min) == 1
7760 && integer_onep (int_const_binop (MINUS_EXPR,
7761 vr1min, *vr0max)))
7762 *vr0max = vr1max;
7763 else if (TREE_CODE (vr1max) == INTEGER_CST
7764 && TREE_CODE (*vr0min) == INTEGER_CST
7765 && operand_less_p (vr1max, *vr0min) == 1
7766 && integer_onep (int_const_binop (MINUS_EXPR,
7767 *vr0min, vr1max)))
7768 *vr0min = vr1min;
7769 /* Else arbitrarily take VR0. */
7770 }
7771 }
7772 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7773 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7774 {
7775 /* [ ( ) ] or [( ) ] or [ ( )] */
7776 if (*vr0type == VR_RANGE
7777 && vr1type == VR_RANGE)
7778 {
7779 /* If both are ranges the result is the inner one. */
7780 *vr0type = vr1type;
7781 *vr0min = vr1min;
7782 *vr0max = vr1max;
7783 }
7784 else if (*vr0type == VR_RANGE
7785 && vr1type == VR_ANTI_RANGE)
7786 {
7787 /* Choose the right gap if the left one is empty. */
7788 if (mineq)
7789 {
7790 if (TREE_CODE (vr1max) == INTEGER_CST)
7791 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7792 else
7793 *vr0min = vr1max;
7794 }
7795 /* Choose the left gap if the right one is empty. */
7796 else if (maxeq)
7797 {
7798 if (TREE_CODE (vr1min) == INTEGER_CST)
7799 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7800 integer_one_node);
7801 else
7802 *vr0max = vr1min;
7803 }
7804 /* Choose the anti-range if the range is effectively varying. */
7805 else if (vrp_val_is_min (*vr0min)
7806 && vrp_val_is_max (*vr0max))
7807 {
7808 *vr0type = vr1type;
7809 *vr0min = vr1min;
7810 *vr0max = vr1max;
7811 }
7812 /* Else choose the range. */
7813 }
7814 else if (*vr0type == VR_ANTI_RANGE
7815 && vr1type == VR_ANTI_RANGE)
7816 /* If both are anti-ranges the result is the outer one. */
7817 ;
7818 else if (*vr0type == VR_ANTI_RANGE
7819 && vr1type == VR_RANGE)
7820 {
7821 /* The intersection is empty. */
7822 *vr0type = VR_UNDEFINED;
7823 *vr0min = NULL_TREE;
7824 *vr0max = NULL_TREE;
7825 }
7826 else
7827 gcc_unreachable ();
7828 }
7829 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7830 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7831 {
7832 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7833 if (*vr0type == VR_RANGE
7834 && vr1type == VR_RANGE)
7835 /* Choose the inner range. */
7836 ;
7837 else if (*vr0type == VR_ANTI_RANGE
7838 && vr1type == VR_RANGE)
7839 {
7840 /* Choose the right gap if the left is empty. */
7841 if (mineq)
7842 {
7843 *vr0type = VR_RANGE;
7844 if (TREE_CODE (*vr0max) == INTEGER_CST)
7845 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7846 integer_one_node);
7847 else
7848 *vr0min = *vr0max;
7849 *vr0max = vr1max;
7850 }
7851 /* Choose the left gap if the right is empty. */
7852 else if (maxeq)
7853 {
7854 *vr0type = VR_RANGE;
7855 if (TREE_CODE (*vr0min) == INTEGER_CST)
7856 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7857 integer_one_node);
7858 else
7859 *vr0max = *vr0min;
7860 *vr0min = vr1min;
7861 }
7862 /* Choose the anti-range if the range is effectively varying. */
7863 else if (vrp_val_is_min (vr1min)
7864 && vrp_val_is_max (vr1max))
7865 ;
7866 /* Else choose the range. */
7867 else
7868 {
7869 *vr0type = vr1type;
7870 *vr0min = vr1min;
7871 *vr0max = vr1max;
7872 }
7873 }
7874 else if (*vr0type == VR_ANTI_RANGE
7875 && vr1type == VR_ANTI_RANGE)
7876 {
7877 /* If both are anti-ranges the result is the outer one. */
7878 *vr0type = vr1type;
7879 *vr0min = vr1min;
7880 *vr0max = vr1max;
7881 }
7882 else if (vr1type == VR_ANTI_RANGE
7883 && *vr0type == VR_RANGE)
7884 {
7885 /* The intersection is empty. */
7886 *vr0type = VR_UNDEFINED;
7887 *vr0min = NULL_TREE;
7888 *vr0max = NULL_TREE;
7889 }
7890 else
7891 gcc_unreachable ();
7892 }
7893 else if ((operand_less_p (vr1min, *vr0max) == 1
7894 || operand_equal_p (vr1min, *vr0max, 0))
7895 && operand_less_p (*vr0min, vr1min) == 1)
7896 {
7897 /* [ ( ] ) or [ ]( ) */
7898 if (*vr0type == VR_ANTI_RANGE
7899 && vr1type == VR_ANTI_RANGE)
7900 *vr0max = vr1max;
7901 else if (*vr0type == VR_RANGE
7902 && vr1type == VR_RANGE)
7903 *vr0min = vr1min;
7904 else if (*vr0type == VR_RANGE
7905 && vr1type == VR_ANTI_RANGE)
7906 {
7907 if (TREE_CODE (vr1min) == INTEGER_CST)
7908 *vr0max = int_const_binop (MINUS_EXPR, vr1min,
7909 integer_one_node);
7910 else
7911 *vr0max = vr1min;
7912 }
7913 else if (*vr0type == VR_ANTI_RANGE
7914 && vr1type == VR_RANGE)
7915 {
7916 *vr0type = VR_RANGE;
7917 if (TREE_CODE (*vr0max) == INTEGER_CST)
7918 *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
7919 integer_one_node);
7920 else
7921 *vr0min = *vr0max;
7922 *vr0max = vr1max;
7923 }
7924 else
7925 gcc_unreachable ();
7926 }
7927 else if ((operand_less_p (*vr0min, vr1max) == 1
7928 || operand_equal_p (*vr0min, vr1max, 0))
7929 && operand_less_p (vr1min, *vr0min) == 1)
7930 {
7931 /* ( [ ) ] or ( )[ ] */
7932 if (*vr0type == VR_ANTI_RANGE
7933 && vr1type == VR_ANTI_RANGE)
7934 *vr0min = vr1min;
7935 else if (*vr0type == VR_RANGE
7936 && vr1type == VR_RANGE)
7937 *vr0max = vr1max;
7938 else if (*vr0type == VR_RANGE
7939 && vr1type == VR_ANTI_RANGE)
7940 {
7941 if (TREE_CODE (vr1max) == INTEGER_CST)
7942 *vr0min = int_const_binop (PLUS_EXPR, vr1max,
7943 integer_one_node);
7944 else
7945 *vr0min = vr1max;
7946 }
7947 else if (*vr0type == VR_ANTI_RANGE
7948 && vr1type == VR_RANGE)
7949 {
7950 *vr0type = VR_RANGE;
7951 if (TREE_CODE (*vr0min) == INTEGER_CST)
7952 *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
7953 integer_one_node);
7954 else
7955 *vr0max = *vr0min;
7956 *vr0min = vr1min;
7957 }
7958 else
7959 gcc_unreachable ();
7960 }
7961
7962 /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
7963 result for the intersection. That's always a conservatively
7964 correct estimate. */
7965
7966 return;
7967 }
7968
7969
7970 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
7971 in *VR0. This may not be the smallest possible such range. */
7972
7973 static void
7974 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
7975 {
7976 value_range_t saved;
7977
7978 /* If either range is VR_VARYING the other one wins. */
7979 if (vr1->type == VR_VARYING)
7980 return;
7981 if (vr0->type == VR_VARYING)
7982 {
7983 copy_value_range (vr0, vr1);
7984 return;
7985 }
7986
7987 /* When either range is VR_UNDEFINED the resulting range is
7988 VR_UNDEFINED, too. */
7989 if (vr0->type == VR_UNDEFINED)
7990 return;
7991 if (vr1->type == VR_UNDEFINED)
7992 {
7993 set_value_range_to_undefined (vr0);
7994 return;
7995 }
7996
7997 /* Save the original vr0 so we can return it as conservative intersection
7998 result when our worker turns things to varying. */
7999 saved = *vr0;
8000 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8001 vr1->type, vr1->min, vr1->max);
8002 /* Make sure to canonicalize the result though as the inversion of a
8003 VR_RANGE can still be a VR_RANGE. */
8004 set_and_canonicalize_value_range (vr0, vr0->type,
8005 vr0->min, vr0->max, vr0->equiv);
8006 /* If that failed, use the saved original VR0. */
8007 if (vr0->type == VR_VARYING)
8008 {
8009 *vr0 = saved;
8010 return;
8011 }
8012 /* If the result is VR_UNDEFINED there is no need to mess with
8013 the equivalencies. */
8014 if (vr0->type == VR_UNDEFINED)
8015 return;
8016
8017 /* The resulting set of equivalences for range intersection is the union of
8018 the two sets. */
8019 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8020 bitmap_ior_into (vr0->equiv, vr1->equiv);
8021 else if (vr1->equiv && !vr0->equiv)
8022 bitmap_copy (vr0->equiv, vr1->equiv);
8023 }
8024
8025 static void
8026 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8027 {
8028 if (dump_file && (dump_flags & TDF_DETAILS))
8029 {
8030 fprintf (dump_file, "Intersecting\n ");
8031 dump_value_range (dump_file, vr0);
8032 fprintf (dump_file, "\nand\n ");
8033 dump_value_range (dump_file, vr1);
8034 fprintf (dump_file, "\n");
8035 }
8036 vrp_intersect_ranges_1 (vr0, vr1);
8037 if (dump_file && (dump_flags & TDF_DETAILS))
8038 {
8039 fprintf (dump_file, "to\n ");
8040 dump_value_range (dump_file, vr0);
8041 fprintf (dump_file, "\n");
8042 }
8043 }
8044
8045 /* Meet operation for value ranges. Given two value ranges VR0 and
8046 VR1, store in VR0 a range that contains both VR0 and VR1. This
8047 may not be the smallest possible such range. */
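/* For illustration (values chosen arbitrarily): meeting [1, 5] with
   [10, 20] yields the conservative hull [1, 20], and meeting any range
   with VR_UNDEFINED simply keeps the defined operand.  */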
8048
8049 static void
8050 vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
8051 {
8052 value_range_t saved;
8053
8054 if (vr0->type == VR_UNDEFINED)
8055 {
8056 set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
8057 return;
8058 }
8059
8060 if (vr1->type == VR_UNDEFINED)
8061 {
8062 /* VR0 already has the resulting range. */
8063 return;
8064 }
8065
8066 if (vr0->type == VR_VARYING)
8067 {
8068 /* Nothing to do. VR0 already has the resulting range. */
8069 return;
8070 }
8071
8072 if (vr1->type == VR_VARYING)
8073 {
8074 set_value_range_to_varying (vr0);
8075 return;
8076 }
8077
8078 saved = *vr0;
8079 union_ranges (&vr0->type, &vr0->min, &vr0->max,
8080 vr1->type, vr1->min, vr1->max);
8081 if (vr0->type == VR_VARYING)
8082 {
8083 /* Failed to find an efficient meet. Before giving up and setting
8084 the result to VARYING, see if we can at least derive a useful
8085 anti-range. FIXME, all this nonsense about distinguishing
8086 anti-ranges from ranges is necessary because of the odd
8087 semantics of range_includes_zero_p and friends. */
8088 if (((saved.type == VR_RANGE
8089 && range_includes_zero_p (saved.min, saved.max) == 0)
8090 || (saved.type == VR_ANTI_RANGE
8091 && range_includes_zero_p (saved.min, saved.max) == 1))
8092 && ((vr1->type == VR_RANGE
8093 && range_includes_zero_p (vr1->min, vr1->max) == 0)
8094 || (vr1->type == VR_ANTI_RANGE
8095 && range_includes_zero_p (vr1->min, vr1->max) == 1)))
8096 {
8097 set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
8098
8099 /* Since this meet operation did not result from the meeting of
8100 two equivalent names, VR0 cannot have any equivalences. */
8101 if (vr0->equiv)
8102 bitmap_clear (vr0->equiv);
8103 return;
8104 }
8105
8106 set_value_range_to_varying (vr0);
8107 return;
8108 }
8109 set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
8110 vr0->equiv);
8111 if (vr0->type == VR_VARYING)
8112 return;
8113
8114 /* The resulting set of equivalences is always the intersection of
8115 the two sets. */
8116 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8117 bitmap_and_into (vr0->equiv, vr1->equiv);
8118 else if (vr0->equiv && !vr1->equiv)
8119 bitmap_clear (vr0->equiv);
8120 }
8121
8122 static void
8123 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8124 {
8125 if (dump_file && (dump_flags & TDF_DETAILS))
8126 {
8127 fprintf (dump_file, "Meeting\n ");
8128 dump_value_range (dump_file, vr0);
8129 fprintf (dump_file, "\nand\n ");
8130 dump_value_range (dump_file, vr1);
8131 fprintf (dump_file, "\n");
8132 }
8133 vrp_meet_1 (vr0, vr1);
8134 if (dump_file && (dump_flags & TDF_DETAILS))
8135 {
8136 fprintf (dump_file, "to\n ");
8137 dump_value_range (dump_file, vr0);
8138 fprintf (dump_file, "\n");
8139 }
8140 }
8141
8142
8143 /* Visit all arguments for PHI node PHI that flow through executable
8144 edges. If a valid value range can be derived from all the incoming
8145 value ranges, set a new range for the LHS of PHI. */
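/* For illustration (SSA names and ranges are hypothetical): given
   x_3 = PHI <0(2), x_5(3)> with both edges executable and x_5 known to
   be in [1, 9], the argument ranges meet to [0, 9], which becomes the
   candidate range for x_3 before the convergence heuristics below are
   applied.  */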
8146
8147 static enum ssa_prop_result
8148 vrp_visit_phi_node (gimple phi)
8149 {
8150 size_t i;
8151 tree lhs = PHI_RESULT (phi);
8152 value_range_t *lhs_vr = get_value_range (lhs);
8153 value_range_t vr_result = VR_INITIALIZER;
8154 bool first = true;
8155 int edges, old_edges;
8156 struct loop *l;
8157
8158 if (dump_file && (dump_flags & TDF_DETAILS))
8159 {
8160 fprintf (dump_file, "\nVisiting PHI node: ");
8161 print_gimple_stmt (dump_file, phi, 0, dump_flags);
8162 }
8163
8164 edges = 0;
8165 for (i = 0; i < gimple_phi_num_args (phi); i++)
8166 {
8167 edge e = gimple_phi_arg_edge (phi, i);
8168
8169 if (dump_file && (dump_flags & TDF_DETAILS))
8170 {
8171 fprintf (dump_file,
8172 "\n Argument #%d (%d -> %d %sexecutable)\n",
8173 (int) i, e->src->index, e->dest->index,
8174 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
8175 }
8176
8177 if (e->flags & EDGE_EXECUTABLE)
8178 {
8179 tree arg = PHI_ARG_DEF (phi, i);
8180 value_range_t vr_arg;
8181
8182 ++edges;
8183
8184 if (TREE_CODE (arg) == SSA_NAME)
8185 {
8186 vr_arg = *(get_value_range (arg));
8187 /* Do not allow equivalences or symbolic ranges to leak in from
8188 backedges. That creates invalid equivalences.
8189 See PR53465 and PR54767. */
8190 if (e->flags & EDGE_DFS_BACK
8191 && (vr_arg.type == VR_RANGE
8192 || vr_arg.type == VR_ANTI_RANGE))
8193 {
8194 vr_arg.equiv = NULL;
8195 if (symbolic_range_p (&vr_arg))
8196 {
8197 vr_arg.type = VR_VARYING;
8198 vr_arg.min = NULL_TREE;
8199 vr_arg.max = NULL_TREE;
8200 }
8201 }
8202 }
8203 else
8204 {
8205 if (is_overflow_infinity (arg))
8206 {
8207 arg = copy_node (arg);
8208 TREE_OVERFLOW (arg) = 0;
8209 }
8210
8211 vr_arg.type = VR_RANGE;
8212 vr_arg.min = arg;
8213 vr_arg.max = arg;
8214 vr_arg.equiv = NULL;
8215 }
8216
8217 if (dump_file && (dump_flags & TDF_DETAILS))
8218 {
8219 fprintf (dump_file, "\t");
8220 print_generic_expr (dump_file, arg, dump_flags);
8221 fprintf (dump_file, "\n\tValue: ");
8222 dump_value_range (dump_file, &vr_arg);
8223 fprintf (dump_file, "\n");
8224 }
8225
8226 if (first)
8227 copy_value_range (&vr_result, &vr_arg);
8228 else
8229 vrp_meet (&vr_result, &vr_arg);
8230 first = false;
8231
8232 if (vr_result.type == VR_VARYING)
8233 break;
8234 }
8235 }
8236
8237 if (vr_result.type == VR_VARYING)
8238 goto varying;
8239 else if (vr_result.type == VR_UNDEFINED)
8240 goto update_range;
8241
8242 old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
8243 vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
8244
8245 /* To prevent infinite iterations in the algorithm, derive ranges
8246 when the new value is slightly bigger or smaller than the
8247 previous one. We don't do this if we have seen a new executable
8248 edge; this helps us avoid an overflow infinity for conditionals
8249 which are not in a loop. If the old value-range was VR_UNDEFINED
8250 use the updated range and iterate one more time. */
8251 if (edges > 0
8252 && gimple_phi_num_args (phi) > 1
8253 && edges == old_edges
8254 && lhs_vr->type != VR_UNDEFINED)
8255 {
8256 int cmp_min = compare_values (lhs_vr->min, vr_result.min);
8257 int cmp_max = compare_values (lhs_vr->max, vr_result.max);
8258
8259 /* For non VR_RANGE or for pointers fall back to varying if
8260 the range changed. */
8261 if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
8262 || POINTER_TYPE_P (TREE_TYPE (lhs)))
8263 && (cmp_min != 0 || cmp_max != 0))
8264 goto varying;
8265
8266 /* If the new minimum is smaller or larger than the previous
8267 one, go all the way to -INF. In the first case, to avoid
8268 iterating millions of times to reach -INF, and in the
8269 other case to avoid infinite bouncing between different
8270 minimums. */
8271 if (cmp_min > 0 || cmp_min < 0)
8272 {
8273 if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
8274 || !vrp_var_may_overflow (lhs, phi))
8275 vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
8276 else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
8277 vr_result.min =
8278 negative_overflow_infinity (TREE_TYPE (vr_result.min));
8279 }
8280
8281 /* Similarly, if the new maximum is smaller or larger than
8282 the previous one, go all the way to +INF. */
8283 if (cmp_max < 0 || cmp_max > 0)
8284 {
8285 if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
8286 || !vrp_var_may_overflow (lhs, phi))
8287 vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
8288 else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
8289 vr_result.max =
8290 positive_overflow_infinity (TREE_TYPE (vr_result.max));
8291 }
8292
8293 /* If we dropped either bound to +-INF then, if this is a loop
8294 PHI node, SCEV may know more about its value-range. */
8295 if ((cmp_min > 0 || cmp_min < 0
8296 || cmp_max < 0 || cmp_max > 0)
8297 && current_loops
8298 && (l = loop_containing_stmt (phi))
8299 && l->header == gimple_bb (phi))
8300 adjust_range_with_scev (&vr_result, l, phi, lhs);
8301
8302 /* If we will end up with a (-INF, +INF) range, set it to
8303 VARYING. Same if the previous max value was invalid for
8304 the type and we end up with vr_result.min > vr_result.max. */
8305 if ((vrp_val_is_max (vr_result.max)
8306 && vrp_val_is_min (vr_result.min))
8307 || compare_values (vr_result.min,
8308 vr_result.max) > 0)
8309 goto varying;
8310 }
8311
8312 /* If the new range is different from the previous value, keep
8313 iterating. */
8314 update_range:
8315 if (update_value_range (lhs, &vr_result))
8316 {
8317 if (dump_file && (dump_flags & TDF_DETAILS))
8318 {
8319 fprintf (dump_file, "Found new range for ");
8320 print_generic_expr (dump_file, lhs, 0);
8321 fprintf (dump_file, ": ");
8322 dump_value_range (dump_file, &vr_result);
8323 fprintf (dump_file, "\n\n");
8324 }
8325
8326 return SSA_PROP_INTERESTING;
8327 }
8328
8329 /* Nothing changed, don't add outgoing edges. */
8330 return SSA_PROP_NOT_INTERESTING;
8331
8332 /* No match found. Set the LHS to VARYING. */
8333 varying:
8334 set_value_range_to_varying (lhs_vr);
8335 return SSA_PROP_VARYING;
8336 }
8337
8338 /* Simplify boolean operations if the source is already known
8339 to be a boolean. */
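/* For illustration (SSA names are hypothetical): when _1 and _2 are known
   to be 0 or 1, _3 = _1 != _2 becomes _3 = _1 ^ _2, _3 = _1 == 0 becomes
   _3 = _1 ^ 1, and _3 = _1 == 1 reduces to a copy of _1.  */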
8340 static bool
8341 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8342 {
8343 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8344 tree lhs, op0, op1;
8345 bool need_conversion;
8346
8347 /* We handle only !=/== case here. */
8348 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8349
8350 op0 = gimple_assign_rhs1 (stmt);
8351 if (!op_with_boolean_value_range_p (op0))
8352 return false;
8353
8354 op1 = gimple_assign_rhs2 (stmt);
8355 if (!op_with_boolean_value_range_p (op1))
8356 return false;
8357
8358 /* Reduce the number of cases to handle to NE_EXPR. As there is no
8359 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8360 if (rhs_code == EQ_EXPR)
8361 {
8362 if (TREE_CODE (op1) == INTEGER_CST)
8363 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
8364 else
8365 return false;
8366 }
8367
8368 lhs = gimple_assign_lhs (stmt);
8369 need_conversion
8370 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8371
8372 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8373 if (need_conversion
8374 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8375 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8376 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8377 return false;
8378
8379 /* For A != 0 we can substitute A itself. */
8380 if (integer_zerop (op1))
8381 gimple_assign_set_rhs_with_ops (gsi,
8382 need_conversion
8383 ? NOP_EXPR : TREE_CODE (op0),
8384 op0, NULL_TREE);
8385 /* For A != B we substitute A ^ B, either with a conversion ... */
8386 else if (need_conversion)
8387 {
8388 tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
8389 gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
8390 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8391 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
8392 }
8393 /* ... or without one. */
8394 else
8395 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8396 update_stmt (gsi_stmt (*gsi));
8397
8398 return true;
8399 }
8400
8401 /* Simplify a division or modulo operator to a right shift or
8402 bitwise and if the first operand is unsigned or is greater
8403 than zero and the second operand is an exact power of two. */
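/* For illustration (assuming X is unsigned or known to be non-negative):
   X / 16 is rewritten as X >> 4 and X % 16 as X & 15.  */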
8404
8405 static bool
8406 simplify_div_or_mod_using_ranges (gimple stmt)
8407 {
8408 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8409 tree val = NULL;
8410 tree op0 = gimple_assign_rhs1 (stmt);
8411 tree op1 = gimple_assign_rhs2 (stmt);
8412 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8413
8414 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8415 {
8416 val = integer_one_node;
8417 }
8418 else
8419 {
8420 bool sop = false;
8421
8422 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8423
8424 if (val
8425 && sop
8426 && integer_onep (val)
8427 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8428 {
8429 location_t location;
8430
8431 if (!gimple_has_location (stmt))
8432 location = input_location;
8433 else
8434 location = gimple_location (stmt);
8435 warning_at (location, OPT_Wstrict_overflow,
8436 "assuming signed overflow does not occur when "
8437 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8438 }
8439 }
8440
8441 if (val && integer_onep (val))
8442 {
8443 tree t;
8444
8445 if (rhs_code == TRUNC_DIV_EXPR)
8446 {
8447 t = build_int_cst (integer_type_node, tree_log2 (op1));
8448 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8449 gimple_assign_set_rhs1 (stmt, op0);
8450 gimple_assign_set_rhs2 (stmt, t);
8451 }
8452 else
8453 {
8454 t = build_int_cst (TREE_TYPE (op1), 1);
8455 t = int_const_binop (MINUS_EXPR, op1, t);
8456 t = fold_convert (TREE_TYPE (op0), t);
8457
8458 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8459 gimple_assign_set_rhs1 (stmt, op0);
8460 gimple_assign_set_rhs2 (stmt, t);
8461 }
8462
8463 update_stmt (stmt);
8464 return true;
8465 }
8466
8467 return false;
8468 }
8469
8470 /* If the operand to an ABS_EXPR is >= 0, then eliminate the
8471 ABS_EXPR. If the operand is <= 0, then simplify the
8472 ABS_EXPR into a NEGATE_EXPR. */
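/* For illustration (ranges chosen arbitrarily): if X is known to be in
   [-10, -1], ABS (X) becomes -X; if X is known to be in [0, 10],
   ABS (X) becomes X.  */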
8473
8474 static bool
8475 simplify_abs_using_ranges (gimple stmt)
8476 {
8477 tree val = NULL;
8478 tree op = gimple_assign_rhs1 (stmt);
8479 tree type = TREE_TYPE (op);
8480 value_range_t *vr = get_value_range (op);
8481
8482 if (TYPE_UNSIGNED (type))
8483 {
8484 val = integer_zero_node;
8485 }
8486 else if (vr)
8487 {
8488 bool sop = false;
8489
8490 val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
8491 if (!val)
8492 {
8493 sop = false;
8494 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
8495 &sop);
8496
8497 if (val)
8498 {
8499 if (integer_zerop (val))
8500 val = integer_one_node;
8501 else if (integer_onep (val))
8502 val = integer_zero_node;
8503 }
8504 }
8505
8506 if (val
8507 && (integer_onep (val) || integer_zerop (val)))
8508 {
8509 if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8510 {
8511 location_t location;
8512
8513 if (!gimple_has_location (stmt))
8514 location = input_location;
8515 else
8516 location = gimple_location (stmt);
8517 warning_at (location, OPT_Wstrict_overflow,
8518 "assuming signed overflow does not occur when "
8519 "simplifying %<abs (X)%> to %<X%> or %<-X%>");
8520 }
8521
8522 gimple_assign_set_rhs1 (stmt, op);
8523 if (integer_onep (val))
8524 gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
8525 else
8526 gimple_assign_set_rhs_code (stmt, SSA_NAME);
8527 update_stmt (stmt);
8528 return true;
8529 }
8530 }
8531
8532 return false;
8533 }
8534
8535 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8536 If all the bits that are being cleared by & are already
8537 known to be zero from VR, or all the bits that are being
8538 set by | are already known to be one from VR, the bit
8539 operation is redundant. */
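/* For illustration (masks and ranges chosen arbitrarily): if X is known
   to be in [0xf0, 0xff], so the high four bits are already set,
   X | 0xf0 simplifies to X; likewise, if X is known to be in [0, 15],
   X & 0xff simplifies to X.  */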
8540
8541 static bool
8542 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8543 {
8544 tree op0 = gimple_assign_rhs1 (stmt);
8545 tree op1 = gimple_assign_rhs2 (stmt);
8546 tree op = NULL_TREE;
8547 value_range_t vr0 = VR_INITIALIZER;
8548 value_range_t vr1 = VR_INITIALIZER;
8549 double_int may_be_nonzero0, may_be_nonzero1;
8550 double_int must_be_nonzero0, must_be_nonzero1;
8551 double_int mask;
8552
8553 if (TREE_CODE (op0) == SSA_NAME)
8554 vr0 = *(get_value_range (op0));
8555 else if (is_gimple_min_invariant (op0))
8556 set_value_range_to_value (&vr0, op0, NULL);
8557 else
8558 return false;
8559
8560 if (TREE_CODE (op1) == SSA_NAME)
8561 vr1 = *(get_value_range (op1));
8562 else if (is_gimple_min_invariant (op1))
8563 set_value_range_to_value (&vr1, op1, NULL);
8564 else
8565 return false;
8566
8567 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8568 return false;
8569 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8570 return false;
8571
8572 switch (gimple_assign_rhs_code (stmt))
8573 {
8574 case BIT_AND_EXPR:
8575 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8576 if (mask.is_zero ())
8577 {
8578 op = op0;
8579 break;
8580 }
8581 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8582 if (mask.is_zero ())
8583 {
8584 op = op1;
8585 break;
8586 }
8587 break;
8588 case BIT_IOR_EXPR:
8589 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8590 if (mask.is_zero ())
8591 {
8592 op = op1;
8593 break;
8594 }
8595 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8596 if (mask.is_zero ())
8597 {
8598 op = op0;
8599 break;
8600 }
8601 break;
8602 default:
8603 gcc_unreachable ();
8604 }
8605
8606 if (op == NULL_TREE)
8607 return false;
8608
8609 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8610 update_stmt (gsi_stmt (*gsi));
8611 return true;
8612 }
8613
8614 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8615 a known value range VR.
8616
8617 If there is one and only one value which will satisfy the
8618 conditional, then return that value. Else return NULL. */
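/* For illustration (range chosen arbitrarily): if OP0 is known to be in
   [0, 10] and the condition is OP0 > 9, the only satisfying value is 10,
   which is returned so the caller can test OP0 == 10 instead.  */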
8619
8620 static tree
8621 test_for_singularity (enum tree_code cond_code, tree op0,
8622 tree op1, value_range_t *vr)
8623 {
8624 tree min = NULL;
8625 tree max = NULL;
8626
8627 /* Extract minimum/maximum values which satisfy the
8628 conditional as it was written. */
8629 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8630 {
8631 /* This should not be negative infinity; there is no overflow
8632 here. */
8633 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8634
8635 max = op1;
8636 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8637 {
8638 tree one = build_int_cst (TREE_TYPE (op0), 1);
8639 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8640 if (EXPR_P (max))
8641 TREE_NO_WARNING (max) = 1;
8642 }
8643 }
8644 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8645 {
8646 /* This should not be positive infinity; there is no overflow
8647 here. */
8648 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8649
8650 min = op1;
8651 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8652 {
8653 tree one = build_int_cst (TREE_TYPE (op0), 1);
8654 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8655 if (EXPR_P (min))
8656 TREE_NO_WARNING (min) = 1;
8657 }
8658 }
8659
8660 /* Now refine the minimum and maximum values using any
8661 value range information we have for op0. */
8662 if (min && max)
8663 {
8664 if (compare_values (vr->min, min) == 1)
8665 min = vr->min;
8666 if (compare_values (vr->max, max) == -1)
8667 max = vr->max;
8668
8669 /* If the new min/max values have converged to a single value,
8670 then there is only one value which can satisfy the condition,
8671 return that value. */
8672 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8673 return min;
8674 }
8675 return NULL;
8676 }
8677
8678 /* Return whether the value range *VR fits in an integer type specified
8679 by PRECISION and UNSIGNED_P. */
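/* For illustration: [0, 200] fits an 8-bit unsigned type but not an
   8-bit signed one, while [-1, 100] fits an 8-bit signed type but no
   unsigned type of any precision.  */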
8680
8681 static bool
8682 range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
8683 {
8684 tree src_type;
8685 unsigned src_precision;
8686 double_int tem;
8687
8688 /* We can only handle integral and pointer types. */
8689 src_type = TREE_TYPE (vr->min);
8690 if (!INTEGRAL_TYPE_P (src_type)
8691 && !POINTER_TYPE_P (src_type))
8692 return false;
8693
8694 /* An extension is fine unless VR is signed and UNSIGNED_P is set;
8695 an identity transform is fine as well. */
8696 src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
8697 if ((src_precision < precision
8698 && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
8699 || (src_precision == precision
8700 && TYPE_UNSIGNED (src_type) == unsigned_p))
8701 return true;
8702
8703 /* Now we can only handle ranges with constant bounds. */
8704 if (vr->type != VR_RANGE
8705 || TREE_CODE (vr->min) != INTEGER_CST
8706 || TREE_CODE (vr->max) != INTEGER_CST)
8707 return false;
8708
8709 /* For sign changes, the MSB of the double_int has to be clear.
8710 An unsigned value with its MSB set cannot be represented by
8711 a signed double_int, while a negative value cannot be represented
8712 by an unsigned double_int. */
8713 if (TYPE_UNSIGNED (src_type) != unsigned_p
8714 && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
8715 return false;
8716
8717 /* Then we can perform the conversion on both ends and compare
8718 the result for equality. */
8719 tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
8720 if (tree_to_double_int (vr->min) != tem)
8721 return false;
8722 tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
8723 if (tree_to_double_int (vr->max) != tem)
8724 return false;
8725
8726 return true;
8727 }
8728
8729 /* Simplify a conditional using a relational operator to an equality
8730 test if the range information indicates only one value can satisfy
8731 the original conditional. */
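/* For illustration (range chosen arbitrarily): when X is known to be in
   [3, 10], the conditional "if (X < 4)" is rewritten as "if (X == 3)".  */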
8732
8733 static bool
8734 simplify_cond_using_ranges (gimple stmt)
8735 {
8736 tree op0 = gimple_cond_lhs (stmt);
8737 tree op1 = gimple_cond_rhs (stmt);
8738 enum tree_code cond_code = gimple_cond_code (stmt);
8739
8740 if (cond_code != NE_EXPR
8741 && cond_code != EQ_EXPR
8742 && TREE_CODE (op0) == SSA_NAME
8743 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
8744 && is_gimple_min_invariant (op1))
8745 {
8746 value_range_t *vr = get_value_range (op0);
8747
8748 /* If we have range information for OP0, then we might be
8749 able to simplify this conditional. */
8750 if (vr->type == VR_RANGE)
8751 {
8752 tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
8753
8754 if (new_tree)
8755 {
8756 if (dump_file)
8757 {
8758 fprintf (dump_file, "Simplified relational ");
8759 print_gimple_stmt (dump_file, stmt, 0, 0);
8760 fprintf (dump_file, " into ");
8761 }
8762
8763 gimple_cond_set_code (stmt, EQ_EXPR);
8764 gimple_cond_set_lhs (stmt, op0);
8765 gimple_cond_set_rhs (stmt, new_tree);
8766
8767 update_stmt (stmt);
8768
8769 if (dump_file)
8770 {
8771 print_gimple_stmt (dump_file, stmt, 0, 0);
8772 fprintf (dump_file, "\n");
8773 }
8774
8775 return true;
8776 }
8777
8778 /* Try again after inverting the condition. We only deal
8779 with integral types here, so no need to worry about
8780 issues with inverting FP comparisons. */
8781 cond_code = invert_tree_comparison (cond_code, false);
8782 new_tree = test_for_singularity (cond_code, op0, op1, vr);
8783
8784 if (new_tree)
8785 {
8786 if (dump_file)
8787 {
8788 fprintf (dump_file, "Simplified relational ");
8789 print_gimple_stmt (dump_file, stmt, 0, 0);
8790 fprintf (dump_file, " into ");
8791 }
8792
8793 gimple_cond_set_code (stmt, NE_EXPR);
8794 gimple_cond_set_lhs (stmt, op0);
8795 gimple_cond_set_rhs (stmt, new_tree);
8796
8797 update_stmt (stmt);
8798
8799 if (dump_file)
8800 {
8801 print_gimple_stmt (dump_file, stmt, 0, 0);
8802 fprintf (dump_file, "\n");
8803 }
8804
8805 return true;
8806 }
8807 }
8808 }
8809
8810 /* If we have a comparison of an SSA_NAME (OP0) against a constant,
8811 see if OP0 was set by a type conversion where the source of
8812 the conversion is another SSA_NAME with a range that fits
8813 into the range of OP0's type.
8814
8815 If so, the conversion is redundant as the earlier SSA_NAME can be
8816 used for the comparison directly if we just massage the constant in the
8817 comparison. */
8818 if (TREE_CODE (op0) == SSA_NAME
8819 && TREE_CODE (op1) == INTEGER_CST)
8820 {
8821 gimple def_stmt = SSA_NAME_DEF_STMT (op0);
8822 tree innerop;
8823
8824 if (!is_gimple_assign (def_stmt)
8825 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
8826 return false;
8827
8828 innerop = gimple_assign_rhs1 (def_stmt);
8829
8830 if (TREE_CODE (innerop) == SSA_NAME
8831 && !POINTER_TYPE_P (TREE_TYPE (innerop)))
8832 {
8833 value_range_t *vr = get_value_range (innerop);
8834
8835 if (range_int_cst_p (vr)
8836 && range_fits_type_p (vr,
8837 TYPE_PRECISION (TREE_TYPE (op0)),
8838 TYPE_UNSIGNED (TREE_TYPE (op0)))
8839 && int_fits_type_p (op1, TREE_TYPE (innerop))
8840 /* The range must not have overflowed, or if it did overflow
8841 we must not be wrapping/trapping overflow and optimizing
8842 with strict overflow semantics. */
8843 && ((!is_negative_overflow_infinity (vr->min)
8844 && !is_positive_overflow_infinity (vr->max))
8845 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
8846 {
8847 /* If the range overflowed and the user has asked for warnings
8848 when strict overflow semantics were used to optimize code,
8849 issue an appropriate warning. */
8850 if ((is_negative_overflow_infinity (vr->min)
8851 || is_positive_overflow_infinity (vr->max))
8852 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
8853 {
8854 location_t location;
8855
8856 if (!gimple_has_location (stmt))
8857 location = input_location;
8858 else
8859 location = gimple_location (stmt);
8860 warning_at (location, OPT_Wstrict_overflow,
8861 "assuming signed overflow does not occur when "
8862 "simplifying conditional");
8863 }
8864
8865 tree newconst = fold_convert (TREE_TYPE (innerop), op1);
8866 gimple_cond_set_lhs (stmt, innerop);
8867 gimple_cond_set_rhs (stmt, newconst);
8868 return true;
8869 }
8870 }
8871 }
8872
8873 return false;
8874 }
8875
8876 /* Simplify a switch statement using the value range of the switch
8877 argument. */
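/* For illustration (labels chosen arbitrarily, assuming each label has its
   own destination block): if the switch index is known to be in [2, 3]
   and there are case labels for 1, 2, 3 and 4, the edges for labels 1
   and 4 and for the default label can never be taken; they are queued
   for removal and the label vector is rebuilt with only the reachable
   cases.  */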
8878
8879 static bool
8880 simplify_switch_using_ranges (gimple stmt)
8881 {
8882 tree op = gimple_switch_index (stmt);
8883 value_range_t *vr;
8884 bool take_default;
8885 edge e;
8886 edge_iterator ei;
8887 size_t i = 0, j = 0, n, n2;
8888 tree vec2;
8889 switch_update su;
8890 size_t k = 1, l = 0;
8891
8892 if (TREE_CODE (op) == SSA_NAME)
8893 {
8894 vr = get_value_range (op);
8895
8896 /* We can only handle integer ranges. */
8897 if ((vr->type != VR_RANGE
8898 && vr->type != VR_ANTI_RANGE)
8899 || symbolic_range_p (vr))
8900 return false;
8901
8902 /* Find case label for min/max of the value range. */
8903 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
8904 }
8905 else if (TREE_CODE (op) == INTEGER_CST)
8906 {
8907 take_default = !find_case_label_index (stmt, 1, op, &i);
8908 if (take_default)
8909 {
8910 i = 1;
8911 j = 0;
8912 }
8913 else
8914 {
8915 j = i;
8916 }
8917 }
8918 else
8919 return false;
8920
8921 n = gimple_switch_num_labels (stmt);
8922
8923 /* Bail out if all edges are taken anyway. */
8924 if (i == 1
8925 && j == n - 1
8926 && take_default)
8927 return false;
8928
8929 /* Build a new vector of taken case labels. */
8930 vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
8931 n2 = 0;
8932
8933 /* Add the default edge, if necessary. */
8934 if (take_default)
8935 TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
8936
8937 for (; i <= j; ++i, ++n2)
8938 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
8939
8940 for (; k <= l; ++k, ++n2)
8941 TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
8942
8943 /* Mark needed edges. */
8944 for (i = 0; i < n2; ++i)
8945 {
8946 e = find_edge (gimple_bb (stmt),
8947 label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
8948 e->aux = (void *)-1;
8949 }
8950
8951 /* Queue unneeded edges for later removal. */
8952 FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
8953 {
8954 if (e->aux == (void *)-1)
8955 {
8956 e->aux = NULL;
8957 continue;
8958 }
8959
8960 if (dump_file && (dump_flags & TDF_DETAILS))
8961 {
8962 fprintf (dump_file, "removing unreachable case label\n");
8963 }
8964 to_remove_edges.safe_push (e);
8965 e->flags &= ~EDGE_EXECUTABLE;
8966 }
8967
8968 /* And queue an update for the stmt. */
8969 su.stmt = stmt;
8970 su.vec = vec2;
8971 to_update_switch_stmts.safe_push (su);
8972 return false;
8973 }
8974
8975 /* Simplify an integral conversion from an SSA name in STMT. */
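/* For illustration (types, names and ranges are hypothetical): in the
   chain _2 = (short) _1; _3 = (long) _2; with _1 an int known to be in
   [0, 100], truncating to short and widening again cannot change the
   value, so the statement is rewritten as _3 = (long) _1.  */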
8976
8977 static bool
8978 simplify_conversion_using_ranges (gimple stmt)
8979 {
8980 tree innerop, middleop, finaltype;
8981 gimple def_stmt;
8982 value_range_t *innervr;
8983 bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
8984 unsigned inner_prec, middle_prec, final_prec;
8985 double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
8986
8987 finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
8988 if (!INTEGRAL_TYPE_P (finaltype))
8989 return false;
8990 middleop = gimple_assign_rhs1 (stmt);
8991 def_stmt = SSA_NAME_DEF_STMT (middleop);
8992 if (!is_gimple_assign (def_stmt)
8993 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
8994 return false;
8995 innerop = gimple_assign_rhs1 (def_stmt);
8996 if (TREE_CODE (innerop) != SSA_NAME
8997 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
8998 return false;
8999
9000 /* Get the value-range of the inner operand. */
9001 innervr = get_value_range (innerop);
9002 if (innervr->type != VR_RANGE
9003 || TREE_CODE (innervr->min) != INTEGER_CST
9004 || TREE_CODE (innervr->max) != INTEGER_CST)
9005 return false;
9006
9007 /* Simulate the conversion chain to check whether the result stays
9008 the same if the middle conversion is removed. */
9009 innermin = tree_to_double_int (innervr->min);
9010 innermax = tree_to_double_int (innervr->max);
9011
9012 inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
9013 middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
9014 final_prec = TYPE_PRECISION (finaltype);
9015
9016 /* If the first conversion is not injective, the second must not
9017 be widening. */
9018 if ((innermax - innermin).ugt (double_int::mask (middle_prec))
9019 && middle_prec < final_prec)
9020 return false;
9021 /* We also want a middle value so that we can track the effect that
9022 narrowing conversions with sign change have. */
9023 inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
9024 if (inner_unsigned_p)
9025 innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
9026 else
9027 innermed = double_int_zero;
9028 if (innermin.cmp (innermed, inner_unsigned_p) >= 0
9029 || innermed.cmp (innermax, inner_unsigned_p) >= 0)
9030 innermed = innermin;
9031
9032 middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
9033 middlemin = innermin.ext (middle_prec, middle_unsigned_p);
9034 middlemed = innermed.ext (middle_prec, middle_unsigned_p);
9035 middlemax = innermax.ext (middle_prec, middle_unsigned_p);
9036
9037 /* Require that the final conversion applied to both the original
9038 and the intermediate range produces the same result. */
9039 final_unsigned_p = TYPE_UNSIGNED (finaltype);
9040 if (middlemin.ext (final_prec, final_unsigned_p)
9041 != innermin.ext (final_prec, final_unsigned_p)
9042 || middlemed.ext (final_prec, final_unsigned_p)
9043 != innermed.ext (final_prec, final_unsigned_p)
9044 || middlemax.ext (final_prec, final_unsigned_p)
9045 != innermax.ext (final_prec, final_unsigned_p))
9046 return false;
9047
9048 gimple_assign_set_rhs1 (stmt, innerop);
9049 update_stmt (stmt);
9050 return true;
9051 }
9052
9053 /* Simplify a conversion from an integral SSA name to float in STMT. */
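/* For illustration (types and ranges are hypothetical): if an unsigned int
   X is known to be in [0, 1000], the value also fits the signed variant,
   so a sign-change statement is inserted and (double) X is performed as a
   signed int to double conversion, provided the target supports that
   conversion directly.  */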
9054
9055 static bool
9056 simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
9057 {
9058 tree rhs1 = gimple_assign_rhs1 (stmt);
9059 value_range_t *vr = get_value_range (rhs1);
9060 enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
9061 enum machine_mode mode;
9062 tree tem;
9063 gimple conv;
9064
9065 /* We can only handle constant ranges. */
9066 if (vr->type != VR_RANGE
9067 || TREE_CODE (vr->min) != INTEGER_CST
9068 || TREE_CODE (vr->max) != INTEGER_CST)
9069 return false;
9070
9071 /* First check if we can use a signed type in place of an unsigned. */
9072 if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
9073 && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
9074 != CODE_FOR_nothing)
9075 && range_fits_type_p (vr, GET_MODE_PRECISION
9076 (TYPE_MODE (TREE_TYPE (rhs1))), 0))
9077 mode = TYPE_MODE (TREE_TYPE (rhs1));
9078 /* If we can do the conversion in the current input mode do nothing. */
9079 else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
9080 TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
9081 return false;
9082 /* Otherwise search for a mode we can use, starting from the narrowest
9083 integer mode available. */
9084 else
9085 {
9086 mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
9087 do
9088 {
9089 /* If we cannot do a signed conversion to float from mode
9090 or if the value-range does not fit in the signed type
9091 try with a wider mode. */
9092 if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
9093 && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
9094 break;
9095
9096 mode = GET_MODE_WIDER_MODE (mode);
9097 /* But do not widen the input. Instead leave that to the
9098 optabs expansion code. */
9099 if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
9100 return false;
9101 }
9102 while (mode != VOIDmode);
9103 if (mode == VOIDmode)
9104 return false;
9105 }
9106
9107 /* It works; insert a truncation or sign-change before the
9108 float conversion. */
9109 tem = make_ssa_name (build_nonstandard_integer_type
9110 (GET_MODE_PRECISION (mode), 0), NULL);
9111 conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
9112 gsi_insert_before (gsi, conv, GSI_SAME_STMT);
9113 gimple_assign_set_rhs1 (stmt, tem);
9114 update_stmt (stmt);
9115
9116 return true;
9117 }
9118
9119 /* Simplify STMT using ranges if possible. */
9120
9121 static bool
9122 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9123 {
9124 gimple stmt = gsi_stmt (*gsi);
9125 if (is_gimple_assign (stmt))
9126 {
9127 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9128 tree rhs1 = gimple_assign_rhs1 (stmt);
9129
9130 switch (rhs_code)
9131 {
9132 case EQ_EXPR:
9133 case NE_EXPR:
9134 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9135 if the RHS is zero or one, and the LHS is known to be a boolean
9136 value. */
9137 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9138 return simplify_truth_ops_using_ranges (gsi, stmt);
9139 break;
9140
9141 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9142 and BIT_AND_EXPR respectively if the first operand is greater
9143 than zero and the second operand is an exact power of two. */
9144 case TRUNC_DIV_EXPR:
9145 case TRUNC_MOD_EXPR:
9146 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
9147 && integer_pow2p (gimple_assign_rhs2 (stmt)))
9148 return simplify_div_or_mod_using_ranges (stmt);
9149 break;
9150
9151 /* Transform ABS (X) into X or -X as appropriate. */
9152 case ABS_EXPR:
9153 if (TREE_CODE (rhs1) == SSA_NAME
9154 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9155 return simplify_abs_using_ranges (stmt);
9156 break;
9157
9158 case BIT_AND_EXPR:
9159 case BIT_IOR_EXPR:
9160 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9161 if all the bits being cleared are already cleared or
9162 all the bits being set are already set. */
9163 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9164 return simplify_bit_ops_using_ranges (gsi, stmt);
9165 break;
9166
9167 CASE_CONVERT:
9168 if (TREE_CODE (rhs1) == SSA_NAME
9169 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9170 return simplify_conversion_using_ranges (stmt);
9171 break;
9172
9173 case FLOAT_EXPR:
9174 if (TREE_CODE (rhs1) == SSA_NAME
9175 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9176 return simplify_float_conversion_using_ranges (gsi, stmt);
9177 break;
9178
9179 default:
9180 break;
9181 }
9182 }
9183 else if (gimple_code (stmt) == GIMPLE_COND)
9184 return simplify_cond_using_ranges (stmt);
9185 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9186 return simplify_switch_using_ranges (stmt);
9187
9188 return false;
9189 }
9190
9191 /* If the statement pointed to by SI has a predicate whose value can be
9192 computed using the value range information computed by VRP, compute
9193 its value and return true. Otherwise, return false. */
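/* For illustration (names and ranges are hypothetical): if the statement
   is the conditional "if (x_1 > 10)" and x_1 is known to be in [0, 5],
   the predicate folds to false and the condition is made trivially false
   via gimple_cond_make_false.  */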
9194
9195 static bool
9196 fold_predicate_in (gimple_stmt_iterator *si)
9197 {
9198 bool assignment_p = false;
9199 tree val;
9200 gimple stmt = gsi_stmt (*si);
9201
9202 if (is_gimple_assign (stmt)
9203 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
9204 {
9205 assignment_p = true;
9206 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
9207 gimple_assign_rhs1 (stmt),
9208 gimple_assign_rhs2 (stmt),
9209 stmt);
9210 }
9211 else if (gimple_code (stmt) == GIMPLE_COND)
9212 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
9213 gimple_cond_lhs (stmt),
9214 gimple_cond_rhs (stmt),
9215 stmt);
9216 else
9217 return false;
9218
9219 if (val)
9220 {
9221 if (assignment_p)
9222 val = fold_convert (gimple_expr_type (stmt), val);
9223
9224 if (dump_file)
9225 {
9226 fprintf (dump_file, "Folding predicate ");
9227 print_gimple_expr (dump_file, stmt, 0, 0);
9228 fprintf (dump_file, " to ");
9229 print_generic_expr (dump_file, val, 0);
9230 fprintf (dump_file, "\n");
9231 }
9232
9233 if (is_gimple_assign (stmt))
9234 gimple_assign_set_rhs_from_tree (si, val);
9235 else
9236 {
9237 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
9238 if (integer_zerop (val))
9239 gimple_cond_make_false (stmt);
9240 else if (integer_onep (val))
9241 gimple_cond_make_true (stmt);
9242 else
9243 gcc_unreachable ();
9244 }
9245
9246 return true;
9247 }
9248
9249 return false;
9250 }
9251
9252 /* Callback for substitute_and_fold folding the stmt at *SI. */
9253
9254 static bool
9255 vrp_fold_stmt (gimple_stmt_iterator *si)
9256 {
9257 if (fold_predicate_in (si))
9258 return true;
9259
9260 return simplify_stmt_using_ranges (si);
9261 }
9262
9263 /* Stack of dest,src equivalency pairs that need to be restored after
9264 each attempt to thread a block's incoming edge to an outgoing edge.
9265
9266 A NULL entry is used to mark the end of pairs which need to be
9267 restored. */
9268 static vec<tree> equiv_stack;
9269
9270 /* A trivial wrapper so that we can present the generic jump threading
9271 code with a simple API for simplifying statements. STMT is the
9272 statement we want to simplify; WITHIN_STMT provides the location
9273 for any overflow warnings. */
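/* For illustration (made-up SSA names): if the range recorded for a_1
   is the singleton [7, 7], this wrapper returns the constant 8 for the
   statement b_2 = a_1 + 1, and a true value for if (a_1 > 5), which
   lets the threader pick the outgoing edge to follow.  */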
9274
9275 static tree
9276 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9277 {
9278 if (gimple_code (stmt) == GIMPLE_COND)
9279 return vrp_evaluate_conditional (gimple_cond_code (stmt),
9280 gimple_cond_lhs (stmt),
9281 gimple_cond_rhs (stmt), within_stmt);
9282
9283 if (gimple_code (stmt) == GIMPLE_ASSIGN)
9284 {
9285 value_range_t new_vr = VR_INITIALIZER;
9286 tree lhs = gimple_assign_lhs (stmt);
9287
9288 if (TREE_CODE (lhs) == SSA_NAME
9289 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
9290 || POINTER_TYPE_P (TREE_TYPE (lhs))))
9291 {
9292 extract_range_from_assignment (&new_vr, stmt);
9293 if (range_int_cst_singleton_p (&new_vr))
9294 return new_vr.min;
9295 }
9296 }
9297
9298 return NULL_TREE;
9299 }
9300
9301 /* Blocks which have more than one predecessor and more than
9302 one successor present jump threading opportunities, i.e.,
9303 when the block is reached from a specific predecessor, we
9304 may be able to determine which of the outgoing edges will
9305 be traversed. When this optimization applies, we are able
9306 to avoid conditionals at runtime and we may expose secondary
9307 optimization opportunities.
9308
9309 This routine is effectively a driver for the generic jump
9310 threading code. It basically just presents the generic code
9311 with edges that may be suitable for jump threading.
9312
9313 Unlike DOM, we do not iterate VRP if jump threading was successful.
9314 While iterating may expose new opportunities for VRP, it is expected
9315 that those opportunities would be very limited and the compile-time
9316 cost of exposing them would be significant.
9317
9318 As jump threading opportunities are discovered, they are registered
9319 for later realization. */
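/* A minimal illustration (made-up source, not from any testcase):

     if (a_1 > 10)
       b_2 = 1;
     else
       b_2 = 0;
     ...
     if (b_2 != 0)
       foo ();

   When the join block ending in the second condition is entered from
   the arm that set b_2 to 1, the condition is known to be true, so
   that incoming edge can be threaded directly to the call of foo.  */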
9320
9321 static void
9322 identify_jump_threads (void)
9323 {
9324 basic_block bb;
9325 gimple dummy;
9326 int i;
9327 edge e;
9328
9329 /* Ugh. When substituting values earlier in this pass we can
9330 wipe the dominance information. So rebuild the dominator
9331 information as we need it within the jump threading code. */
9332 calculate_dominance_info (CDI_DOMINATORS);
9333
9334 /* We do not allow VRP information to be used for jump threading
9335 across a back edge in the CFG. Otherwise it becomes too
9336 difficult to avoid eliminating loop exit tests. Of course
9337 EDGE_DFS_BACK is not accurate at this time so we have to
9338 recompute it. */
9339 mark_dfs_back_edges ();
9340
9341 /* Do not thread across edges we are about to remove. Just marking
9342 them as EDGE_DFS_BACK will do. */
9343 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9344 e->flags |= EDGE_DFS_BACK;
9345
9346 /* Allocate our unwinder stack to unwind any temporary equivalences
9347 that might be recorded. */
9348 equiv_stack.create (20);
9349
9350 /* To avoid lots of silly node creation, we create a single
9351 conditional and just modify it in-place when attempting to
9352 thread jumps. */
9353 dummy = gimple_build_cond (EQ_EXPR,
9354 integer_zero_node, integer_zero_node,
9355 NULL, NULL);
9356
9357 /* Walk through all the blocks finding those which present a
9358 potential jump threading opportunity. We could set this up
9359 as a dominator walker and record data during the walk, but
9360 I doubt it's worth the effort for the classes of jump
9361 threading opportunities we are trying to identify at this
9362 point in compilation. */
9363 FOR_EACH_BB (bb)
9364 {
9365 gimple last;
9366
9367 /* If the generic jump threading code does not find this block
9368 interesting, then there is nothing to do. */
9369 if (! potentially_threadable_block (bb))
9370 continue;
9371
9372 /* Fetch the last statement in the block; we only care about blocks
9373 ending in a conditional or a switch statement with suitable
9374 operands, as checked below. */
9375 last = gsi_stmt (gsi_last_bb (bb));
9376
9377 /* We're basically looking for a switch or any kind of conditional with
9378 integral or pointer type arguments. Note the type of the second
9379 argument will be the same as the first argument, so no need to
9380 check it explicitly. */
9381 if (gimple_code (last) == GIMPLE_SWITCH
9382 || (gimple_code (last) == GIMPLE_COND
9383 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
9384 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
9385 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
9386 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
9387 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
9388 {
9389 edge_iterator ei;
9390
9391 /* We've got a block with multiple predecessors and multiple
9392 successors which also ends in a suitable conditional or
9393 switch statement. For each predecessor, see if we can thread
9394 it to a specific successor. */
9395 FOR_EACH_EDGE (e, ei, bb->preds)
9396 {
9397 /* Do not thread across back edges or abnormal edges
9398 in the CFG. */
9399 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
9400 continue;
9401
9402 thread_across_edge (dummy, e, true, &equiv_stack,
9403 simplify_stmt_for_jump_threading);
9404 }
9405 }
9406 }
9407
9408 /* We do not actually update the CFG or SSA graphs at this point as
9409 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9410 handle ASSERT_EXPRs gracefully. */
9411 }
9412
9413 /* We identified all the jump threading opportunities earlier, but could
9414 not transform the CFG at that time. This routine transforms the
9415 CFG and arranges for the dominator tree to be rebuilt if necessary.
9416
9417 Note the SSA graph update will occur during the normal TODO
9418 processing by the pass manager. */
9419 static void
9420 finalize_jump_threads (void)
9421 {
9422 thread_through_all_blocks (false);
9423 equiv_stack.release ();
9424 }
9425
9426
9427 /* Traverse all the blocks folding statements with known ranges, then finalize and free the VRP data. */
9428
9429 static void
9430 vrp_finalize (void)
9431 {
9432 size_t i;
9433
9434 values_propagated = true;
9435
9436 if (dump_file)
9437 {
9438 fprintf (dump_file, "\nValue ranges after VRP:\n\n");
9439 dump_all_value_ranges (dump_file);
9440 fprintf (dump_file, "\n");
9441 }
9442
9443 substitute_and_fold (op_with_constant_singleton_value_range,
9444 vrp_fold_stmt, false);
9445
9446 if (warn_array_bounds)
9447 check_all_array_refs ();
9448
9449 /* We must identify jump threading opportunities before we release
9450 the data structures built by VRP. */
9451 identify_jump_threads ();
9452
9453 /* Record the computed value ranges on non-pointer SSA_NAMEs. */
9454 for (i = 0; i < num_vr_values; i++)
9455 if (vr_value[i])
9456 {
9457 tree name = ssa_name (i);
9458
9459 if (!name
9460 || POINTER_TYPE_P (TREE_TYPE (name))
9461 || (vr_value[i]->type == VR_VARYING)
9462 || (vr_value[i]->type == VR_UNDEFINED))
9463 continue;
9464
9465 if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
9466 && (TREE_CODE (vr_value[i]->max) == INTEGER_CST))
9467 {
9468 if (vr_value[i]->type == VR_RANGE)
9469 set_range_info (name,
9470 tree_to_double_int (vr_value[i]->min),
9471 tree_to_double_int (vr_value[i]->max));
9472 else if (vr_value[i]->type == VR_ANTI_RANGE)
9473 {
9474 /* A VR_ANTI_RANGE ~[min, max] is encoded compactly as
9475 [max + 1, min - 1] without additional attributes.
9476 When the recorded min value is greater than the recorded max
9477 value, the range is a VR_ANTI_RANGE; otherwise it is a VR_RANGE. */
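/* For example (illustrative only): for an 8-bit unsigned NAME, the
   anti-range ~[5, 10] is recorded as the pair (11, 4); since the
   recorded 11 is greater than the recorded 4, it reads back as an
   anti-range.  */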
9478
9479 /* The anti-range ~[0, 0] of an unsigned type is equivalent to,
9480 and stored as, the range [1, TYPE_MAX]. */
9481 if (TYPE_UNSIGNED (TREE_TYPE (name))
9482 && integer_zerop (vr_value[i]->min)
9483 && integer_zerop (vr_value[i]->max))
9484 set_range_info (name,
9485 double_int_one,
9486 double_int::max_value
9487 (TYPE_PRECISION (TREE_TYPE (name)), true));
9488 else
9489 set_range_info (name,
9490 tree_to_double_int (vr_value[i]->max)
9491 + double_int_one,
9492 tree_to_double_int (vr_value[i]->min)
9493 - double_int_one);
9494 }
9495 }
9496 }
9497
9498 /* Free allocated memory. */
9499 for (i = 0; i < num_vr_values; i++)
9500 if (vr_value[i])
9501 {
9502 BITMAP_FREE (vr_value[i]->equiv);
9503 free (vr_value[i]);
9504 }
9505
9506 free (vr_value);
9507 free (vr_phi_edge_counts);
9508
9509 /* Clear the pointers so that we can distinguish between VRP data
9510 being available and not being available. */
9511 vr_value = NULL;
9512 vr_phi_edge_counts = NULL;
9513 }
9514
9515
9516 /* Main entry point to VRP (Value Range Propagation). This pass is
9517 loosely based on J. R. C. Patterson, ``Accurate Static Branch
9518 Prediction by Value Range Propagation,'' in SIGPLAN Conference on
9519 Programming Language Design and Implementation, pp. 67-78, 1995.
9520 Also available at http://citeseer.ist.psu.edu/patterson95accurate.html
9521
9522 This is essentially an SSA-CCP pass modified to deal with ranges
9523 instead of constants.
9524
9525 While propagating ranges, we may find that two or more SSA names
9526 have equivalent, though distinct, ranges. For instance,
9527
9528 1 x_9 = p_3->a;
9529 2 p_4 = ASSERT_EXPR <p_3, p_3 != 0>
9530 3 if (p_4 == q_2)
9531 4 p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
9532 5 endif
9533 6 if (q_2)
9534
9535 In the code above, pointer p_5 has range [q_2, q_2], but from the
9536 code we can also determine that p_5 cannot be NULL and, if q_2 had
9537 a non-varying range, p_5's range should also be compatible with it.
9538
9539 These equivalences are created by two kinds of operations:
9540 ASSERT_EXPRs and copies. Since p_5 is an assertion on p_4, and p_4
9541 was the result of another assertion, we can use the fact that p_5
9542 and p_4 are equivalent when evaluating p_5's range.
9543
9544 Together with value ranges, we also propagate these equivalences
9545 between names so that we can take advantage of information from
9546 multiple ranges when doing final replacement. Note that this
9547 equivalency relation is transitive but not symmetric.
9548
9549 In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
9550 cannot assert that q_2 is equivalent to p_5 because q_2 may be used
9551 in contexts where that assertion does not hold (e.g., in line 6).
9552
9553 TODO: the main difference between this pass and Patterson's is that
9554 we do not propagate edge probabilities. We only compute whether
9555 edges can be taken or not. That is, instead of having a spectrum
9556 of jump probabilities between 0 and 1, we only deal with 0, 1 and
9557 DON'T KNOW. In the future, it may be worthwhile to propagate
9558 probabilities to aid branch prediction. */
9559
9560 static unsigned int
9561 execute_vrp (void)
9562 {
9563 int i;
9564 edge e;
9565 switch_update *su;
9566
9567 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
9568 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
9569 scev_initialize ();
9570
9571 /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
9572 Inserting assertions may split edges which will invalidate
9573 EDGE_DFS_BACK. */
9574 insert_range_assertions ();
9575
9576 to_remove_edges.create (10);
9577 to_update_switch_stmts.create (5);
9578 threadedge_initialize_values ();
9579
9580 /* For visiting PHI nodes we need EDGE_DFS_BACK computed. */
9581 mark_dfs_back_edges ();
9582
9583 vrp_initialize ();
9584 ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
9585 vrp_finalize ();
9586
9587 free_numbers_of_iterations_estimates ();
9588
9589 /* ASSERT_EXPRs must be removed before finalizing jump threads
9590 as finalizing jump threads calls the CFG cleanup code which
9591 does not properly handle ASSERT_EXPRs. */
9592 remove_range_assertions ();
9593
9594 /* If we exposed any new variables, go ahead and put them into
9595 SSA form now, before we handle jump threading. This simplifies
9596 interactions between rewriting of _DECL nodes into SSA form
9597 and rewriting SSA_NAME nodes into SSA form after block
9598 duplication and CFG manipulation. */
9599 update_ssa (TODO_update_ssa);
9600
9601 finalize_jump_threads ();
9602
9603 /* Remove dead edges from SWITCH_EXPR optimization. This leaves the
9604 CFG in a broken state and requires a cfg_cleanup run. */
9605 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9606 remove_edge (e);
9607 /* Update SWITCH_EXPR case label vector. */
9608 FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
9609 {
9610 size_t j;
9611 size_t n = TREE_VEC_LENGTH (su->vec);
9612 tree label;
9613 gimple_switch_set_num_labels (su->stmt, n);
9614 for (j = 0; j < n; j++)
9615 gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
9616 /* As we may have replaced the default label with a regular one,
9617 make sure to make it a real default label again. This ensures
9618 optimal expansion. */
9619 label = gimple_switch_label (su->stmt, 0);
9620 CASE_LOW (label) = NULL_TREE;
9621 CASE_HIGH (label) = NULL_TREE;
9622 }
9623
9624 if (to_remove_edges.length () > 0)
9625 {
9626 free_dominance_info (CDI_DOMINATORS);
9627 if (current_loops)
9628 loops_state_set (LOOPS_NEED_FIXUP);
9629 }
9630
9631 to_remove_edges.release ();
9632 to_update_switch_stmts.release ();
9633 threadedge_finalize_values ();
9634
9635 scev_finalize ();
9636 loop_optimizer_finalize ();
9637 return 0;
9638 }
9639
9640 static bool
9641 gate_vrp (void)
9642 {
9643 return flag_tree_vrp != 0;
9644 }
9645
9646 namespace {
9647
9648 const pass_data pass_data_vrp =
9649 {
9650 GIMPLE_PASS, /* type */
9651 "vrp", /* name */
9652 OPTGROUP_NONE, /* optinfo_flags */
9653 true, /* has_gate */
9654 true, /* has_execute */
9655 TV_TREE_VRP, /* tv_id */
9656 PROP_ssa, /* properties_required */
9657 0, /* properties_provided */
9658 0, /* properties_destroyed */
9659 0, /* todo_flags_start */
9660 ( TODO_cleanup_cfg | TODO_update_ssa
9661 | TODO_verify_ssa
9662 | TODO_verify_flow ), /* todo_flags_finish */
9663 };
9664
9665 class pass_vrp : public gimple_opt_pass
9666 {
9667 public:
9668 pass_vrp (gcc::context *ctxt)
9669 : gimple_opt_pass (pass_data_vrp, ctxt)
9670 {}
9671
9672 /* opt_pass methods: */
9673 opt_pass * clone () { return new pass_vrp (ctxt_); }
9674 bool gate () { return gate_vrp (); }
9675 unsigned int execute () { return execute_vrp (); }
9676
9677 }; // class pass_vrp
9678
9679 } // anon namespace
9680
9681 gimple_opt_pass *
9682 make_pass_vrp (gcc::context *ctxt)
9683 {
9684 return new pass_vrp (ctxt);
9685 }