tree-flow.h: Remove some prototypes.
[gcc.git] / gcc / tree-vrp.c
1 /* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "ggc.h"
26 #include "flags.h"
27 #include "tree.h"
28 #include "basic-block.h"
29 #include "tree-ssa.h"
30 #include "tree-pass.h"
31 #include "tree-dump.h"
32 #include "gimple-pretty-print.h"
33 #include "diagnostic-core.h"
34 #include "intl.h"
35 #include "cfgloop.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-ssa-propagate.h"
38 #include "tree-chrec.h"
39 #include "tree-ssa-threadupdate.h"
40 #include "expr.h"
41 #include "optabs.h"
42
43
44
/* Range of values that can be associated with an SSA_NAME after VRP
   has executed.  */
struct value_range_d
{
  /* Lattice value represented by this range.  */
  enum value_range_type type;

  /* Minimum and maximum values represented by this range.  These
     values should be interpreted as follows:

	- If TYPE is VR_UNDEFINED or VR_VARYING then MIN and MAX must
	  be NULL.

	- If TYPE == VR_RANGE then MIN holds the minimum value and
	  MAX holds the maximum value of the range [MIN, MAX].

	- If TYPE == ANTI_RANGE the variable is known to NOT
	  take any values in the range [MIN, MAX].  */
  tree min;
  tree max;

  /* Set of SSA names whose value ranges are equivalent to this one.
     This set is only valid when TYPE is VR_RANGE or VR_ANTI_RANGE.  */
  bitmap equiv;
};

typedef struct value_range_d value_range_t;

/* Initializer for a stack-allocated value_range_t: the VR_UNDEFINED
   lattice bottom with no bounds and no equivalence set.  */
#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
74
75 /* Set of SSA names found live during the RPO traversal of the function
76 for still active basic-blocks. */
77 static sbitmap *live;
78
79 /* Return true if the SSA name NAME is live on the edge E. */
80
81 static bool
82 live_on_edge (edge e, tree name)
83 {
84 return (live[e->dest->index]
85 && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
86 }
87
/* Local functions.  Forward declarations for routines that are defined
   (and documented) later in this file but used before their bodies.  */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
static void vrp_meet (value_range_t *, value_range_t *);
static void vrp_intersect_ranges (value_range_t *, value_range_t *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
						     tree, tree, bool, bool *,
						     bool *);
96
/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus_d
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  struct assert_locus_d *next;
};

typedef struct assert_locus_d *assert_locus_t;
128
/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus_t *asserts_for;

/* Value range array.  After propagation, VR_VALUE[I] holds the range
   of values that SSA name N_I may take.  */
static unsigned num_vr_values;
static value_range_t **vr_value;
/* True once propagation has finished; afterwards get_value_range no
   longer allocates new value-range objects.  */
static bool values_propagated;

/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
   number of executable edges we saw the last time we visited the
   node.  */
static int *vr_phi_edge_counts;

/* A switch statement together with the case-label vector that should
   replace its current one once propagation is done.  */
typedef struct {
  gimple stmt;
  tree vec;
} switch_update;

/* Edges proved unreachable during propagation; removed at the end.  */
static vec<edge> to_remove_edges;
/* Switch statements whose label vectors need rewriting at the end.  */
static vec<switch_update> to_update_switch_stmts;
156
157
158 /* Return the maximum value for TYPE. */
159
160 static inline tree
161 vrp_val_max (const_tree type)
162 {
163 if (!INTEGRAL_TYPE_P (type))
164 return NULL_TREE;
165
166 return TYPE_MAX_VALUE (type);
167 }
168
169 /* Return the minimum value for TYPE. */
170
171 static inline tree
172 vrp_val_min (const_tree type)
173 {
174 if (!INTEGRAL_TYPE_P (type))
175 return NULL_TREE;
176
177 return TYPE_MIN_VALUE (type);
178 }
179
180 /* Return whether VAL is equal to the maximum value of its type. This
181 will be true for a positive overflow infinity. We can't do a
182 simple equality comparison with TYPE_MAX_VALUE because C typedefs
183 and Ada subtypes can produce types whose TYPE_MAX_VALUE is not ==
184 to the integer constant with the same value in the type. */
185
186 static inline bool
187 vrp_val_is_max (const_tree val)
188 {
189 tree type_max = vrp_val_max (TREE_TYPE (val));
190 return (val == type_max
191 || (type_max != NULL_TREE
192 && operand_equal_p (val, type_max, 0)));
193 }
194
195 /* Return whether VAL is equal to the minimum value of its type. This
196 will be true for a negative overflow infinity. */
197
198 static inline bool
199 vrp_val_is_min (const_tree val)
200 {
201 tree type_min = vrp_val_min (TREE_TYPE (val));
202 return (val == type_min
203 || (type_min != NULL_TREE
204 && operand_equal_p (val, type_min, 0)));
205 }
206
207
208 /* Return whether TYPE should use an overflow infinity distinct from
209 TYPE_{MIN,MAX}_VALUE. We use an overflow infinity value to
210 represent a signed overflow during VRP computations. An infinity
211 is distinct from a half-range, which will go from some number to
212 TYPE_{MIN,MAX}_VALUE. */
213
214 static inline bool
215 needs_overflow_infinity (const_tree type)
216 {
217 return INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_WRAPS (type);
218 }
219
220 /* Return whether TYPE can support our overflow infinity
221 representation: we use the TREE_OVERFLOW flag, which only exists
222 for constants. If TYPE doesn't support this, we don't optimize
223 cases which would require signed overflow--we drop them to
224 VARYING. */
225
226 static inline bool
227 supports_overflow_infinity (const_tree type)
228 {
229 tree min = vrp_val_min (type), max = vrp_val_max (type);
230 #ifdef ENABLE_CHECKING
231 gcc_assert (needs_overflow_infinity (type));
232 #endif
233 return (min != NULL_TREE
234 && CONSTANT_CLASS_P (min)
235 && max != NULL_TREE
236 && CONSTANT_CLASS_P (max));
237 }
238
239 /* VAL is the maximum or minimum value of a type. Return a
240 corresponding overflow infinity. */
241
242 static inline tree
243 make_overflow_infinity (tree val)
244 {
245 gcc_checking_assert (val != NULL_TREE && CONSTANT_CLASS_P (val));
246 val = copy_node (val);
247 TREE_OVERFLOW (val) = 1;
248 return val;
249 }
250
/* Return a negative overflow infinity for TYPE: TYPE's minimum value
   with TREE_OVERFLOW set.  TYPE must satisfy supports_overflow_infinity.  */

static inline tree
negative_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_min (type));
}
259
/* Return a positive overflow infinity for TYPE: TYPE's maximum value
   with TREE_OVERFLOW set.  TYPE must satisfy supports_overflow_infinity.  */

static inline tree
positive_overflow_infinity (tree type)
{
  gcc_checking_assert (supports_overflow_infinity (type));
  return make_overflow_infinity (vrp_val_max (type));
}
268
269 /* Return whether VAL is a negative overflow infinity. */
270
271 static inline bool
272 is_negative_overflow_infinity (const_tree val)
273 {
274 return (needs_overflow_infinity (TREE_TYPE (val))
275 && CONSTANT_CLASS_P (val)
276 && TREE_OVERFLOW (val)
277 && vrp_val_is_min (val));
278 }
279
280 /* Return whether VAL is a positive overflow infinity. */
281
282 static inline bool
283 is_positive_overflow_infinity (const_tree val)
284 {
285 return (needs_overflow_infinity (TREE_TYPE (val))
286 && CONSTANT_CLASS_P (val)
287 && TREE_OVERFLOW (val)
288 && vrp_val_is_max (val));
289 }
290
291 /* Return whether VAL is a positive or negative overflow infinity. */
292
293 static inline bool
294 is_overflow_infinity (const_tree val)
295 {
296 return (needs_overflow_infinity (TREE_TYPE (val))
297 && CONSTANT_CLASS_P (val)
298 && TREE_OVERFLOW (val)
299 && (vrp_val_is_min (val) || vrp_val_is_max (val)));
300 }
301
302 /* Return whether STMT has a constant rhs that is_overflow_infinity. */
303
304 static inline bool
305 stmt_overflow_infinity (gimple stmt)
306 {
307 if (is_gimple_assign (stmt)
308 && get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) ==
309 GIMPLE_SINGLE_RHS)
310 return is_overflow_infinity (gimple_assign_rhs1 (stmt));
311 return false;
312 }
313
314 /* If VAL is now an overflow infinity, return VAL. Otherwise, return
315 the same value with TREE_OVERFLOW clear. This can be used to avoid
316 confusing a regular value with an overflow value. */
317
318 static inline tree
319 avoid_overflow_infinity (tree val)
320 {
321 if (!is_overflow_infinity (val))
322 return val;
323
324 if (vrp_val_is_max (val))
325 return vrp_val_max (TREE_TYPE (val));
326 else
327 {
328 gcc_checking_assert (vrp_val_is_min (val));
329 return vrp_val_min (TREE_TYPE (val));
330 }
331 }
332
333
/* Return true if ARG is marked with the nonnull attribute in the
   current function signature.  ARG must be a PARM_DECL of pointer
   type.  */

static bool
nonnull_arg_p (const_tree arg)
{
  tree t, attrs, fntype;
  unsigned HOST_WIDE_INT arg_num;

  gcc_assert (TREE_CODE (arg) == PARM_DECL && POINTER_TYPE_P (TREE_TYPE (arg)));

  /* The static chain decl is always non null.  */
  if (arg == cfun->static_chain_decl)
    return true;

  /* The function type may carry several "nonnull" attributes, each
     with its own argument list; scan them all.  */
  fntype = TREE_TYPE (current_function_decl);
  for (attrs = TYPE_ATTRIBUTES (fntype); attrs; attrs = TREE_CHAIN (attrs))
    {
      attrs = lookup_attribute ("nonnull", attrs);

      /* If "nonnull" wasn't specified, we know nothing about the argument.  */
      if (attrs == NULL_TREE)
	return false;

      /* If "nonnull" applies to all the arguments, then ARG is non-null.  */
      if (TREE_VALUE (attrs) == NULL_TREE)
	return true;

      /* Get the position number for ARG in the function signature.
	 Attribute argument indices are 1-based.  */
      for (arg_num = 1, t = DECL_ARGUMENTS (current_function_decl);
	   t;
	   t = DECL_CHAIN (t), arg_num++)
	{
	  if (t == arg)
	    break;
	}

      gcc_assert (t == arg);

      /* Now see if ARG_NUM is mentioned in the nonnull list.  */
      for (t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t))
	{
	  if (compare_tree_int (TREE_VALUE (t), arg_num) == 0)
	    return true;
	}
    }

  return false;
}
383
384
385 /* Set value range VR to VR_UNDEFINED. */
386
387 static inline void
388 set_value_range_to_undefined (value_range_t *vr)
389 {
390 vr->type = VR_UNDEFINED;
391 vr->min = vr->max = NULL_TREE;
392 if (vr->equiv)
393 bitmap_clear (vr->equiv);
394 }
395
396
397 /* Set value range VR to VR_VARYING. */
398
399 static inline void
400 set_value_range_to_varying (value_range_t *vr)
401 {
402 vr->type = VR_VARYING;
403 vr->min = vr->max = NULL_TREE;
404 if (vr->equiv)
405 bitmap_clear (vr->equiv);
406 }
407
408
/* Set value range VR to {T, MIN, MAX, EQUIV}.  The equivalence set
   is deep-copied from EQUIV, so EQUIV remains owned by the caller.  */

static void
set_value_range (value_range_t *vr, enum value_range_type t, tree min,
		 tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
  /* Check the validity of the range.  */
  if (t == VR_RANGE || t == VR_ANTI_RANGE)
    {
      int cmp;

      gcc_assert (min && max);

      /* An anti-range spanning the whole domain would denote the
	 empty set; at least one bound must not be a type extreme.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (min)) && t == VR_ANTI_RANGE)
	gcc_assert (!vrp_val_is_min (min) || !vrp_val_is_max (max));

      cmp = compare_values (min, max);
      gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);

      /* At most one bound may be an overflow infinity.  */
      if (needs_overflow_infinity (TREE_TYPE (min)))
	gcc_assert (!is_overflow_infinity (min)
		    || !is_overflow_infinity (max));
    }

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (min == NULL_TREE && max == NULL_TREE);

  if (t == VR_UNDEFINED || t == VR_VARYING)
    gcc_assert (equiv == NULL || bitmap_empty_p (equiv));
#endif

  vr->type = t;
  vr->min = min;
  vr->max = max;

  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.  */
  if (vr->equiv == NULL
      && equiv != NULL)
    vr->equiv = BITMAP_ALLOC (NULL);

  if (equiv != vr->equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
	bitmap_copy (vr->equiv, equiv);
      else
	bitmap_clear (vr->equiv);
    }
}
459
460
/* Set value range VR to the canonical form of {T, MIN, MAX, EQUIV}.
   This means adjusting T, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

static void
set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
				  tree min, tree max, bitmap equiv)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (t == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  else if (t == VR_VARYING)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set_value_range (vr, t, min, max, equiv);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
	 range covers all values, so for VR_RANGE it is varying and
	 for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* Swap to [MAX+1, MIN-1] and flip the range kind.  */
      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
	 that again.  But this represents an empty value range, so drop
	 to varying in this case.  */
      if (tree_int_cst_lt (max, min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      t = t == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (t == VR_ANTI_RANGE)
    {
      bool is_min = vrp_val_is_min (min);
      bool is_max = vrp_val_is_max (max);

      if (is_min && is_max)
	{
	  /* We cannot deal with empty ranges, drop to varying.
	     ??? This could be VR_UNDEFINED instead.  */
	  set_value_range_to_varying (vr);
	  return;
	}
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
	       && (is_min || is_max))
	{
	  /* Non-empty boolean ranges can always be represented
	     as a singleton range.  */
	  if (is_min)
	    min = max = vrp_val_max (TREE_TYPE (min));
	  else
	    min = max = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
      else if (is_min
	       /* As a special exception preserve non-null ranges.  */
	       && !(TYPE_UNSIGNED (TREE_TYPE (min))
		    && integer_zerop (max)))
	{
	  /* ~[TYPE_MIN, max] -> [max + 1, TYPE_MAX].  */
	  tree one = build_int_cst (TREE_TYPE (max), 1);
	  min = int_const_binop (PLUS_EXPR, max, one);
	  max = vrp_val_max (TREE_TYPE (max));
	  t = VR_RANGE;
	}
      else if (is_max)
	{
	  /* ~[min, TYPE_MAX] -> [TYPE_MIN, min - 1].  */
	  tree one = build_int_cst (TREE_TYPE (min), 1);
	  max = int_const_binop (MINUS_EXPR, min, one);
	  min = vrp_val_min (TREE_TYPE (min));
	  t = VR_RANGE;
	}
    }

  /* Drop [-INF(OVF), +INF(OVF)] to varying.  */
  if (needs_overflow_infinity (TREE_TYPE (min))
      && is_overflow_infinity (min)
      && is_overflow_infinity (max))
    {
      set_value_range_to_varying (vr);
      return;
    }

  set_value_range (vr, t, min, max, equiv);
}
580
/* Copy value range FROM into value range TO.  The equivalence set is
   deep-copied by set_value_range, so TO does not alias FROM's bitmap.  */

static inline void
copy_value_range (value_range_t *to, value_range_t *from)
{
  set_value_range (to, from->type, from->min, from->max, from->equiv);
}
588
/* Set value range VR to a single value.  This function is only called
   with values we get from statements, and exists to clear the
   TREE_OVERFLOW flag so that we don't think we have an overflow
   infinity when we shouldn't.  */

static inline void
set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
{
  gcc_assert (is_gimple_min_invariant (val));
  /* Strip any stray overflow marker before using VAL as a bound.  */
  val = avoid_overflow_infinity (val);
  set_value_range (vr, VR_RANGE, val, val, equiv);
}
601
602 /* Set value range VR to a non-negative range of type TYPE.
603 OVERFLOW_INFINITY indicates whether to use an overflow infinity
604 rather than TYPE_MAX_VALUE; this should be true if we determine
605 that the range is nonnegative based on the assumption that signed
606 overflow does not occur. */
607
608 static inline void
609 set_value_range_to_nonnegative (value_range_t *vr, tree type,
610 bool overflow_infinity)
611 {
612 tree zero;
613
614 if (overflow_infinity && !supports_overflow_infinity (type))
615 {
616 set_value_range_to_varying (vr);
617 return;
618 }
619
620 zero = build_int_cst (type, 0);
621 set_value_range (vr, VR_RANGE, zero,
622 (overflow_infinity
623 ? positive_overflow_infinity (type)
624 : TYPE_MAX_VALUE (type)),
625 vr->equiv);
626 }
627
628 /* Set value range VR to a non-NULL range of type TYPE. */
629
630 static inline void
631 set_value_range_to_nonnull (value_range_t *vr, tree type)
632 {
633 tree zero = build_int_cst (type, 0);
634 set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
635 }
636
637
638 /* Set value range VR to a NULL range of type TYPE. */
639
640 static inline void
641 set_value_range_to_null (value_range_t *vr, tree type)
642 {
643 set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
644 }
645
646
647 /* Set value range VR to a range of a truthvalue of type TYPE. */
648
649 static inline void
650 set_value_range_to_truthvalue (value_range_t *vr, tree type)
651 {
652 if (TYPE_PRECISION (type) == 1)
653 set_value_range_to_varying (vr);
654 else
655 set_value_range (vr, VR_RANGE,
656 build_int_cst (type, 0), build_int_cst (type, 1),
657 vr->equiv);
658 }
659
660
/* If abs (min) < abs (max), set VR to [-max, max], if
   abs (min) >= abs (max), set VR to [-min, min].  MIN and MAX must be
   INTEGER_CSTs of a signed integral type.  */

static void
abs_extent_range (value_range_t *vr, tree min, tree max)
{
  int cmp;

  gcc_assert (TREE_CODE (min) == INTEGER_CST);
  gcc_assert (TREE_CODE (max) == INTEGER_CST);
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (min)));
  gcc_assert (!TYPE_UNSIGNED (TREE_TYPE (min)));
  min = fold_unary (ABS_EXPR, TREE_TYPE (min), min);
  max = fold_unary (ABS_EXPR, TREE_TYPE (max), max);
  /* ABS of the most negative value overflows; we cannot represent
     that, so drop to varying.  */
  if (TREE_OVERFLOW (min) || TREE_OVERFLOW (max))
    {
      set_value_range_to_varying (vr);
      return;
    }
  cmp = compare_values (min, max);
  if (cmp == -1)
    min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), max);
  else if (cmp == 0 || cmp == 1)
    {
      max = min;
      min = fold_unary (NEGATE_EXPR, TREE_TYPE (min), min);
    }
  else
    {
      /* Incomparable bounds; give up.  */
      set_value_range_to_varying (vr);
      return;
    }
  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
695
696
/* Return value range information for VAR.

   If we have no values ranges recorded (ie, VRP is not running), then
   return NULL.  Otherwise create an empty range if none existed for VAR.  */

static value_range_t *
get_value_range (const_tree var)
{
  /* Shared read-only VARYING range returned when we must not (or can
     not) allocate a fresh one.  */
  static const struct value_range_d vr_const_varying
    = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
  value_range_t *vr;
  tree sym;
  unsigned ver = SSA_NAME_VERSION (var);

  /* If we have no recorded ranges, then return NULL.  */
  if (! vr_value)
    return NULL;

  /* If we query the range for a new SSA name return an unmodifiable VARYING.
     We should get here at most from the substitute-and-fold stage which
     will never try to change values.  */
  if (ver >= num_vr_values)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  vr = vr_value[ver];
  if (vr)
    return vr;

  /* After propagation finished do not allocate new value-ranges.  */
  if (values_propagated)
    return CONST_CAST (value_range_t *, &vr_const_varying);

  /* Create a default value range.  XCNEW zero-initializes, so the
     range starts out as VR_UNDEFINED.  */
  vr_value[ver] = vr = XCNEW (value_range_t);

  /* Defer allocating the equivalence set.  */
  vr->equiv = NULL;

  /* If VAR is a default definition of a parameter, the variable can
     take any value in VAR's type.  */
  if (SSA_NAME_IS_DEFAULT_DEF (var))
    {
      sym = SSA_NAME_VAR (var);
      if (TREE_CODE (sym) == PARM_DECL)
	{
	  /* Try to use the "nonnull" attribute to create ~[0, 0]
	     anti-ranges for pointers.  Note that this is only valid with
	     default definitions of PARM_DECLs.  */
	  if (POINTER_TYPE_P (TREE_TYPE (sym))
	      && nonnull_arg_p (sym))
	    set_value_range_to_nonnull (vr, TREE_TYPE (sym));
	  else
	    set_value_range_to_varying (vr);
	}
      /* A result returned by invisible reference is a non-null
	 pointer.  */
      else if (TREE_CODE (sym) == RESULT_DECL
	       && DECL_BY_REFERENCE (sym))
	set_value_range_to_nonnull (vr, TREE_TYPE (sym));
    }

  return vr;
}
758
759 /* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
760
761 static inline bool
762 vrp_operand_equal_p (const_tree val1, const_tree val2)
763 {
764 if (val1 == val2)
765 return true;
766 if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
767 return false;
768 if (is_overflow_infinity (val1))
769 return is_overflow_infinity (val2);
770 return true;
771 }
772
773 /* Return true, if the bitmaps B1 and B2 are equal. */
774
775 static inline bool
776 vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
777 {
778 return (b1 == b2
779 || ((!b1 || bitmap_empty_p (b1))
780 && (!b2 || bitmap_empty_p (b2)))
781 || (b1 && b2
782 && bitmap_equal_p (b1, b2)));
783 }
784
/* Update the value range and equivalence set for variable VAR to
   NEW_VR.  Return true if NEW_VR is different from VAR's previous
   value.

   NOTE: This function assumes that NEW_VR is a temporary value range
   object created for the sole purpose of updating VAR's range.  The
   storage used by the equivalence set from NEW_VR will be freed by
   this function.  Do not call update_value_range when NEW_VR
   is the range object associated with another SSA name.  */

static inline bool
update_value_range (const_tree var, value_range_t *new_vr)
{
  value_range_t *old_vr;
  bool is_new;

  /* Update the value range, if necessary.  */
  old_vr = get_value_range (var);
  is_new = old_vr->type != new_vr->type
	   || !vrp_operand_equal_p (old_vr->min, new_vr->min)
	   || !vrp_operand_equal_p (old_vr->max, new_vr->max)
	   || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);

  if (is_new)
    {
      /* Do not allow transitions up the lattice.  The following
	 is slightly more awkward than just new_vr->type < old_vr->type
	 because VR_RANGE and VR_ANTI_RANGE need to be considered
	 the same.  We may not have is_new when transitioning to
	 UNDEFINED or from VARYING.  */
      if (new_vr->type == VR_UNDEFINED
	  || old_vr->type == VR_VARYING)
	set_value_range_to_varying (old_vr);
      else
	set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
			 new_vr->equiv);
    }

  /* NEW_VR is a scratch object; release its equivalence bitmap.  */
  BITMAP_FREE (new_vr->equiv);

  return is_new;
}
827
828
829 /* Add VAR and VAR's equivalence set to EQUIV. This is the central
830 point where equivalence processing can be turned on/off. */
831
832 static void
833 add_equivalence (bitmap *equiv, const_tree var)
834 {
835 unsigned ver = SSA_NAME_VERSION (var);
836 value_range_t *vr = vr_value[ver];
837
838 if (*equiv == NULL)
839 *equiv = BITMAP_ALLOC (NULL);
840 bitmap_set_bit (*equiv, ver);
841 if (vr && vr->equiv)
842 bitmap_ior_into (*equiv, vr->equiv);
843 }
844
845
846 /* Return true if VR is ~[0, 0]. */
847
848 static inline bool
849 range_is_nonnull (value_range_t *vr)
850 {
851 return vr->type == VR_ANTI_RANGE
852 && integer_zerop (vr->min)
853 && integer_zerop (vr->max);
854 }
855
856
857 /* Return true if VR is [0, 0]. */
858
859 static inline bool
860 range_is_null (value_range_t *vr)
861 {
862 return vr->type == VR_RANGE
863 && integer_zerop (vr->min)
864 && integer_zerop (vr->max);
865 }
866
867 /* Return true if max and min of VR are INTEGER_CST. It's not necessary
868 a singleton. */
869
870 static inline bool
871 range_int_cst_p (value_range_t *vr)
872 {
873 return (vr->type == VR_RANGE
874 && TREE_CODE (vr->max) == INTEGER_CST
875 && TREE_CODE (vr->min) == INTEGER_CST);
876 }
877
878 /* Return true if VR is a INTEGER_CST singleton. */
879
880 static inline bool
881 range_int_cst_singleton_p (value_range_t *vr)
882 {
883 return (range_int_cst_p (vr)
884 && !TREE_OVERFLOW (vr->min)
885 && !TREE_OVERFLOW (vr->max)
886 && tree_int_cst_equal (vr->min, vr->max));
887 }
888
889 /* Return true if value range VR involves at least one symbol. */
890
891 static inline bool
892 symbolic_range_p (value_range_t *vr)
893 {
894 return (!is_gimple_min_invariant (vr->min)
895 || !is_gimple_min_invariant (vr->max));
896 }
897
898 /* Return true if value range VR uses an overflow infinity. */
899
900 static inline bool
901 overflow_infinity_range_p (value_range_t *vr)
902 {
903 return (vr->type == VR_RANGE
904 && (is_overflow_infinity (vr->min)
905 || is_overflow_infinity (vr->max)));
906 }
907
908 /* Return false if we can not make a valid comparison based on VR;
909 this will be the case if it uses an overflow infinity and overflow
910 is not undefined (i.e., -fno-strict-overflow is in effect).
911 Otherwise return true, and set *STRICT_OVERFLOW_P to true if VR
912 uses an overflow infinity. */
913
914 static bool
915 usable_range_p (value_range_t *vr, bool *strict_overflow_p)
916 {
917 gcc_assert (vr->type == VR_RANGE);
918 if (is_overflow_infinity (vr->min))
919 {
920 *strict_overflow_p = true;
921 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->min)))
922 return false;
923 }
924 if (is_overflow_infinity (vr->max))
925 {
926 *strict_overflow_p = true;
927 if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (vr->max)))
928 return false;
929 }
930 return true;
931 }
932
933
934 /* Return true if the result of assignment STMT is know to be non-negative.
935 If the return value is based on the assumption that signed overflow is
936 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
937 *STRICT_OVERFLOW_P.*/
938
939 static bool
940 gimple_assign_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
941 {
942 enum tree_code code = gimple_assign_rhs_code (stmt);
943 switch (get_gimple_rhs_class (code))
944 {
945 case GIMPLE_UNARY_RHS:
946 return tree_unary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
947 gimple_expr_type (stmt),
948 gimple_assign_rhs1 (stmt),
949 strict_overflow_p);
950 case GIMPLE_BINARY_RHS:
951 return tree_binary_nonnegative_warnv_p (gimple_assign_rhs_code (stmt),
952 gimple_expr_type (stmt),
953 gimple_assign_rhs1 (stmt),
954 gimple_assign_rhs2 (stmt),
955 strict_overflow_p);
956 case GIMPLE_TERNARY_RHS:
957 return false;
958 case GIMPLE_SINGLE_RHS:
959 return tree_single_nonnegative_warnv_p (gimple_assign_rhs1 (stmt),
960 strict_overflow_p);
961 case GIMPLE_INVALID_RHS:
962 gcc_unreachable ();
963 default:
964 gcc_unreachable ();
965 }
966 }
967
968 /* Return true if return value of call STMT is know to be non-negative.
969 If the return value is based on the assumption that signed overflow is
970 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
971 *STRICT_OVERFLOW_P.*/
972
973 static bool
974 gimple_call_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
975 {
976 tree arg0 = gimple_call_num_args (stmt) > 0 ?
977 gimple_call_arg (stmt, 0) : NULL_TREE;
978 tree arg1 = gimple_call_num_args (stmt) > 1 ?
979 gimple_call_arg (stmt, 1) : NULL_TREE;
980
981 return tree_call_nonnegative_warnv_p (gimple_expr_type (stmt),
982 gimple_call_fndecl (stmt),
983 arg0,
984 arg1,
985 strict_overflow_p);
986 }
987
988 /* Return true if STMT is know to to compute a non-negative value.
989 If the return value is based on the assumption that signed overflow is
990 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
991 *STRICT_OVERFLOW_P.*/
992
993 static bool
994 gimple_stmt_nonnegative_warnv_p (gimple stmt, bool *strict_overflow_p)
995 {
996 switch (gimple_code (stmt))
997 {
998 case GIMPLE_ASSIGN:
999 return gimple_assign_nonnegative_warnv_p (stmt, strict_overflow_p);
1000 case GIMPLE_CALL:
1001 return gimple_call_nonnegative_warnv_p (stmt, strict_overflow_p);
1002 default:
1003 gcc_unreachable ();
1004 }
1005 }
1006
/* Return true if the result of assignment STMT is known to be non-zero.
   If the return value is based on the assumption that signed overflow is
   undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
   *STRICT_OVERFLOW_P.  */

static bool
gimple_assign_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  switch (get_gimple_rhs_class (code))
    {
    case GIMPLE_UNARY_RHS:
      return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					 gimple_expr_type (stmt),
					 gimple_assign_rhs1 (stmt),
					 strict_overflow_p);
    case GIMPLE_BINARY_RHS:
      return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
					  gimple_expr_type (stmt),
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  strict_overflow_p);
    case GIMPLE_TERNARY_RHS:
      /* No fold-const helper exists for ternary codes; assume nothing.  */
      return false;
    case GIMPLE_SINGLE_RHS:
      return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
					  strict_overflow_p);
    case GIMPLE_INVALID_RHS:
      gcc_unreachable ();
    default:
      gcc_unreachable ();
    }
}
1040
1041 /* Return true if STMT is know to to compute a non-zero value.
1042 If the return value is based on the assumption that signed overflow is
1043 undefined, set *STRICT_OVERFLOW_P to true; otherwise, don't change
1044 *STRICT_OVERFLOW_P.*/
1045
1046 static bool
1047 gimple_stmt_nonzero_warnv_p (gimple stmt, bool *strict_overflow_p)
1048 {
1049 switch (gimple_code (stmt))
1050 {
1051 case GIMPLE_ASSIGN:
1052 return gimple_assign_nonzero_warnv_p (stmt, strict_overflow_p);
1053 case GIMPLE_CALL:
1054 return gimple_alloca_call_p (stmt);
1055 default:
1056 gcc_unreachable ();
1057 }
1058 }
1059
/* Like tree_expr_nonzero_warnv_p, but this function uses value ranges
   obtained so far.  */

static bool
vrp_stmt_computes_nonzero (gimple stmt, bool *strict_overflow_p)
{
  if (gimple_stmt_nonzero_warnv_p (stmt, strict_overflow_p))
    return true;

  /* If we have an expression of the form &X->a, then the expression
     is nonnull if X is nonnull.  */
  if (is_gimple_assign (stmt)
      && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
    {
      tree expr = gimple_assign_rhs1 (stmt);
      tree base = get_base_address (TREE_OPERAND (expr, 0));

      if (base != NULL_TREE
	  && TREE_CODE (base) == MEM_REF
	  && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	{
	  /* Consult the range recorded for the base pointer.  */
	  value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
	  if (range_is_nonnull (vr))
	    return true;
	}
    }

  return false;
}
1089
1090 /* Returns true if EXPR is a valid value (as expected by compare_values) --
1091 a gimple invariant, or SSA_NAME +- CST. */
1092
1093 static bool
1094 valid_value_p (tree expr)
1095 {
1096 if (TREE_CODE (expr) == SSA_NAME)
1097 return true;
1098
1099 if (TREE_CODE (expr) == PLUS_EXPR
1100 || TREE_CODE (expr) == MINUS_EXPR)
1101 return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
1102 && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
1103
1104 return is_gimple_min_invariant (expr);
1105 }
1106
/* Return
    1 if VAL < VAL2
    0 if !(VAL < VAL2)
   -2 if those are incomparable.  */
static inline int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (val)))
	return INT_CST_LT_UNSIGNED (val, val2);
      else
	{
	  if (INT_CST_LT (val, val2))
	    return 1;
	}
    }
  else
    {
      tree tcmp;

      /* Folding may emit overflow warnings; we are only probing here,
	 so suppress and discard them.  */
      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
	  || TREE_CODE (tcmp) != INTEGER_CST)
	return -2;

      if (!integer_zerop (tcmp))
	return 1;
    }

  /* val >= val2, not considering overflow infinity.  */
  /* The plain comparison said !(val < val2), but an overflow infinity
     still compares below (for -INF) or above (for +INF) every ordinary
     value, so refine the answer here.  */
  if (is_negative_overflow_infinity (val))
    return is_negative_overflow_infinity (val2) ? 0 : 1;
  else if (is_positive_overflow_infinity (val2))
    return is_positive_overflow_infinity (val) ? 0 : 1;

  return 0;
}
1151
/* Compare two values VAL1 and VAL2.  Return

	-2 if VAL1 and VAL2 cannot be compared at compile-time,
	-1 if VAL1 < VAL2,
	 0 if VAL1 == VAL2,
	+1 if VAL1 > VAL2, and
	+2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

static int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));
  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  /* Symbolic comparison: both operands are of the form NAME, NAME + CST
     or NAME - CST.  */
  if ((TREE_CODE (val1) == SSA_NAME
       || TREE_CODE (val1) == PLUS_EXPR
       || TREE_CODE (val1) == MINUS_EXPR)
      && (TREE_CODE (val2) == SSA_NAME
	  || TREE_CODE (val2) == PLUS_EXPR
	  || TREE_CODE (val2) == MINUS_EXPR))
    {
      tree n1, c1, n2, c2;
      enum tree_code code1, code2;

      /* If VAL1 and VAL2 are of the form 'NAME [+-] CST' or 'NAME',
	 return -1 or +1 accordingly.  If VAL1 and VAL2 don't use the
	 same name, return -2.  */
      if (TREE_CODE (val1) == SSA_NAME)
	{
	  code1 = SSA_NAME;
	  n1 = val1;
	  c1 = NULL_TREE;
	}
      else
	{
	  code1 = TREE_CODE (val1);
	  n1 = TREE_OPERAND (val1, 0);
	  c1 = TREE_OPERAND (val1, 1);
	  /* Canonicalize a negative constant by negating it and
	     flipping PLUS <-> MINUS, so that below CST is always
	     non-negative.  */
	  if (tree_int_cst_sgn (c1) == -1)
	    {
	      if (is_negative_overflow_infinity (c1))
		return -2;
	      c1 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c1), c1);
	      if (!c1)
		return -2;
	      code1 = code1 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      if (TREE_CODE (val2) == SSA_NAME)
	{
	  code2 = SSA_NAME;
	  n2 = val2;
	  c2 = NULL_TREE;
	}
      else
	{
	  code2 = TREE_CODE (val2);
	  n2 = TREE_OPERAND (val2, 0);
	  c2 = TREE_OPERAND (val2, 1);
	  if (tree_int_cst_sgn (c2) == -1)
	    {
	      if (is_negative_overflow_infinity (c2))
		return -2;
	      c2 = fold_unary_to_constant (NEGATE_EXPR, TREE_TYPE (c2), c2);
	      if (!c2)
		return -2;
	      code2 = code2 == MINUS_EXPR ? PLUS_EXPR : MINUS_EXPR;
	    }
	}

      /* Both values must use the same name.  */
      if (n1 != n2)
	return -2;

      if (code1 == SSA_NAME
	  && code2 == SSA_NAME)
	/* NAME == NAME  */
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
	return -2;

      /* All conclusions below assume NAME [+-] CST does not wrap;
	 record that we relied on undefined signed overflow (unless the
	 expression is marked as deliberately overflow-free).  */
      if (strict_overflow_p != NULL
	  && (code1 == SSA_NAME || !TREE_NO_WARNING (val1))
	  && (code2 == SSA_NAME || !TREE_NO_WARNING (val2)))
	*strict_overflow_p = true;

      if (code1 == SSA_NAME)
	{
	  if (code2 == PLUS_EXPR)
	    /* NAME < NAME + CST  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME > NAME - CST  */
	    return 1;
	}
      else if (code1 == PLUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME + CST > NAME  */
	    return 1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME + CST1 > NAME + CST2, if CST1 > CST2  */
	    return compare_values_warnv (c1, c2, strict_overflow_p);
	  else if (code2 == MINUS_EXPR)
	    /* NAME + CST1 > NAME - CST2  */
	    return 1;
	}
      else if (code1 == MINUS_EXPR)
	{
	  if (code2 == SSA_NAME)
	    /* NAME - CST < NAME  */
	    return -1;
	  else if (code2 == PLUS_EXPR)
	    /* NAME - CST1 < NAME + CST2  */
	    return -1;
	  else if (code2 == MINUS_EXPR)
	    /* NAME - CST1 > NAME - CST2, if CST1 < CST2.  Notice that
	       C1 and C2 are swapped in the call to compare_values.  */
	    return compare_values_warnv (c2, c1, strict_overflow_p);
	}

      gcc_unreachable ();
    }

  /* We cannot compare non-constants.  */
  if (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2))
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values, except for overflow
	 infinities.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	{
	  if (strict_overflow_p != NULL)
	    *strict_overflow_p = true;
	  if (is_negative_overflow_infinity (val1))
	    return is_negative_overflow_infinity (val2) ? 0 : -1;
	  else if (is_negative_overflow_infinity (val2))
	    return 1;
	  else if (is_positive_overflow_infinity (val1))
	    return is_positive_overflow_infinity (val2) ? 0 : 1;
	  else if (is_positive_overflow_infinity (val2))
	    return -1;
	  return -2;
	}

      return tree_int_cst_compare (val1, val2);
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
	return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
	return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
	return 1;

      /* If VAL1 is different than VAL2, return +2.
	 For integer constants we either have already returned -1 or 1
	 or they are equivalent.  We still might succeed in proving
	 something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
	  || TREE_CODE (val2) != INTEGER_CST)
	{
	  t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
	  if (t && integer_onep (t))
	    return 2;
	}

      return -2;
    }
}
1351
1352 /* Compare values like compare_values_warnv, but treat comparisons of
1353 nonconstants which rely on undefined overflow as incomparable. */
1354
1355 static int
1356 compare_values (tree val1, tree val2)
1357 {
1358 bool sop;
1359 int ret;
1360
1361 sop = false;
1362 ret = compare_values_warnv (val1, val2, &sop);
1363 if (sop
1364 && (!is_gimple_min_invariant (val1) || !is_gimple_min_invariant (val2)))
1365 ret = -2;
1366 return ret;
1367 }
1368
1369
1370 /* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
1371 0 if VAL is not inside [MIN, MAX],
1372 -2 if we cannot tell either way.
1373
1374 Benchmark compile/20001226-1.c compilation time after changing this
1375 function. */
1376
1377 static inline int
1378 value_inside_range (tree val, tree min, tree max)
1379 {
1380 int cmp1, cmp2;
1381
1382 cmp1 = operand_less_p (val, min);
1383 if (cmp1 == -2)
1384 return -2;
1385 if (cmp1 == 1)
1386 return 0;
1387
1388 cmp2 = operand_less_p (max, val);
1389 if (cmp2 == -2)
1390 return -2;
1391
1392 return !cmp2;
1393 }
1394
1395
1396 /* Return true if value ranges VR0 and VR1 have a non-empty
1397 intersection.
1398
1399 Benchmark compile/20001226-1.c compilation time after changing this
1400 function.
1401 */
1402
1403 static inline bool
1404 value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
1405 {
1406 /* The value ranges do not intersect if the maximum of the first range is
1407 less than the minimum of the second range or vice versa.
1408 When those relations are unknown, we can't do any better. */
1409 if (operand_less_p (vr0->max, vr1->min) != 0)
1410 return false;
1411 if (operand_less_p (vr1->max, vr0->min) != 0)
1412 return false;
1413 return true;
1414 }
1415
1416
1417 /* Return 1 if [MIN, MAX] includes the value zero, 0 if it does not
1418 include the value zero, -2 if we cannot tell. */
1419
1420 static inline int
1421 range_includes_zero_p (tree min, tree max)
1422 {
1423 tree zero = build_int_cst (TREE_TYPE (min), 0);
1424 return value_inside_range (zero, min, max);
1425 }
1426
1427 /* Return true if *VR is know to only contain nonnegative values. */
1428
1429 static inline bool
1430 value_range_nonnegative_p (value_range_t *vr)
1431 {
1432 /* Testing for VR_ANTI_RANGE is not useful here as any anti-range
1433 which would return a useful value should be encoded as a
1434 VR_RANGE. */
1435 if (vr->type == VR_RANGE)
1436 {
1437 int result = compare_values (vr->min, integer_zero_node);
1438 return (result == 0 || result == 1);
1439 }
1440
1441 return false;
1442 }
1443
1444 /* Return true if T, an SSA_NAME, is known to be nonnegative. Return
1445 false otherwise or if no value range information is available. */
1446
1447 bool
1448 ssa_name_nonnegative_p (const_tree t)
1449 {
1450 value_range_t *vr = get_value_range (t);
1451
1452 if (INTEGRAL_TYPE_P (t)
1453 && TYPE_UNSIGNED (t))
1454 return true;
1455
1456 if (!vr)
1457 return false;
1458
1459 return value_range_nonnegative_p (vr);
1460 }
1461
/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.  */

static tree
value_range_constant_singleton (value_range_t *vr)
{
  /* A singleton is a VR_RANGE whose bounds are the same invariant
     constant.  */
  if (vr->type == VR_RANGE
      && operand_equal_p (vr->min, vr->max, 0)
      && is_gimple_min_invariant (vr->min))
    return vr->min;

  return NULL_TREE;
}
1475
1476 /* If OP has a value range with a single constant value return that,
1477 otherwise return NULL_TREE. This returns OP itself if OP is a
1478 constant. */
1479
1480 static tree
1481 op_with_constant_singleton_value_range (tree op)
1482 {
1483 if (is_gimple_min_invariant (op))
1484 return op;
1485
1486 if (TREE_CODE (op) != SSA_NAME)
1487 return NULL_TREE;
1488
1489 return value_range_constant_singleton (get_value_range (op));
1490 }
1491
1492 /* Return true if op is in a boolean [0, 1] value-range. */
1493
1494 static bool
1495 op_with_boolean_value_range_p (tree op)
1496 {
1497 value_range_t *vr;
1498
1499 if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
1500 return true;
1501
1502 if (integer_zerop (op)
1503 || integer_onep (op))
1504 return true;
1505
1506 if (TREE_CODE (op) != SSA_NAME)
1507 return false;
1508
1509 vr = get_value_range (op);
1510 return (vr->type == VR_RANGE
1511 && integer_zerop (vr->min)
1512 && integer_onep (vr->max));
1513 }
1514
/* Extract value range information from an ASSERT_EXPR EXPR and store
   it in *VR_P.  */

static void
extract_range_from_assert (value_range_t *vr_p, tree expr)
{
  tree var, cond, limit, min, max, type;
  value_range_t *limit_vr;
  enum tree_code cond_code;

  var = ASSERT_EXPR_VAR (expr);
  cond = ASSERT_EXPR_COND (expr);

  gcc_assert (COMPARISON_CLASS_P (cond));

  /* Find VAR in the ASSERT_EXPR conditional.  */
  if (var == TREE_OPERAND (cond, 0)
      || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
      || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
    {
      /* If the predicate is of the form VAR COMP LIMIT, then we just
	 take LIMIT from the RHS and use the same comparison code.  */
      cond_code = TREE_CODE (cond);
      limit = TREE_OPERAND (cond, 1);
      cond = TREE_OPERAND (cond, 0);
    }
  else
    {
      /* If the predicate is of the form LIMIT COMP VAR, then we need
	 to flip around the comparison code to create the proper range
	 for VAR.  */
      cond_code = swap_tree_comparison (TREE_CODE (cond));
      limit = TREE_OPERAND (cond, 0);
      cond = TREE_OPERAND (cond, 1);
    }

  limit = avoid_overflow_infinity (limit);

  type = TREE_TYPE (var);
  gcc_assert (limit != var);

  /* For pointer arithmetic, we only keep track of pointer equality
     and inequality.  */
  if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
    {
      set_value_range_to_varying (vr_p);
      return;
    }

  /* If LIMIT is another SSA name and LIMIT has a range of its own,
     try to use LIMIT's range to avoid creating symbolic ranges
     unnecessarily. */
  limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;

  /* LIMIT's range is only interesting if it has any useful information.  */
  if (limit_vr
      && (limit_vr->type == VR_UNDEFINED
	  || limit_vr->type == VR_VARYING
	  || symbolic_range_p (limit_vr)))
    limit_vr = NULL;

  /* Initially, the new range has the same set of equivalences of
     VAR's range.  This will be revised before returning the final
     value.  Since assertions may be chained via mutually exclusive
     predicates, we will need to trim the set of equivalences before
     we are done.  */
  gcc_assert (vr_p->equiv == NULL);
  add_equivalence (&vr_p->equiv, var);

  /* Extract a new range based on the asserted comparison for VAR and
     LIMIT's value range.  Notice that if LIMIT has an anti-range, we
     will only use it for equality comparisons (EQ_EXPR).  For any
     other kind of assertion, we cannot derive a range from LIMIT's
     anti-range that can be used to describe the new range.  For
     instance, ASSERT_EXPR <x_2, x_2 <= b_4>.  If b_4 is ~[2, 10],
     then b_4 takes on the ranges [-INF, 1] and [11, +INF].  There is
     no single range for x_2 that could describe LE_EXPR, so we might
     as well build the range [b_4, +INF] for it.
     One special case we handle is extracting a range from a
     range test encoded as (unsigned)var + CST <= limit.  */
  if (TREE_CODE (cond) == NOP_EXPR
      || TREE_CODE (cond) == PLUS_EXPR)
    {
      if (TREE_CODE (cond) == PLUS_EXPR)
	{
	  /* (unsigned)var + CST <= limit is equivalent to
	     -CST <= var <= limit - CST in the unsigned domain.  */
	  min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (cond, 1)),
			     TREE_OPERAND (cond, 1));
	  max = int_const_binop (PLUS_EXPR, limit, min);
	  cond = TREE_OPERAND (cond, 0);
	}
      else
	{
	  min = build_int_cst (TREE_TYPE (var), 0);
	  max = limit;
	}

      /* Make sure to not set TREE_OVERFLOW on the final type
	 conversion.  We are willingly interpreting large positive
	 unsigned values as negative signed values here.  */
      min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
				   0, false);
      max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
				   0, false);

      /* We can transform a max, min range to an anti-range or
         vice-versa.  Use set_and_canonicalize_value_range which does
	 this for us.  */
      if (cond_code == LE_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_RANGE,
					  min, max, vr_p->equiv);
      else if (cond_code == GT_EXPR)
	set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					  min, max, vr_p->equiv);
      else
	gcc_unreachable ();
    }
  else if (cond_code == EQ_EXPR)
    {
      enum value_range_type range_type;

      /* Prefer LIMIT's own range when available; otherwise assert the
	 exact singleton [LIMIT, LIMIT].  */
      if (limit_vr)
	{
	  range_type = limit_vr->type;
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  range_type = VR_RANGE;
	  min = limit;
	  max = limit;
	}

      set_value_range (vr_p, range_type, min, max, vr_p->equiv);

      /* When asserting the equality VAR == LIMIT and LIMIT is another
	 SSA name, the new range will also inherit the equivalence set
	 from LIMIT.  */
      if (TREE_CODE (limit) == SSA_NAME)
	add_equivalence (&vr_p->equiv, limit);
    }
  else if (cond_code == NE_EXPR)
    {
      /* As described above, when LIMIT's range is an anti-range and
	 this assertion is an inequality (NE_EXPR), then we cannot
	 derive anything from the anti-range.  For instance, if
	 LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
	 not imply that VAR's range is [0, 0].  So, in the case of
	 anti-ranges, we just assert the inequality using LIMIT and
	 not its anti-range.

	 If LIMIT_VR is a range, we can only use it to build a new
	 anti-range if LIMIT_VR is a single-valued range.  For
	 instance, if LIMIT_VR is [0, 1], the predicate
	 VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
	 Rather, it means that for value 0 VAR should be ~[0, 0]
	 and for value 1, VAR should be ~[1, 1].  We cannot
	 represent these ranges.

	 The only situation in which we can build a valid
	 anti-range is when LIMIT_VR is a single-valued range
	 (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX).  In that case,
	 build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX].  */
      if (limit_vr
	  && limit_vr->type == VR_RANGE
	  && compare_values (limit_vr->min, limit_vr->max) == 0)
	{
	  min = limit_vr->min;
	  max = limit_vr->max;
	}
      else
	{
	  /* In any other case, we cannot use LIMIT's range to build a
	     valid anti-range.  */
	  min = max = limit;
	}

      /* If MIN and MAX cover the whole range for their type, then
	 just use the original LIMIT.  */
      if (INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (min)
	  && vrp_val_is_max (max))
	min = max = limit;

      set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
					min, max, vr_p->equiv);
    }
  else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	max = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
	     LT_EXPR.  */
	  max = limit_vr->max;
	}

      /* If the maximum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == LT_EXPR
	   && compare_values (max, min) == 0)
	  || (CONSTANT_CLASS_P (max) && TREE_OVERFLOW (max)))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For LT_EXPR, we create the range [MIN, MAX - 1].  */
	  if (cond_code == LT_EXPR)
	    {
	      /* For a 1-bit signed type, subtracting 1 would overflow;
		 add -1 instead, which wraps to the same value.  */
	      if (TYPE_PRECISION (TREE_TYPE (max)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (max)))
		max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), -1));
	      else
		max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
				   build_int_cst (TREE_TYPE (max), 1));
	      if (EXPR_P (max))
		TREE_NO_WARNING (max) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (type);

      if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
	min = limit;
      else
	{
	  /* If LIMIT_VR is of the form [N1, N2], we need to build the
	     range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
	     GT_EXPR.  */
	  min = limit_vr->min;
	}

      /* If the minimum value forces us to be out of bounds, simply punt.
	 It would be pointless to try and do anything more since this
	 all should be optimized away above us.  */
      if ((cond_code == GT_EXPR
	   && compare_values (min, max) == 0)
	  || (CONSTANT_CLASS_P (min) && TREE_OVERFLOW (min)))
	set_value_range_to_varying (vr_p);
      else
	{
	  /* For GT_EXPR, we create the range [MIN + 1, MAX].  */
	  if (cond_code == GT_EXPR)
	    {
	      /* Mirror image of the 1-bit signed case above.  */
	      if (TYPE_PRECISION (TREE_TYPE (min)) == 1
		  && !TYPE_UNSIGNED (TREE_TYPE (min)))
		min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), -1));
	      else
		min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
				   build_int_cst (TREE_TYPE (min), 1));
	      if (EXPR_P (min))
		TREE_NO_WARNING (min) = 1;
	    }

	  set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
	}
    }
  else
    gcc_unreachable ();

  /* Finally intersect the new range with what we already know about var.  */
  vrp_intersect_ranges (vr_p, get_value_range (var));
}
1788
1789
1790 /* Extract range information from SSA name VAR and store it in VR. If
1791 VAR has an interesting range, use it. Otherwise, create the
1792 range [VAR, VAR] and return it. This is useful in situations where
1793 we may have conditionals testing values of VARYING names. For
1794 instance,
1795
1796 x_3 = y_5;
1797 if (x_3 > y_5)
1798 ...
1799
1800 Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
1801 always false. */
1802
1803 static void
1804 extract_range_from_ssa_name (value_range_t *vr, tree var)
1805 {
1806 value_range_t *var_vr = get_value_range (var);
1807
1808 if (var_vr->type != VR_UNDEFINED && var_vr->type != VR_VARYING)
1809 copy_value_range (vr, var_vr);
1810 else
1811 set_value_range (vr, VR_RANGE, var, var, NULL);
1812
1813 add_equivalence (&vr->equiv, var);
1814 }
1815
1816
/* Wrapper around int_const_binop.  If the operation overflows and we
   are not using wrapping arithmetic, then adjust the result to be
   -INF or +INF depending on CODE, VAL1 and VAL2.  This can return
   NULL_TREE if we need to use an overflow infinity representation but
   the type does not support it.  */

static tree
vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
{
  tree res;

  res = int_const_binop (code, val1, val2);

  /* If we are using unsigned arithmetic, operate symbolically
     on -INF and +INF as int_const_binop only handles signed overflow.  */
  if (TYPE_UNSIGNED (TREE_TYPE (val1)))
    {
      int checkz = compare_values (res, val1);
      bool overflow = false;

      /* Ensure that res = val1 [+*] val2 >= val1
         or that res = val1 - val2 <= val1.  */
      if ((code == PLUS_EXPR
	   && !(checkz == 1 || checkz == 0))
          || (code == MINUS_EXPR
	      && !(checkz == 0 || checkz == -1)))
	{
	  overflow = true;
	}
      /* Checking for multiplication overflow is done by dividing the
	 output of the multiplication by the first input of the
	 multiplication.  If the result of that division operation is
	 not equal to the second input of the multiplication, then the
	 multiplication overflowed.  */
      else if (code == MULT_EXPR && !integer_zerop (val1))
	{
	  tree tmp = int_const_binop (TRUNC_DIV_EXPR,
				      res,
				      val1);
	  int check = compare_values (tmp, val2);

	  if (check != 0)
	    overflow = true;
	}

      if (overflow)
	{
	  /* Mark the result as overflowed on a private copy so that
	     the shared constant node is not modified.  */
	  res = copy_node (res);
	  TREE_OVERFLOW (res) = 1;
	}

    }
  else if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (val1)))
    /* If the signed operation wraps then int_const_binop has done
       everything we want.  */
    ;
  else if ((TREE_OVERFLOW (res)
	    && !TREE_OVERFLOW (val1)
	    && !TREE_OVERFLOW (val2))
	   || is_overflow_infinity (val1)
	   || is_overflow_infinity (val2))
    {
      /* If the operation overflowed but neither VAL1 nor VAL2 are
	 overflown, return -INF or +INF depending on the operation
	 and the combination of signs of the operands.  */
      int sgn1 = tree_int_cst_sgn (val1);
      int sgn2 = tree_int_cst_sgn (val2);

      if (needs_overflow_infinity (TREE_TYPE (res))
	  && !supports_overflow_infinity (TREE_TYPE (res)))
	return NULL_TREE;

      /* We have to punt on adding infinities of different signs,
	 since we can't tell what the sign of the result should be.
	 Likewise for subtracting infinities of the same sign.  */
      if (((code == PLUS_EXPR && sgn1 != sgn2)
	   || (code == MINUS_EXPR && sgn1 == sgn2))
	  && is_overflow_infinity (val1)
	  && is_overflow_infinity (val2))
	return NULL_TREE;

      /* Don't try to handle division or shifting of infinities.  */
      if ((code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR
	   || code == RSHIFT_EXPR)
	  && (is_overflow_infinity (val1)
	      || is_overflow_infinity (val2)))
	return NULL_TREE;

      /* Notice that we only need to handle the restricted set of
	 operations handled by extract_range_from_binary_expr.
	 Among them, only multiplication, addition and subtraction
	 can yield overflow without overflown operands because we
	 are working with integral types only... except in the
	 case VAL1 = -INF and VAL2 = -1 which overflows to +INF
	 for division too.  */

      /* For multiplication, the sign of the overflow is given
	 by the comparison of the signs of the operands.  */
      if ((code == MULT_EXPR && sgn1 == sgn2)
          /* For addition, the operands must be of the same sign
	     to yield an overflow.  Its sign is therefore that
	     of one of the operands, for example the first.  For
	     infinite operands X + -INF is negative, not positive.  */
	  || (code == PLUS_EXPR
	      && (sgn1 >= 0
		  ? !is_negative_overflow_infinity (val2)
		  : is_positive_overflow_infinity (val2)))
	  /* For subtraction, non-infinite operands must be of
	     different signs to yield an overflow.  Its sign is
	     therefore that of the first operand or the opposite of
	     that of the second operand.  A first operand of 0 counts
	     as positive here, for the corner case 0 - (-INF), which
	     overflows, but must yield +INF.  For infinite operands 0
	     - INF is negative, not positive.  */
	  || (code == MINUS_EXPR
	      && (sgn1 >= 0
		  ? !is_positive_overflow_infinity (val2)
		  : is_negative_overflow_infinity (val2)))
	  /* We only get in here with positive shift count, so the
	     overflow direction is the same as the sign of val1.
	     Actually rshift does not overflow at all, but we only
	     handle the case of shifting overflowed -INF and +INF.  */
	  || (code == RSHIFT_EXPR
	      && sgn1 >= 0)
	  /* For division, the only case is -INF / -1 = +INF.  */
	  || code == TRUNC_DIV_EXPR
	  || code == FLOOR_DIV_EXPR
	  || code == CEIL_DIV_EXPR
	  || code == EXACT_DIV_EXPR
	  || code == ROUND_DIV_EXPR)
	return (needs_overflow_infinity (TREE_TYPE (res))
		? positive_overflow_infinity (TREE_TYPE (res))
		: TYPE_MAX_VALUE (TREE_TYPE (res)));
      else
	return (needs_overflow_infinity (TREE_TYPE (res))
		? negative_overflow_infinity (TREE_TYPE (res))
		: TYPE_MIN_VALUE (TREE_TYPE (res)));
    }

  return res;
}
1962
1963
/* For range VR compute two double_int bitmasks.  In *MAY_BE_NONZERO,
   a bit that is unset means that for all numbers in the range the bit
   is 0; otherwise it might be 0 or 1.  In *MUST_BE_NONZERO, a bit that
   is set means that for all numbers in the range the bit is 1;
   otherwise it might be 0 or 1.  Returns false (leaving the masks at
   their most conservative values) if VR is not a non-overflowed
   integer constant range.  */

static bool
zero_nonzero_bits_from_vr (value_range_t *vr,
			   double_int *may_be_nonzero,
			   double_int *must_be_nonzero)
{
  /* Start from the most conservative answer: every bit may be set,
     no bit is known set.  */
  *may_be_nonzero = double_int_minus_one;
  *must_be_nonzero = double_int_zero;
  if (!range_int_cst_p (vr)
      || TREE_OVERFLOW (vr->min)
      || TREE_OVERFLOW (vr->max))
    return false;

  if (range_int_cst_singleton_p (vr))
    {
      /* A single value fixes every bit exactly.  */
      *may_be_nonzero = tree_to_double_int (vr->min);
      *must_be_nonzero = *may_be_nonzero;
    }
  else if (tree_int_cst_sgn (vr->min) >= 0
	   || tree_int_cst_sgn (vr->max) < 0)
    {
      /* Both bounds have the same sign: the bits above the highest bit
	 in which min and max differ are fixed; all bits below it can
	 vary.  XOR finds the highest differing bit.  */
      double_int dmin = tree_to_double_int (vr->min);
      double_int dmax = tree_to_double_int (vr->max);
      double_int xor_mask = dmin ^ dmax;
      *may_be_nonzero = dmin | dmax;
      *must_be_nonzero = dmin & dmax;
      if (xor_mask.high != 0)
	{
	  unsigned HOST_WIDE_INT mask
	      = ((unsigned HOST_WIDE_INT) 1
		 << floor_log2 (xor_mask.high)) - 1;
	  may_be_nonzero->low = ALL_ONES;
	  may_be_nonzero->high |= mask;
	  must_be_nonzero->low = 0;
	  must_be_nonzero->high &= ~mask;
	}
      else if (xor_mask.low != 0)
	{
	  unsigned HOST_WIDE_INT mask
	      = ((unsigned HOST_WIDE_INT) 1
		 << floor_log2 (xor_mask.low)) - 1;
	  may_be_nonzero->low |= mask;
	  must_be_nonzero->low &= ~mask;
	}
    }

  return true;
}
2017
/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (value_range_t *ar,
			value_range_t *vr0, value_range_t *vr1)
{
  tree type = TREE_TYPE (ar->min);

  vr0->type = VR_UNDEFINED;
  vr1->type = VR_UNDEFINED;

  /* Only constant anti-ranges of a type with known extreme values can
     be decomposed.  */
  if (ar->type != VR_ANTI_RANGE
      || TREE_CODE (ar->min) != INTEGER_CST
      || TREE_CODE (ar->max) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  /* The part below the excluded interval: [TYPE_MIN, ar->min - 1].  */
  if (!vrp_val_is_min (ar->min))
    {
      vr0->type = VR_RANGE;
      vr0->min = vrp_val_min (type);
      vr0->max
	= double_int_to_tree (type,
			      tree_to_double_int (ar->min) - double_int_one);
    }
  /* The part above the excluded interval: [ar->max + 1, TYPE_MAX].  */
  if (!vrp_val_is_max (ar->max))
    {
      vr1->type = VR_RANGE;
      vr1->min
	= double_int_to_tree (type,
			      tree_to_double_int (ar->max) + double_int_one);
      vr1->max = vrp_val_max (type);
    }
  /* If only the upper part exists, shift it into *VR0 so a single
     range always lands in *VR0.  */
  if (vr0->type == VR_UNDEFINED)
    {
      *vr0 = *vr1;
      vr1->type = VR_UNDEFINED;
    }

  return vr0->type != VR_UNDEFINED;
}
2063
2064 /* Helper to extract a value-range *VR for a multiplicative operation
2065 *VR0 CODE *VR1. */
2066
static void
extract_range_from_multiplicative_op_1 (value_range_t *vr,
					enum tree_code code,
					value_range_t *vr0, value_range_t *vr1)
{
  enum value_range_type type;
  tree val[4];
  size_t i;
  tree min, max;
  bool sop;
  int cmp;

  /* Multiplications, divisions and shifts are a bit tricky to handle,
     depending on the mix of signs we have in the two ranges, we
     need to operate on different values to get the minimum and
     maximum values for the new range.  One approach is to figure
     out all the variations of range combinations and do the
     operations.

     However, this involves several calls to compare_values and it
     is pretty convoluted.  It's simpler to do the 4 operations
     (MIN0 OP MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP
     MAX1) and then figure the smallest and largest values to form
     the new range.  */
  gcc_assert (code == MULT_EXPR
	      || code == TRUNC_DIV_EXPR
	      || code == FLOOR_DIV_EXPR
	      || code == CEIL_DIV_EXPR
	      || code == EXACT_DIV_EXPR
	      || code == ROUND_DIV_EXPR
	      || code == RSHIFT_EXPR
	      || code == LSHIFT_EXPR);
  gcc_assert ((vr0->type == VR_RANGE
	       || (code == MULT_EXPR && vr0->type == VR_ANTI_RANGE))
	      && vr0->type == vr1->type);

  type = vr0->type;

  /* Compute the 4 cross operations.  vrp_int_const_binop returns
     NULL_TREE on an overflow it cannot represent; SOP records that.  */
  sop = false;
  val[0] = vrp_int_const_binop (code, vr0->min, vr1->min);
  if (val[0] == NULL_TREE)
    sop = true;

  /* Skip cross products that would duplicate an earlier one because
     one of the ranges is a singleton; a NULL entry is ignored below.  */
  if (vr1->max == vr1->min)
    val[1] = NULL_TREE;
  else
    {
      val[1] = vrp_int_const_binop (code, vr0->min, vr1->max);
      if (val[1] == NULL_TREE)
	sop = true;
    }

  if (vr0->max == vr0->min)
    val[2] = NULL_TREE;
  else
    {
      val[2] = vrp_int_const_binop (code, vr0->max, vr1->min);
      if (val[2] == NULL_TREE)
	sop = true;
    }

  if (vr0->min == vr0->max || vr1->min == vr1->max)
    val[3] = NULL_TREE;
  else
    {
      val[3] = vrp_int_const_binop (code, vr0->max, vr1->max);
      if (val[3] == NULL_TREE)
	sop = true;
    }

  if (sop)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Set MIN to the minimum of VAL[i] and MAX to the maximum
     of VAL[i].  */
  min = val[0];
  max = val[0];
  for (i = 1; i < 4; i++)
    {
      /* Stop as soon as MIN or MAX is already bad enough to force the
	 final range to VARYING (re-checked after the loop).  */
      if (!is_gimple_min_invariant (min)
	  || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
	  || !is_gimple_min_invariant (max)
	  || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
	break;

      if (val[i])
	{
	  if (!is_gimple_min_invariant (val[i])
	      || (TREE_OVERFLOW (val[i])
		  && !is_overflow_infinity (val[i])))
	    {
	      /* If we found an overflowed value, set MIN and MAX
		 to it so that we set the resulting range to
		 VARYING.  */
	      min = max = val[i];
	      break;
	    }

	  if (compare_values (val[i], min) == -1)
	    min = val[i];

	  if (compare_values (val[i], max) == 1)
	    max = val[i];
	}
    }

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
2217
2218 /* Some quadruple precision helpers. */
2219 static int
2220 quad_int_cmp (double_int l0, double_int h0,
2221 double_int l1, double_int h1, bool uns)
2222 {
2223 int c = h0.cmp (h1, uns);
2224 if (c != 0) return c;
2225 return l0.ucmp (l1);
2226 }
2227
2228 static void
2229 quad_int_pair_sort (double_int *l0, double_int *h0,
2230 double_int *l1, double_int *h1, bool uns)
2231 {
2232 if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
2233 {
2234 double_int tmp;
2235 tmp = *l0; *l0 = *l1; *l1 = tmp;
2236 tmp = *h0; *h0 = *h1; *h1 = tmp;
2237 }
2238 }
2239
2240 /* Extract range information from a binary operation CODE based on
2241 the ranges of each of its operands, *VR0 and *VR1 with resulting
2242 type EXPR_TYPE. The resulting range is stored in *VR. */
2243
static void
extract_range_from_binary_expr_1 (value_range_t *vr,
				  enum tree_code code, tree expr_type,
				  value_range_t *vr0_, value_range_t *vr1_)
{
  /* Work on copies so the anti-range canonicalization and UNDEFINED
     handling below can rewrite the operand ranges locally.  */
  value_range_t vr0 = *vr0_, vr1 = *vr1_;
  value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
  enum value_range_type type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED && vr1.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr0);
  else if (vr1.type == VR_UNDEFINED)
    set_value_range_to_varying (&vr1);

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  The recursion
     terminates because ranges_from_anti_range only produces VR_RANGEs.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    &vrtem1, vr1_);
	  vrp_meet (vr, &vrres);
	}
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_binary_expr_1 (&vrres, code, expr_type,
					    vr0_, &vrtem1);
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.type;

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_AND_EXPR
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions.  TODO, we may be able to derive anti-ranges in
     some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && (vr0.type == VR_VARYING
	  || vr1.type == VR_VARYING
	  || vr0.type != vr1.type
	  || symbolic_range_p (&vr0)
	  || symbolic_range_p (&vr1)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
	{
	  /* For MIN/MAX expressions with pointers, we only care about
	     nullness, if both are non null, then the result is nonnull.
	     If both are null, then the result is null. Otherwise they
	     are varying.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == POINTER_PLUS_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) || range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) && range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else if (code == BIT_AND_EXPR)
	{
	  /* For pointer types, we are really only interested in asserting
	     whether the expression evaluates to non-NULL.  */
	  if (range_is_nonnull (&vr0) && range_is_nonnull (&vr1))
	    set_value_range_to_nonnull (vr, expr_type);
	  else if (range_is_null (&vr0) || range_is_null (&vr1))
	    set_value_range_to_null (vr, expr_type);
	  else
	    set_value_range_to_varying (vr);
	}
      else
	set_value_range_to_varying (vr);

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* If we have a PLUS_EXPR with two VR_RANGE integer constant
         ranges compute the precise range for such case if possible.  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  /* We need as many bits as the possibly unsigned inputs.  */
	  && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
	{
	  double_int min0 = tree_to_double_int (vr0.min);
	  double_int max0 = tree_to_double_int (vr0.max);
	  double_int min1 = tree_to_double_int (vr1.min);
	  double_int max1 = tree_to_double_int (vr1.max);
	  bool uns = TYPE_UNSIGNED (expr_type);
	  double_int type_min
	    = double_int::min_value (TYPE_PRECISION (expr_type), uns);
	  double_int type_max
	    = double_int::max_value (TYPE_PRECISION (expr_type), uns);
	  double_int dmin, dmax;
	  /* -1: wrapped below, 0: no overflow, 1: wrapped above.  */
	  int min_ovf = 0;
	  int max_ovf = 0;

	  if (code == PLUS_EXPR)
	    {
	      dmin = min0 + min1;
	      dmax = max0 + max1;

	      /* Check for overflow in double_int.  */
	      if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
		min_ovf = min0.cmp (dmin, uns);
	      if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
		max_ovf = max0.cmp (dmax, uns);
	    }
	  else /* if (code == MINUS_EXPR) */
	    {
	      dmin = min0 - max1;
	      dmax = max0 - min1;

	      if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
		min_ovf = min0.cmp (max1, uns);
	      if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
		max_ovf = max0.cmp (min1, uns);
	    }

	  /* For non-wrapping arithmetic look at possibly smaller
	     value-ranges of the type.  */
	  if (!TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      if (vrp_val_min (expr_type))
		type_min = tree_to_double_int (vrp_val_min (expr_type));
	      if (vrp_val_max (expr_type))
		type_max = tree_to_double_int (vrp_val_max (expr_type));
	    }

	  /* Check for type overflow.  */
	  if (min_ovf == 0)
	    {
	      if (dmin.cmp (type_min, uns) == -1)
		min_ovf = -1;
	      else if (dmin.cmp (type_max, uns) == 1)
		min_ovf = 1;
	    }
	  if (max_ovf == 0)
	    {
	      if (dmax.cmp (type_min, uns) == -1)
		max_ovf = -1;
	      else if (dmax.cmp (type_max, uns) == 1)
		max_ovf = 1;
	    }

	  if (TYPE_OVERFLOW_WRAPS (expr_type))
	    {
	      /* If overflow wraps, truncate the values and adjust the
		 range kind and bounds appropriately.  */
	      double_int tmin
		= dmin.ext (TYPE_PRECISION (expr_type), uns);
	      double_int tmax
		= dmax.ext (TYPE_PRECISION (expr_type), uns);
	      if (min_ovf == max_ovf)
		{
		  /* No overflow or both overflow or underflow.  The
		     range kind stays VR_RANGE.  */
		  min = double_int_to_tree (expr_type, tmin);
		  max = double_int_to_tree (expr_type, tmax);
		}
	      else if (min_ovf == -1
		       && max_ovf == 1)
		{
		  /* Underflow and overflow, drop to VR_VARYING.  */
		  set_value_range_to_varying (vr);
		  return;
		}
	      else
		{
		  /* Min underflow or max overflow.  The range kind
		     changes to VR_ANTI_RANGE.  */
		  bool covers = false;
		  double_int tem = tmin;
		  gcc_assert ((min_ovf == -1 && max_ovf == 0)
			      || (max_ovf == 1 && min_ovf == 0));
		  type = VR_ANTI_RANGE;
		  tmin = tmax + double_int_one;
		  if (tmin.cmp (tmax, uns) < 0)
		    covers = true;
		  tmax = tem + double_int_minus_one;
		  if (tmax.cmp (tem, uns) > 0)
		    covers = true;
		  /* If the anti-range would cover nothing, drop to varying.
		     Likewise if the anti-range bounds are outside of the
		     types values.  */
		  if (covers || tmin.cmp (tmax, uns) > 0)
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		  min = double_int_to_tree (expr_type, tmin);
		  max = double_int_to_tree (expr_type, tmax);
		}
	    }
	  else
	    {
	      /* If overflow does not wrap, saturate to the types min/max
	         value.  */
	      if (min_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = negative_overflow_infinity (expr_type);
		  else
		    min = double_int_to_tree (expr_type, type_min);
		}
	      else if (min_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    min = positive_overflow_infinity (expr_type);
		  else
		    min = double_int_to_tree (expr_type, type_max);
		}
	      else
		min = double_int_to_tree (expr_type, dmin);

	      if (max_ovf == -1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = negative_overflow_infinity (expr_type);
		  else
		    max = double_int_to_tree (expr_type, type_min);
		}
	      else if (max_ovf == 1)
		{
		  if (needs_overflow_infinity (expr_type)
		      && supports_overflow_infinity (expr_type))
		    max = positive_overflow_infinity (expr_type);
		  else
		    max = double_int_to_tree (expr_type, type_max);
		}
	      else
		max = double_int_to_tree (expr_type, dmax);
	    }
	  /* An overflow-infinity input bound propagates to the matching
	     result bound (for MINUS_EXPR the roles of vr1's bounds swap).  */
	  if (needs_overflow_infinity (expr_type)
	      && supports_overflow_infinity (expr_type))
	    {
	      if (is_negative_overflow_infinity (vr0.min)
		  || (code == PLUS_EXPR
		      ? is_negative_overflow_infinity (vr1.min)
		      : is_positive_overflow_infinity (vr1.max)))
		min = negative_overflow_infinity (expr_type);
	      if (is_positive_overflow_infinity (vr0.max)
		  || (code == PLUS_EXPR
		      ? is_positive_overflow_infinity (vr1.max)
		      : is_negative_overflow_infinity (vr1.min)))
		max = positive_overflow_infinity (expr_type);
	    }
	}
      else
	{
	  /* For other cases, for example if we have a PLUS_EXPR with two
	     VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
	     to compute a precise range for such a case.
	     ??? General even mixed range kind operations can be expressed
	     by for example transforming ~[3, 5] + [1, 2] to range-only
	     operations and a union primitive:
	       [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
	           [-INF+1, 4]     U    [6, +INF(OVF)]
	     though usually the union is not exactly representable with
	     a single range or anti-range as the above is
	     [-INF+1, +INF(OVF)] intersected with ~[5, 5]
	     but one could use a scheme similar to equivalences for this.  */
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MIN_EXPR
	   || code == MAX_EXPR)
    {
      if (vr0.type == VR_RANGE
	  && !symbolic_range_p (&vr0))
	{
	  type = VR_RANGE;
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1))
	    {
	      /* For operations that make the resulting range directly
		 proportional to the original ranges, apply the operation to
		 the same end of each range.  */
	      min = vrp_int_const_binop (code, vr0.min, vr1.min);
	      max = vrp_int_const_binop (code, vr0.max, vr1.max);
	    }
	  else if (code == MIN_EXPR)
	    {
	      /* Only one usable range: MIN is bounded above by vr0.max.  */
	      min = vrp_val_min (expr_type);
	      max = vr0.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      /* Only one usable range: MAX is bounded below by vr0.min.  */
	      min = vr0.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else if (vr1.type == VR_RANGE
	       && !symbolic_range_p (&vr1))
	{
	  type = VR_RANGE;
	  if (code == MIN_EXPR)
	    {
	      min = vrp_val_min (expr_type);
	      max = vr1.max;
	    }
	  else if (code == MAX_EXPR)
	    {
	      min = vr1.min;
	      max = vrp_val_max (expr_type);
	    }
	}
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}
    }
  else if (code == MULT_EXPR)
    {
      /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
	 drop to varying.  */
      if (range_int_cst_p (&vr0)
	  && range_int_cst_p (&vr1)
	  && TYPE_OVERFLOW_WRAPS (expr_type))
	{
	  /* The products are tracked in 128-bit "quad" precision as
	     low/high double_int pairs.  */
	  double_int min0, max0, min1, max1, sizem1, size;
	  double_int prod0l, prod0h, prod1l, prod1h,
		     prod2l, prod2h, prod3l, prod3h;
	  bool uns0, uns1, uns;

	  sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
	  size = sizem1 + double_int_one;

	  min0 = tree_to_double_int (vr0.min);
	  max0 = tree_to_double_int (vr0.max);
	  min1 = tree_to_double_int (vr1.min);
	  max1 = tree_to_double_int (vr1.max);

	  uns0 = TYPE_UNSIGNED (expr_type);
	  uns1 = uns0;

	  /* Canonicalize the intervals.  */
	  if (TYPE_UNSIGNED (expr_type))
	    {
	      double_int min2 = size - min0;
	      if (!min2.is_zero () && min2.cmp (max0, true) < 0)
		{
		  min0 = -min2;
		  max0 -= size;
		  uns0 = false;
		}

	      min2 = size - min1;
	      if (!min2.is_zero () && min2.cmp (max1, true) < 0)
		{
		  min1 = -min2;
		  max1 -= size;
		  uns1 = false;
		}
	    }
	  uns = uns0 & uns1;

	  /* Compute the four bound products; the subtractions below
	     convert the unsigned wide multiply into a signed one when
	     an operand is negative.  */
	  bool overflow;
	  prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
	  if (!uns0 && min0.is_negative ())
	    prod0h -= min1;
	  if (!uns1 && min1.is_negative ())
	    prod0h -= min0;

	  prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
	  if (!uns0 && min0.is_negative ())
	    prod1h -= max1;
	  if (!uns1 && max1.is_negative ())
	    prod1h -= min0;

	  prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
	  if (!uns0 && max0.is_negative ())
	    prod2h -= min1;
	  if (!uns1 && min1.is_negative ())
	    prod2h -= max0;

	  prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
	  if (!uns0 && max0.is_negative ())
	    prod3h -= max1;
	  if (!uns1 && max1.is_negative ())
	    prod3h -= max0;

	  /* Sort the 4 products.  */
	  quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
	  quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
	  quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
	  quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);

	  /* Max - min.  */
	  if (prod0l.is_zero ())
	    {
	      prod1l = double_int_zero;
	      prod1h = -prod0h;
	    }
	  else
	    {
	      prod1l = -prod0l;
	      prod1h = ~prod0h;
	    }
	  prod2l = prod3l + prod1l;
	  prod2h = prod3h + prod1h;
	  if (prod2l.ult (prod3l))
	    prod2h += double_int_one; /* carry */

	  if (!prod2h.is_zero ()
	      || prod2l.cmp (sizem1, true) >= 0)
	    {
	      /* the range covers all values.  */
	      set_value_range_to_varying (vr);
	      return;
	    }

	  /* The following should handle the wrapping and selecting
	     VR_ANTI_RANGE for us.  */
	  min = double_int_to_tree (expr_type, prod0l);
	  max = double_int_to_tree (expr_type, prod3l);
	  set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
	  return;
	}

      /* If we have an unsigned MULT_EXPR with two VR_ANTI_RANGEs,
	 drop to VR_VARYING.  It would take more effort to compute a
	 precise range for such a case.  For example, if we have
	 op0 == 65536 and op1 == 65536 with their ranges both being
	 ~[0,0] on a 32-bit machine, we would have op0 * op1 == 0, so
	 we cannot claim that the product is in ~[0,0].  Note that we
	 are guaranteed to have vr0.type == vr1.type at this
	 point.  */
      if (vr0.type == VR_ANTI_RANGE
	  && !TYPE_OVERFLOW_UNDEFINED (expr_type))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
	   || code == LSHIFT_EXPR)
    {
      /* If we have a RSHIFT_EXPR with any shift values outside [0..prec-1],
	 then drop to VR_VARYING.  Outside of this range we get undefined
	 behavior from the shift operation.  We cannot even trust
	 SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
	 shifts, and the operation at the tree level may be widened.  */
      if (range_int_cst_p (&vr1)
	  && compare_tree_int (vr1.min, 0) >= 0
	  && compare_tree_int (vr1.max, TYPE_PRECISION (expr_type)) == -1)
	{
	  if (code == RSHIFT_EXPR)
	    {
	      extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	      return;
	    }
	  /* We can map lshifts by constants to MULT_EXPR handling.  */
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_singleton_p (&vr1))
	    {
	      bool saved_flag_wrapv;
	      value_range_t vr1p = VR_INITIALIZER;
	      vr1p.type = VR_RANGE;
	      /* Build the singleton range [1 << shift, 1 << shift].  */
	      vr1p.min
		= double_int_to_tree (expr_type,
				      double_int_one
				      .llshift (TREE_INT_CST_LOW (vr1.min),
						TYPE_PRECISION (expr_type)));
	      vr1p.max = vr1p.min;
	      /* We have to use a wrapping multiply though as signed overflow
		 on lshifts is implementation defined in C89.  */
	      saved_flag_wrapv = flag_wrapv;
	      flag_wrapv = 1;
	      extract_range_from_binary_expr_1 (vr, MULT_EXPR, expr_type,
						&vr0, &vr1p);
	      flag_wrapv = saved_flag_wrapv;
	      return;
	    }
	  else if (code == LSHIFT_EXPR
		   && range_int_cst_p (&vr0))
	    {
	      int prec = TYPE_PRECISION (expr_type);
	      int overflow_pos = prec;
	      int bound_shift;
	      double_int bound, complement, low_bound, high_bound;
	      bool uns = TYPE_UNSIGNED (expr_type);
	      bool in_bounds = false;

	      if (!uns)
		overflow_pos -= 1;

	      bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
	      /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
		 overflow.  However, for that to happen, vr1.max needs to be
		 zero, which means vr1 is a singleton range of zero, which
		 means it should be handled by the previous LSHIFT_EXPR
		 if-clause.  */
	      bound = double_int_one.llshift (bound_shift, prec);
	      complement = ~(bound - double_int_one);

	      if (uns)
		{
		  low_bound = bound.zext (prec);
		  high_bound = complement.zext (prec);
		  if (tree_to_double_int (vr0.max).ult (low_bound))
		    {
		      /* [5, 6] << [1, 2] == [10, 24].  */
		      /* We're shifting out only zeroes, the value increases
			 monotonically.  */
		      in_bounds = true;
		    }
		  else if (high_bound.ult (tree_to_double_int (vr0.min)))
		    {
		      /* [0xffffff00, 0xffffffff] << [1, 2]
		         == [0xfffffc00, 0xfffffffe].  */
		      /* We're shifting out only ones, the value decreases
			 monotonically.  */
		      in_bounds = true;
		    }
		}
	      else
		{
		  /* [-1, 1] << [1, 2] == [-4, 4].  */
		  low_bound = complement.sext (prec);
		  high_bound = bound;
		  if (tree_to_double_int (vr0.max).slt (high_bound)
		      && low_bound.slt (tree_to_double_int (vr0.min)))
		    {
		      /* For non-negative numbers, we're shifting out only
			 zeroes, the value increases monotonically.
			 For negative numbers, we're shifting out only ones, the
			 value decreases monotonically.  */
		      in_bounds = true;
		    }
		}

	      if (in_bounds)
		{
		  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
		  return;
		}
	    }
	}
      set_value_range_to_varying (vr);
      return;
    }
  else if (code == TRUNC_DIV_EXPR
	   || code == FLOOR_DIV_EXPR
	   || code == CEIL_DIV_EXPR
	   || code == EXACT_DIV_EXPR
	   || code == ROUND_DIV_EXPR)
    {
      if (vr0.type != VR_RANGE || symbolic_range_p (&vr0))
	{
	  /* For division, if op1 has VR_RANGE but op0 does not, something
	     can be deduced just from that range.  Say [min, max] / [4, max]
	     gives [min / 4, max / 4] range.  */
	  if (vr1.type == VR_RANGE
	      && !symbolic_range_p (&vr1)
	      && range_includes_zero_p (vr1.min, vr1.max) == 0)
	    {
	      vr0.type = type = VR_RANGE;
	      vr0.min = vrp_val_min (expr_type);
	      vr0.max = vrp_val_max (expr_type);
	    }
	  else
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}

      /* For divisions, if flag_non_call_exceptions is true, we must
	 not eliminate a division by zero.  */
      if (cfun->can_throw_non_call_exceptions
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* For divisions, if op0 is VR_RANGE, we can deduce a range
	 even if op1 is VR_VARYING, VR_ANTI_RANGE, symbolic or can
	 include 0.  */
      if (vr0.type == VR_RANGE
	  && (vr1.type != VR_RANGE
	      || range_includes_zero_p (vr1.min, vr1.max) != 0))
	{
	  tree zero = build_int_cst (TREE_TYPE (vr0.min), 0);
	  int cmp;

	  min = NULL_TREE;
	  max = NULL_TREE;
	  if (TYPE_UNSIGNED (expr_type)
	      || value_range_nonnegative_p (&vr1))
	    {
	      /* For unsigned division or when divisor is known
		 to be non-negative, the range has to cover
		 all numbers from 0 to max for positive max
		 and all numbers from min to 0 for negative min.  */
	      cmp = compare_values (vr0.max, zero);
	      if (cmp == -1)
		max = zero;
	      else if (cmp == 0 || cmp == 1)
		max = vr0.max;
	      else
		type = VR_VARYING;
	      cmp = compare_values (vr0.min, zero);
	      if (cmp == 1)
		min = zero;
	      else if (cmp == 0 || cmp == -1)
		min = vr0.min;
	      else
		type = VR_VARYING;
	    }
	  else
	    {
	      /* Otherwise the range is -max .. max or min .. -min
		 depending on which bound is bigger in absolute value,
		 as the division can change the sign.  */
	      abs_extent_range (vr, vr0.min, vr0.max);
	      return;
	    }
	  if (type == VR_VARYING)
	    {
	      set_value_range_to_varying (vr);
	      return;
	    }
	}
      else
	{
	  extract_range_from_multiplicative_op_1 (vr, code, &vr0, &vr1);
	  return;
	}
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      /* vrp_val_is_min (vr1.min) guards against |TYPE_MIN| overflowing
	 in the ABS_EXPR below.  */
      if (vr1.type != VR_RANGE
	  || range_includes_zero_p (vr1.min, vr1.max) != 0
	  || vrp_val_is_min (vr1.min))
	{
	  set_value_range_to_varying (vr);
	  return;
	}
      type = VR_RANGE;
      /* Compute MAX <|vr1.min|, |vr1.max|> - 1.  */
      max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
      if (tree_int_cst_lt (max, vr1.max))
	max = vr1.max;
      max = int_const_binop (MINUS_EXPR, max, integer_one_node);
      /* If the dividend is non-negative the modulus will be
	 non-negative as well.  */
      if (TYPE_UNSIGNED (expr_type)
	  || value_range_nonnegative_p (&vr0))
	min = build_int_cst (TREE_TYPE (max), 0);
      else
	min = fold_unary_to_constant (NEGATE_EXPR, expr_type, max);
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      bool int_cst_range0, int_cst_range1;
      double_int may_be_nonzero0, may_be_nonzero1;
      double_int must_be_nonzero0, must_be_nonzero1;

      int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
						  &must_be_nonzero0);
      int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
						  &must_be_nonzero1);

      type = VR_RANGE;
      if (code == BIT_AND_EXPR)
	{
	  double_int dmax;
	  min = double_int_to_tree (expr_type,
				    must_be_nonzero0 & must_be_nonzero1);
	  dmax = may_be_nonzero0 & may_be_nonzero1;
	  /* If both input ranges contain only negative values we can
	     truncate the result range maximum to the minimum of the
	     input range maxima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.max) < 0
	      && tree_int_cst_sgn (vr1.max) < 0)
	    {
	      dmax = dmax.min (tree_to_double_int (vr0.max),
			       TYPE_UNSIGNED (expr_type));
	      dmax = dmax.min (tree_to_double_int (vr1.max),
			       TYPE_UNSIGNED (expr_type));
	    }
	  /* If either input range contains only non-negative values
	     we can truncate the result range maximum to the respective
	     maximum of the input range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
	    dmax = dmax.min (tree_to_double_int (vr0.max),
			     TYPE_UNSIGNED (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
	    dmax = dmax.min (tree_to_double_int (vr1.max),
			     TYPE_UNSIGNED (expr_type));
	  max = double_int_to_tree (expr_type, dmax);
	}
      else if (code == BIT_IOR_EXPR)
	{
	  double_int dmin;
	  max = double_int_to_tree (expr_type,
				    may_be_nonzero0 | may_be_nonzero1);
	  dmin = must_be_nonzero0 | must_be_nonzero1;
	  /* If the input ranges contain only positive values we can
	     truncate the minimum of the result range to the maximum
	     of the input range minima.  */
	  if (int_cst_range0 && int_cst_range1
	      && tree_int_cst_sgn (vr0.min) >= 0
	      && tree_int_cst_sgn (vr1.min) >= 0)
	    {
	      dmin = dmin.max (tree_to_double_int (vr0.min),
			       TYPE_UNSIGNED (expr_type));
	      dmin = dmin.max (tree_to_double_int (vr1.min),
			       TYPE_UNSIGNED (expr_type));
	    }
	  /* If either input range contains only negative values
	     we can truncate the minimum of the result range to the
	     respective minimum range.  */
	  if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
	    dmin = dmin.max (tree_to_double_int (vr0.min),
			     TYPE_UNSIGNED (expr_type));
	  if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
	    dmin = dmin.max (tree_to_double_int (vr1.min),
			     TYPE_UNSIGNED (expr_type));
	  min = double_int_to_tree (expr_type, dmin);
	}
      else if (code == BIT_XOR_EXPR)
	{
	  double_int result_zero_bits, result_one_bits;
	  result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
			     | ~(may_be_nonzero0 | may_be_nonzero1);
	  result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
			    | must_be_nonzero1.and_not (may_be_nonzero0);
	  max = double_int_to_tree (expr_type, ~result_zero_bits);
	  min = double_int_to_tree (expr_type, result_one_bits);
	  /* If the range has all positive or all negative values the
	     result is better than VARYING.  */
	  if (tree_int_cst_sgn (min) < 0
	      || tree_int_cst_sgn (max) >= 0)
	    ;
	  else
	    max = min = NULL_TREE;
	}
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  But we do accept an overflow infinity
     representation.  */
  if (min == NULL_TREE
      || !is_gimple_min_invariant (min)
      || (TREE_OVERFLOW (min) && !is_overflow_infinity (min))
      || max == NULL_TREE
      || !is_gimple_min_invariant (max)
      || (TREE_OVERFLOW (max) && !is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* We punt if:
     1) [-INF, +INF]
     2) [-INF, +-INF(OVF)]
     3) [+-INF(OVF), +INF]
     4) [+-INF(OVF), +-INF(OVF)]
     We learn nothing when we have INF and INF(OVF) on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF] without
     overflow.  */
  if ((vrp_val_is_min (min) || is_overflow_infinity (min))
      && (vrp_val_is_max (max) || is_overflow_infinity (max)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
	 then the operation caused one of them to wrap around, mark
	 the new range VARYING.  */
      set_value_range_to_varying (vr);
    }
  else
    set_value_range (vr, type, min, max, NULL);
}
3125
3126 /* Extract range information from a binary expression OP0 CODE OP1 based on
3127 the ranges of each of its operands with resulting type EXPR_TYPE.
3128 The resulting range is stored in *VR. */
3129
3130 static void
3131 extract_range_from_binary_expr (value_range_t *vr,
3132 enum tree_code code,
3133 tree expr_type, tree op0, tree op1)
3134 {
3135 value_range_t vr0 = VR_INITIALIZER;
3136 value_range_t vr1 = VR_INITIALIZER;
3137
3138 /* Get value ranges for each operand. For constant operands, create
3139 a new value range with the operand to simplify processing. */
3140 if (TREE_CODE (op0) == SSA_NAME)
3141 vr0 = *(get_value_range (op0));
3142 else if (is_gimple_min_invariant (op0))
3143 set_value_range_to_value (&vr0, op0, NULL);
3144 else
3145 set_value_range_to_varying (&vr0);
3146
3147 if (TREE_CODE (op1) == SSA_NAME)
3148 vr1 = *(get_value_range (op1));
3149 else if (is_gimple_min_invariant (op1))
3150 set_value_range_to_value (&vr1, op1, NULL);
3151 else
3152 set_value_range_to_varying (&vr1);
3153
3154 extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
3155 }
3156
/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0_ with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.

   *VR0_ itself is never written; a local copy is taken because the
   anti-range canonicalization and the ABS_EXPR logic below modify it.  */

static void
extract_range_from_unary_expr_1 (value_range_t *vr,
				 enum tree_code code, tree type,
				 value_range_t *vr0_, tree op0_type)
{
  value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
	|| POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
	   || POINTER_TYPE_P (type)))
    {
      set_value_range_to_varying (vr);
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.type == VR_UNDEFINED)
    {
      set_value_range_to_undefined (vr);
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR)
    {
      /* PAREN_EXPR is a simple copy.  */
      copy_value_range (vr, &vr0);
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t zero = VR_INITIALIZER;
      set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
	 anti-ranges fine.  */
      value_range_t minusone = VR_INITIALIZER;
      set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
      extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
					type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[]  as (op []') U (op []'').
     The recursion terminates because ranges_from_anti_range yields
     plain ranges, so the recursive calls never take this branch again.  */
  if (vr0.type == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
      if (vrtem1.type != VR_UNDEFINED)
	{
	  value_range_t vrres = VR_INITIALIZER;
	  extract_range_from_unary_expr_1 (&vrres, code, type,
					   &vrtem1, op0_type);
	  /* Union the two sub-results.  */
	  vrp_meet (vr, &vrres);
	}
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression evaluates to a pointer, we are only interested in
	 determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).  */
      if (POINTER_TYPE_P (type))
	{
	  if (range_is_nonnull (&vr0))
	    set_value_range_to_nonnull (vr, type);
	  else if (range_is_null (&vr0))
	    set_value_range_to_null (vr, type);
	  else
	    set_value_range_to_varying (vr);
	  return;
	}

      /* If VR0 is varying and we increase the type precision, assume
	 a full range for the following transformation.  */
      if (vr0.type == VR_VARYING
	  && INTEGRAL_TYPE_P (inner_type)
	  && TYPE_PRECISION (inner_type) < TYPE_PRECISION (outer_type))
	{
	  vr0.type = VR_RANGE;
	  vr0.min = TYPE_MIN_VALUE (inner_type);
	  vr0.max = TYPE_MAX_VALUE (inner_type);
	}

      /* If VR0 is a constant range or anti-range and the conversion is
	 not truncating we can convert the min and max values and
	 canonicalize the resulting range.  Otherwise we can do the
	 conversion if the size of the range is less than what the
	 precision of the target type can represent and the range is
	 not an anti-range.  */
      if ((vr0.type == VR_RANGE
	   || vr0.type == VR_ANTI_RANGE)
	  && TREE_CODE (vr0.min) == INTEGER_CST
	  && TREE_CODE (vr0.max) == INTEGER_CST
	  && (!is_overflow_infinity (vr0.min)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (!is_overflow_infinity (vr0.max)
	      || (vr0.type == VR_RANGE
		  && TYPE_PRECISION (outer_type) > TYPE_PRECISION (inner_type)
		  && needs_overflow_infinity (outer_type)
		  && supports_overflow_infinity (outer_type)))
	  && (TYPE_PRECISION (outer_type) >= TYPE_PRECISION (inner_type)
	      || (vr0.type == VR_RANGE
		  && integer_zerop (int_const_binop (RSHIFT_EXPR,
		       int_const_binop (MINUS_EXPR, vr0.max, vr0.min),
			 size_int (TYPE_PRECISION (outer_type)))))))
	{
	  tree new_min, new_max;
	  if (is_overflow_infinity (vr0.min))
	    new_min = negative_overflow_infinity (outer_type);
	  else
	    new_min = force_fit_type_double (outer_type,
					     tree_to_double_int (vr0.min),
					     0, false);
	  if (is_overflow_infinity (vr0.max))
	    new_max = positive_overflow_infinity (outer_type);
	  else
	    new_max = force_fit_type_double (outer_type,
					     tree_to_double_int (vr0.max),
					     0, false);
	  set_and_canonicalize_value_range (vr, vr0.type,
					    new_min, new_max, NULL);
	  return;
	}

      set_value_range_to_varying (vr);
      return;
    }
  else if (code == ABS_EXPR)
    {
      tree min, max;
      int cmp;

      /* Pass through vr0 in the easy cases.  */
      if (TYPE_UNSIGNED (type)
	  || value_range_nonnegative_p (&vr0))
	{
	  copy_value_range (vr, &vr0);
	  return;
	}

      /* For the remaining varying or symbolic ranges we can't do anything
	 useful.  */
      if (vr0.type == VR_VARYING
	  || symbolic_range_p (&vr0))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get a
	 useful range.  */
      if (!TYPE_OVERFLOW_UNDEFINED (type)
	  && ((vr0.type == VR_RANGE
	       && vrp_val_is_min (vr0.min))
	      || (vr0.type == VR_ANTI_RANGE
		  && !vrp_val_is_min (vr0.min))))
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      /* ABS_EXPR may flip the range around, if the original range
	 included negative values.  */
      if (is_overflow_infinity (vr0.min))
	min = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.min))
	min = fold_unary_to_constant (code, type, vr0.min);
      else if (!needs_overflow_infinity (type))
	min = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type))
	min = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      if (is_overflow_infinity (vr0.max))
	max = positive_overflow_infinity (type);
      else if (!vrp_val_is_min (vr0.max))
	max = fold_unary_to_constant (code, type, vr0.max);
      else if (!needs_overflow_infinity (type))
	max = TYPE_MAX_VALUE (type);
      else if (supports_overflow_infinity (type)
	       /* We shouldn't generate [+INF, +INF] as set_value_range
		  doesn't like this and ICEs.  */
	       && !is_positive_overflow_infinity (min))
	max = positive_overflow_infinity (type);
      else
	{
	  set_value_range_to_varying (vr);
	  return;
	}

      cmp = compare_values (min, max);

      /* If a VR_ANTI_RANGEs contains zero, then we have
	 ~[-INF, min(MIN, MAX)].  */
      if (vr0.type == VR_ANTI_RANGE)
	{
	  if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	    {
	      /* Take the lower of the two values.  */
	      if (cmp != 1)
		max = min;

	      /* Create ~[-INF, min (abs(MIN), abs(MAX))]
	         or ~[-INF + 1, min (abs(MIN), abs(MAX))] when
		 flag_wrapv is set and the original anti-range doesn't include
	         TYPE_MIN_VALUE, remember -TYPE_MIN_VALUE = TYPE_MIN_VALUE.  */
	      if (TYPE_OVERFLOW_WRAPS (type))
		{
		  tree type_min_value = TYPE_MIN_VALUE (type);

		  min = (vr0.min != type_min_value
			 ? int_const_binop (PLUS_EXPR, type_min_value,
					    integer_one_node)
			 : type_min_value);
		}
	      else
		{
		  if (overflow_infinity_range_p (&vr0))
		    min = negative_overflow_infinity (type);
		  else
		    min = TYPE_MIN_VALUE (type);
		}
	    }
	  else
	    {
	      /* All else has failed, so create the range [0, INF], even for
		 flag_wrapv since TYPE_MIN_VALUE is in the original
		 anti-range.  */
	      vr0.type = VR_RANGE;
	      min = build_int_cst (type, 0);
	      if (needs_overflow_infinity (type))
		{
		  if (supports_overflow_infinity (type))
		    max = positive_overflow_infinity (type);
		  else
		    {
		      set_value_range_to_varying (vr);
		      return;
		    }
		}
	      else
		max = TYPE_MAX_VALUE (type);
	    }
	}

      /* If the range contains zero then we know that the minimum value in the
	 range will be zero.  */
      else if (range_includes_zero_p (vr0.min, vr0.max) == 1)
	{
	  if (cmp == 1)
	    max = min;
	  min = build_int_cst (type, 0);
	}
      else
	{
	  /* If the range was reversed, swap MIN and MAX.  */
	  if (cmp == 1)
	    {
	      tree t = min;
	      min = max;
	      max = t;
	    }
	}

      cmp = compare_values (min, max);
      if (cmp == -2 || cmp == 1)
	{
	  /* If the new range has its limits swapped around (MIN > MAX),
	     then the operation caused one of them to wrap around, mark
	     the new range VARYING.  */
	  set_value_range_to_varying (vr);
	}
      else
	set_value_range (vr, vr0.type, min, max, NULL);
      return;
    }

  /* For unhandled operations fall back to varying.  */
  set_value_range_to_varying (vr);
  return;
}
3462
3463
3464 /* Extract range information from a unary expression CODE OP0 based on
3465 the range of its operand with resulting type TYPE.
3466 The resulting range is stored in *VR. */
3467
3468 static void
3469 extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
3470 tree type, tree op0)
3471 {
3472 value_range_t vr0 = VR_INITIALIZER;
3473
3474 /* Get value ranges for the operand. For constant operands, create
3475 a new value range with the operand to simplify processing. */
3476 if (TREE_CODE (op0) == SSA_NAME)
3477 vr0 = *(get_value_range (op0));
3478 else if (is_gimple_min_invariant (op0))
3479 set_value_range_to_value (&vr0, op0, NULL);
3480 else
3481 set_value_range_to_varying (&vr0);
3482
3483 extract_range_from_unary_expr_1 (vr, code, type, &vr0, TREE_TYPE (op0));
3484 }
3485
3486
3487 /* Extract range information from a conditional expression STMT based on
3488 the ranges of each of its operands and the expression code. */
3489
3490 static void
3491 extract_range_from_cond_expr (value_range_t *vr, gimple stmt)
3492 {
3493 tree op0, op1;
3494 value_range_t vr0 = VR_INITIALIZER;
3495 value_range_t vr1 = VR_INITIALIZER;
3496
3497 /* Get value ranges for each operand. For constant operands, create
3498 a new value range with the operand to simplify processing. */
3499 op0 = gimple_assign_rhs2 (stmt);
3500 if (TREE_CODE (op0) == SSA_NAME)
3501 vr0 = *(get_value_range (op0));
3502 else if (is_gimple_min_invariant (op0))
3503 set_value_range_to_value (&vr0, op0, NULL);
3504 else
3505 set_value_range_to_varying (&vr0);
3506
3507 op1 = gimple_assign_rhs3 (stmt);
3508 if (TREE_CODE (op1) == SSA_NAME)
3509 vr1 = *(get_value_range (op1));
3510 else if (is_gimple_min_invariant (op1))
3511 set_value_range_to_value (&vr1, op1, NULL);
3512 else
3513 set_value_range_to_varying (&vr1);
3514
3515 /* The resulting value range is the union of the operand ranges */
3516 copy_value_range (vr, &vr0);
3517 vrp_meet (vr, &vr1);
3518 }
3519
3520
3521 /* Extract range information from a comparison expression EXPR based
3522 on the range of its operand and the expression code. */
3523
3524 static void
3525 extract_range_from_comparison (value_range_t *vr, enum tree_code code,
3526 tree type, tree op0, tree op1)
3527 {
3528 bool sop = false;
3529 tree val;
3530
3531 val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
3532 NULL);
3533
3534 /* A disadvantage of using a special infinity as an overflow
3535 representation is that we lose the ability to record overflow
3536 when we don't have an infinity. So we have to ignore a result
3537 which relies on overflow. */
3538
3539 if (val && !is_overflow_infinity (val) && !sop)
3540 {
3541 /* Since this expression was found on the RHS of an assignment,
3542 its type may be different from _Bool. Convert VAL to EXPR's
3543 type. */
3544 val = fold_convert (type, val);
3545 if (is_gimple_min_invariant (val))
3546 set_value_range_to_value (vr, val, vr->equiv);
3547 else
3548 set_value_range (vr, VR_RANGE, val, val, vr->equiv);
3549 }
3550 else
3551 /* The result of a comparison is always true or false. */
3552 set_value_range_to_truthvalue (vr, type);
3553 }
3554
/* Try to derive a nonnegative or nonzero range out of STMT relying
   primarily on generic routines in fold in conjunction with range data.
   For recognized normal builtins (ffs, popcount, parity, clz, ctz,
   clrsb) derive a precise [mini, maxi] range instead.
   Store the result in *VR.  */

static void
extract_range_basic (value_range_t *vr, gimple stmt)
{
  bool sop = false;
  tree type = gimple_expr_type (stmt);

  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
    {
      tree fndecl = gimple_call_fndecl (stmt), arg;
      /* mini/maxi bound the builtin's result; mini == -2 is a sentinel
	 meaning "give up unless the argument is proven non-zero".  */
      int mini, maxi, zerov = 0, prec;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	case BUILT_IN_CONSTANT_P:
	  /* If the call is __builtin_constant_p and the argument is a
	     function parameter resolve it to false.  This avoids bogus
	     array bound warnings.
	     ??? We could do this as early as inlining is finished.  */
	  arg = gimple_call_arg (stmt, 0);
	  if (TREE_CODE (arg) == SSA_NAME
	      && SSA_NAME_IS_DEFAULT_DEF (arg)
	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL)
	    {
	      set_value_range_to_null (vr, type);
	      return;
	    }
	  break;
	  /* Both __builtin_ffs* and __builtin_popcount return
	     [0, prec].  */
	CASE_INT_FN (BUILT_IN_FFS):
	CASE_INT_FN (BUILT_IN_POPCOUNT):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then ffs or popcount
		 are non-zero.  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !TREE_OVERFLOW (vr0->min))
		mini = 1;
	      /* If some high bits are known to be zero,
		 we can decrease the maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		maxi = tree_floor_log2 (vr0->max) + 1;
	    }
	  goto bitop_builtin;
	  /* __builtin_parity* returns [0, 1].  */
	CASE_INT_FN (BUILT_IN_PARITY):
	  mini = 0;
	  maxi = 1;
	  goto bitop_builtin;
	  /* __builtin_c[lt]z* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     On many targets where the CLZ RTL or optab value is defined
	     for 0 the value is prec, so include that in the range
	     by default.  */
	CASE_INT_FN (BUILT_IN_CLZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec;
	  if (optab_handler (clz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov)
	      /* Handle only the single common value.  */
	      && zerov != prec)
	    /* Magic value to give up, unless vr0 proves
	       arg is non-zero.  */
	    mini = -2;
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* From clz of VR_RANGE minimum we can compute
		 result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->min) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->min))
		{
		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
		  if (maxi != prec)
		    mini = 0;
		}
	      else if (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)
		       && !TREE_OVERFLOW (vr0->min))
		{
		  maxi = prec - 1;
		  mini = 0;
		}
	      if (mini == -2)
		break;
	      /* From clz of VR_RANGE maximum we can compute
		 result minimum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		{
		  mini = prec - 1 - tree_floor_log2 (vr0->max);
		  if (mini == prec)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_ctz* return [0, prec-1], except for
	     when the argument is 0, but that is undefined behavior.
	     If there is a ctz optab for this mode and
	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
	     otherwise just assume 0 won't be seen.  */
	CASE_INT_FN (BUILT_IN_CTZ):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  if (optab_handler (ctz_optab, TYPE_MODE (TREE_TYPE (arg)))
	      != CODE_FOR_nothing
	      && CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (TREE_TYPE (arg)),
					    zerov))
	    {
	      /* Handle only the two common values.  */
	      if (zerov == -1)
		mini = -1;
	      else if (zerov == prec)
		maxi = prec;
	      else
		/* Magic value to give up, unless vr0 proves
		   arg is non-zero.  */
		mini = -2;
	    }
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      value_range_t *vr0 = get_value_range (arg);
	      /* If arg is non-zero, then use [0, prec - 1].  */
	      if (((vr0->type == VR_RANGE
		    && integer_nonzerop (vr0->min))
		   || (vr0->type == VR_ANTI_RANGE
		       && integer_zerop (vr0->min)))
		  && !TREE_OVERFLOW (vr0->min))
		{
		  mini = 0;
		  maxi = prec - 1;
		}
	      /* If some high bits are known to be zero,
		 we can decrease the result maximum.  */
	      if (vr0->type == VR_RANGE
		  && TREE_CODE (vr0->max) == INTEGER_CST
		  && !TREE_OVERFLOW (vr0->max))
		{
		  maxi = tree_floor_log2 (vr0->max);
		  /* For vr0 [0, 0] give up.  */
		  if (maxi == -1)
		    break;
		}
	    }
	  if (mini == -2)
	    break;
	  goto bitop_builtin;
	  /* __builtin_clrsb* returns [0, prec-1].  */
	CASE_INT_FN (BUILT_IN_CLRSB):
	  arg = gimple_call_arg (stmt, 0);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	  mini = 0;
	  maxi = prec - 1;
	  goto bitop_builtin;
	bitop_builtin:
	  /* Common exit for all bit-operation builtins above: record
	     the computed [mini, maxi] range.  */
	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
			   build_int_cst (type, maxi), NULL);
	  return;
	default:
	  break;
	}
    }
  /* Fall back to the generic fold-based deductions.  */
  if (INTEGRAL_TYPE_P (type)
      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
    set_value_range_to_nonnegative (vr, type,
				    sop || stmt_overflow_infinity (stmt));
  else if (vrp_stmt_computes_nonzero (stmt, &sop)
	   && !sop)
    set_value_range_to_nonnull (vr, type);
  else
    set_value_range_to_varying (vr);
}
3751
3752
3753 /* Try to compute a useful range out of assignment STMT and store it
3754 in *VR. */
3755
3756 static void
3757 extract_range_from_assignment (value_range_t *vr, gimple stmt)
3758 {
3759 enum tree_code code = gimple_assign_rhs_code (stmt);
3760
3761 if (code == ASSERT_EXPR)
3762 extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
3763 else if (code == SSA_NAME)
3764 extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
3765 else if (TREE_CODE_CLASS (code) == tcc_binary)
3766 extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
3767 gimple_expr_type (stmt),
3768 gimple_assign_rhs1 (stmt),
3769 gimple_assign_rhs2 (stmt));
3770 else if (TREE_CODE_CLASS (code) == tcc_unary)
3771 extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
3772 gimple_expr_type (stmt),
3773 gimple_assign_rhs1 (stmt));
3774 else if (code == COND_EXPR)
3775 extract_range_from_cond_expr (vr, stmt);
3776 else if (TREE_CODE_CLASS (code) == tcc_comparison)
3777 extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
3778 gimple_expr_type (stmt),
3779 gimple_assign_rhs1 (stmt),
3780 gimple_assign_rhs2 (stmt));
3781 else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
3782 && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
3783 set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
3784 else
3785 set_value_range_to_varying (vr);
3786
3787 if (vr->type == VR_VARYING)
3788 extract_range_basic (vr, stmt);
3789 }
3790
/* Given a range VR, a LOOP and a variable VAR, determine whether it
   would be profitable to adjust VR using scalar evolution information
   for VAR.  If so, update VR with the new limits.
   STMT is the statement at which VAR's evolution is examined; it is
   passed through to the wrap check.  */

static void
adjust_range_with_scev (value_range_t *vr, struct loop *loop,
			gimple stmt, tree var)
{
  tree init, step, chrec, tmin, tmax, min, max, type, tem;
  enum ev_direction dir;

  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
     better opportunities than a regular range, but I'm not sure.  */
  if (vr->type == VR_ANTI_RANGE)
    return;

  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));

  /* Like in PR19590, scev can return a constant function.  */
  if (is_gimple_min_invariant (chrec))
    {
      set_value_range_to_value (vr, chrec, vr->equiv);
      return;
    }

  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    return;

  /* Where possible, replace INIT and STEP with the single constant
     their value ranges pin them to.  */
  init = initial_condition_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (init);
  if (tem)
    init = tem;
  step = evolution_part_in_loop_num (chrec, loop->num);
  tem = op_with_constant_singleton_value_range (step);
  if (tem)
    step = tem;

  /* If STEP is symbolic, we can't know whether INIT will be the
     minimum or maximum value in the range.  Also, unless INIT is
     a simple expression, compare_values and possibly other functions
     in tree-vrp won't be able to handle it.  */
  if (step == NULL_TREE
      || !is_gimple_min_invariant (step)
      || !valid_value_p (init))
    return;

  dir = scev_direction (chrec);
  if (/* Do not adjust ranges if we do not know whether the iv increases
	 or decreases,  ... */
      dir == EV_DIR_UNKNOWN
      /* ... or if it may wrap.  */
      || scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
				true))
    return;

  /* We use TYPE_MIN_VALUE and TYPE_MAX_VALUE here instead of
     negative_overflow_infinity and positive_overflow_infinity,
     because we have concluded that the loop probably does not
     wrap.  */

  type = TREE_TYPE (var);
  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
    tmin = lower_bound_in_type (type, type);
  else
    tmin = TYPE_MIN_VALUE (type);
  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
    tmax = upper_bound_in_type (type, type);
  else
    tmax = TYPE_MAX_VALUE (type);

  /* Try to use estimated number of iterations for the loop to constrain the
     final value in the evolution.  */
  if (TREE_CODE (step) == INTEGER_CST
      && is_gimple_val (init)
      && (TREE_CODE (init) != SSA_NAME
	  || get_value_range (init)->type == VR_RANGE))
    {
      double_int nit;

      /* We are only entering here for loop header PHI nodes, so using
	 the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
	{
	  value_range_t maxvr = VR_INITIALIZER;
	  double_int dtmp;
	  bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
	  bool overflow = false;

	  /* Total displacement over the loop: STEP * number of
	     latch executions.  */
	  dtmp = tree_to_double_int (step)
		 .mul_with_sign (nit, unsigned_p, &overflow);
	  /* If the multiplication overflowed we can't do a meaningful
	     adjustment.  Likewise if the result doesn't fit in the type
	     of the induction variable.  For a signed type we have to
	     check whether the result has the expected signedness which
	     is that of the step as number of iterations is unsigned.  */
	  if (!overflow
	      && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
	      && (unsigned_p
		  || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
	    {
	      tem = double_int_to_tree (TREE_TYPE (init), dtmp);
	      extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
					      TREE_TYPE (init), init, tem);
	      /* Likewise if the addition did.  */
	      if (maxvr.type == VR_RANGE)
		{
		  tmin = maxvr.min;
		  tmax = maxvr.max;
		}
	    }
	}
    }

  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    {
      min = tmin;
      max = tmax;

      /* For VARYING or UNDEFINED ranges, just about anything we get
	 from scalar evolutions should be better.  */

      if (dir == EV_DIR_DECREASES)
	max = init;
      else
	min = init;

      /* If we would create an invalid range, then just assume we
	 know absolutely nothing.  This may be over-conservative,
	 but it's clearly safe, and should happen only in unreachable
	 parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
  else if (vr->type == VR_RANGE)
    {
      min = vr->min;
      max = vr->max;

      if (dir == EV_DIR_DECREASES)
	{
	  /* INIT is the maximum value.  If INIT is lower than VR->MAX
	     but no smaller than VR->MIN, set VR->MAX to INIT.  */
	  if (compare_values (init, max) == -1)
	    max = init;

	  /* According to the loop information, the variable does not
	     overflow.  If we think it does, probably because of an
	     overflow due to arithmetic on a different INF value,
	     reset now.  */
	  if (is_negative_overflow_infinity (min)
	      || compare_values (min, tmin) == -1)
	    min = tmin;

	}
      else
	{
	  /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
	  if (compare_values (init, min) == 1)
	    min = init;

	  if (is_positive_overflow_infinity (max)
	      || compare_values (tmax, max) == -1)
	    max = tmax;
	}

      /* If we just created an invalid range with the minimum
	 greater than the maximum, we fail conservatively.
	 This should happen only in unreachable
	 parts of code, or for invalid programs.  */
      if (compare_values (min, max) == 1)
	return;

      set_value_range (vr, VR_RANGE, min, max, vr->equiv);
    }
}
3968
3969 /* Return true if VAR may overflow at STMT. This checks any available
3970 loop information to see if we can determine that VAR does not
3971 overflow. */
3972
3973 static bool
3974 vrp_var_may_overflow (tree var, gimple stmt)
3975 {
3976 struct loop *l;
3977 tree chrec, init, step;
3978
3979 if (current_loops == NULL)
3980 return true;
3981
3982 l = loop_containing_stmt (stmt);
3983 if (l == NULL
3984 || !loop_outer (l))
3985 return true;
3986
3987 chrec = instantiate_parameters (l, analyze_scalar_evolution (l, var));
3988 if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
3989 return true;
3990
3991 init = initial_condition_in_loop_num (chrec, l->num);
3992 step = evolution_part_in_loop_num (chrec, l->num);
3993
3994 if (step == NULL_TREE
3995 || !is_gimple_min_invariant (step)
3996 || !valid_value_p (init))
3997 return true;
3998
3999 /* If we get here, we know something useful about VAR based on the
4000 loop information. If it wraps, it may overflow. */
4001
4002 if (scev_probably_wraps_p (init, step, stmt, get_chrec_loop (chrec),
4003 true))
4004 return true;
4005
4006 if (dump_file && (dump_flags & TDF_DETAILS) != 0)
4007 {
4008 print_generic_expr (dump_file, var, 0);
4009 fprintf (dump_file, ": loop information indicates does not overflow\n");
4010 }
4011
4012 return false;
4013 }
4014
4015
4016 /* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
4017
4018 - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
4019 all the values in the ranges.
4020
4021 - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
4022
4023 - Return NULL_TREE if it is not always possible to determine the
4024 value of the comparison.
4025
4026 Also set *STRICT_OVERFLOW_P to indicate whether a range with an
4027 overflow infinity was used in the test. */
4028
4029
4030 static tree
4031 compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
4032 bool *strict_overflow_p)
4033 {
4034 /* VARYING or UNDEFINED ranges cannot be compared. */
4035 if (vr0->type == VR_VARYING
4036 || vr0->type == VR_UNDEFINED
4037 || vr1->type == VR_VARYING
4038 || vr1->type == VR_UNDEFINED)
4039 return NULL_TREE;
4040
4041 /* Anti-ranges need to be handled separately. */
4042 if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
4043 {
4044 /* If both are anti-ranges, then we cannot compute any
4045 comparison. */
4046 if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
4047 return NULL_TREE;
4048
4049 /* These comparisons are never statically computable. */
4050 if (comp == GT_EXPR
4051 || comp == GE_EXPR
4052 || comp == LT_EXPR
4053 || comp == LE_EXPR)
4054 return NULL_TREE;
4055
4056 /* Equality can be computed only between a range and an
4057 anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
4058 if (vr0->type == VR_RANGE)
4059 {
4060 /* To simplify processing, make VR0 the anti-range. */
4061 value_range_t *tmp = vr0;
4062 vr0 = vr1;
4063 vr1 = tmp;
4064 }
4065
4066 gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
4067
4068 if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
4069 && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
4070 return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
4071
4072 return NULL_TREE;
4073 }
4074
4075 if (!usable_range_p (vr0, strict_overflow_p)
4076 || !usable_range_p (vr1, strict_overflow_p))
4077 return NULL_TREE;
4078
4079 /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
4080 operands around and change the comparison code. */
4081 if (comp == GT_EXPR || comp == GE_EXPR)
4082 {
4083 value_range_t *tmp;
4084 comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
4085 tmp = vr0;
4086 vr0 = vr1;
4087 vr1 = tmp;
4088 }
4089
4090 if (comp == EQ_EXPR)
4091 {
4092 /* Equality may only be computed if both ranges represent
4093 exactly one value. */
4094 if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
4095 && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
4096 {
4097 int cmp_min = compare_values_warnv (vr0->min, vr1->min,
4098 strict_overflow_p);
4099 int cmp_max = compare_values_warnv (vr0->max, vr1->max,
4100 strict_overflow_p);
4101 if (cmp_min == 0 && cmp_max == 0)
4102 return boolean_true_node;
4103 else if (cmp_min != -2 && cmp_max != -2)
4104 return boolean_false_node;
4105 }
4106 /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
4107 else if (compare_values_warnv (vr0->min, vr1->max,
4108 strict_overflow_p) == 1
4109 || compare_values_warnv (vr1->min, vr0->max,
4110 strict_overflow_p) == 1)
4111 return boolean_false_node;
4112
4113 return NULL_TREE;
4114 }
4115 else if (comp == NE_EXPR)
4116 {
4117 int cmp1, cmp2;
4118
4119 /* If VR0 is completely to the left or completely to the right
4120 of VR1, they are always different. Notice that we need to
4121 make sure that both comparisons yield similar results to
4122 avoid comparing values that cannot be compared at
4123 compile-time. */
4124 cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4125 cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4126 if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
4127 return boolean_true_node;
4128
4129 /* If VR0 and VR1 represent a single value and are identical,
4130 return false. */
4131 else if (compare_values_warnv (vr0->min, vr0->max,
4132 strict_overflow_p) == 0
4133 && compare_values_warnv (vr1->min, vr1->max,
4134 strict_overflow_p) == 0
4135 && compare_values_warnv (vr0->min, vr1->min,
4136 strict_overflow_p) == 0
4137 && compare_values_warnv (vr0->max, vr1->max,
4138 strict_overflow_p) == 0)
4139 return boolean_false_node;
4140
4141 /* Otherwise, they may or may not be different. */
4142 else
4143 return NULL_TREE;
4144 }
4145 else if (comp == LT_EXPR || comp == LE_EXPR)
4146 {
4147 int tst;
4148
4149 /* If VR0 is to the left of VR1, return true. */
4150 tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
4151 if ((comp == LT_EXPR && tst == -1)
4152 || (comp == LE_EXPR && (tst == -1 || tst == 0)))
4153 {
4154 if (overflow_infinity_range_p (vr0)
4155 || overflow_infinity_range_p (vr1))
4156 *strict_overflow_p = true;
4157 return boolean_true_node;
4158 }
4159
4160 /* If VR0 is to the right of VR1, return false. */
4161 tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
4162 if ((comp == LT_EXPR && (tst == 0 || tst == 1))
4163 || (comp == LE_EXPR && tst == 1))
4164 {
4165 if (overflow_infinity_range_p (vr0)
4166 || overflow_infinity_range_p (vr1))
4167 *strict_overflow_p = true;
4168 return boolean_false_node;
4169 }
4170
4171 /* Otherwise, we don't know. */
4172 return NULL_TREE;
4173 }
4174
4175 gcc_unreachable ();
4176 }
4177
4178
4179 /* Given a value range VR, a value VAL and a comparison code COMP, return
4180 BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
4181 values in VR. Return BOOLEAN_FALSE_NODE if the comparison
4182 always returns false. Return NULL_TREE if it is not always
4183 possible to determine the value of the comparison. Also set
4184 *STRICT_OVERFLOW_P to indicate whether a range with an overflow
4185 infinity was used in the test. */
4186
static tree
compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
			  bool *strict_overflow_p)
{
  /* A range carrying no information cannot decide any predicate.  */
  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
    return NULL_TREE;

  /* Anti-ranges need to be handled separately.  */
  if (vr->type == VR_ANTI_RANGE)
    {
      /* For anti-ranges, the only predicates that we can compute at
	 compile time are equality and inequality.  */
      if (comp == GT_EXPR
	  || comp == GE_EXPR
	  || comp == LT_EXPR
	  || comp == LE_EXPR)
	return NULL_TREE;

      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2:
	 then VAL is certainly excluded from the range, so equality is
	 false and inequality is true.  */
      if (value_inside_range (val, vr->min, vr->max) == 1)
	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;

      return NULL_TREE;
    }

  /* usable_range_p rejects ranges we must not reason about and records
     in *STRICT_OVERFLOW_P when an overflow infinity is involved (see
     the function header comment above).  */
  if (!usable_range_p (vr, strict_overflow_p))
    return NULL_TREE;

  if (comp == EQ_EXPR)
    {
      /* EQ_EXPR may only be computed if VR represents exactly
	 one value.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
	{
	  /* compare_values_warnv yields -2 when the operands cannot be
	     compared at compile time; any other result decides the
	     predicate (0 means equal, everything else means unequal).  */
	  int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
	  if (cmp == 0)
	    return boolean_true_node;
	  else if (cmp == -1 || cmp == 1 || cmp == 2)
	    return boolean_false_node;
	}
      /* VAL strictly outside [VR->MIN, VR->MAX] can never be equal.  */
      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
	       || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
	return boolean_false_node;

      return NULL_TREE;
    }
  else if (comp == NE_EXPR)
    {
      /* If VAL is not inside VR, then they are always different.  */
      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
	  || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
	return boolean_true_node;

      /* If VR represents exactly one value equal to VAL, then return
	 false.  */
      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
	  && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
	return boolean_false_node;

      /* Otherwise, they may or may not be different.  */
      return NULL_TREE;
    }
  else if (comp == LT_EXPR || comp == LE_EXPR)
    {
      int tst;

      /* If VR is to the left of VAL, return true.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == LT_EXPR && tst == -1)
	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
	{
	  /* The verdict relied on an overflow-infinity bound; record
	     that for the caller.  */
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the right of VAL, return false.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
	  || (comp == LE_EXPR && tst == 1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }
  else if (comp == GT_EXPR || comp == GE_EXPR)
    {
      int tst;

      /* If VR is to the right of VAL, return true.  */
      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
      if ((comp == GT_EXPR && tst == 1)
	  || (comp == GE_EXPR && (tst == 0 || tst == 1)))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_true_node;
	}

      /* If VR is to the left of VAL, return false.  */
      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
	  || (comp == GE_EXPR && tst == -1))
	{
	  if (overflow_infinity_range_p (vr))
	    *strict_overflow_p = true;
	  return boolean_false_node;
	}

      /* Otherwise, we don't know.  */
      return NULL_TREE;
    }

  /* All comparison codes were handled above; any other COMP is a
     caller bug.  */
  gcc_unreachable ();
}
4306
4307
4308 /* Debugging dumps. */
4309
4310 void dump_value_range (FILE *, value_range_t *);
4311 void debug_value_range (value_range_t *);
4312 void dump_all_value_ranges (FILE *);
4313 void debug_all_value_ranges (void);
4314 void dump_vr_equiv (FILE *, bitmap);
4315 void debug_vr_equiv (bitmap);
4316
4317
4318 /* Dump value range VR to FILE. */
4319
4320 void
4321 dump_value_range (FILE *file, value_range_t *vr)
4322 {
4323 if (vr == NULL)
4324 fprintf (file, "[]");
4325 else if (vr->type == VR_UNDEFINED)
4326 fprintf (file, "UNDEFINED");
4327 else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
4328 {
4329 tree type = TREE_TYPE (vr->min);
4330
4331 fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
4332
4333 if (is_negative_overflow_infinity (vr->min))
4334 fprintf (file, "-INF(OVF)");
4335 else if (INTEGRAL_TYPE_P (type)
4336 && !TYPE_UNSIGNED (type)
4337 && vrp_val_is_min (vr->min))
4338 fprintf (file, "-INF");
4339 else
4340 print_generic_expr (file, vr->min, 0);
4341
4342 fprintf (file, ", ");
4343
4344 if (is_positive_overflow_infinity (vr->max))
4345 fprintf (file, "+INF(OVF)");
4346 else if (INTEGRAL_TYPE_P (type)
4347 && vrp_val_is_max (vr->max))
4348 fprintf (file, "+INF");
4349 else
4350 print_generic_expr (file, vr->max, 0);
4351
4352 fprintf (file, "]");
4353
4354 if (vr->equiv)
4355 {
4356 bitmap_iterator bi;
4357 unsigned i, c = 0;
4358
4359 fprintf (file, " EQUIVALENCES: { ");
4360
4361 EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
4362 {
4363 print_generic_expr (file, ssa_name (i), 0);
4364 fprintf (file, " ");
4365 c++;
4366 }
4367
4368 fprintf (file, "} (%u elements)", c);
4369 }
4370 }
4371 else if (vr->type == VR_VARYING)
4372 fprintf (file, "VARYING");
4373 else
4374 fprintf (file, "INVALID RANGE");
4375 }
4376
4377
4378 /* Dump value range VR to stderr. */
4379
4380 DEBUG_FUNCTION void
4381 debug_value_range (value_range_t *vr)
4382 {
4383 dump_value_range (stderr, vr);
4384 fprintf (stderr, "\n");
4385 }
4386
4387
4388 /* Dump value ranges of all SSA_NAMEs to FILE. */
4389
4390 void
4391 dump_all_value_ranges (FILE *file)
4392 {
4393 size_t i;
4394
4395 for (i = 0; i < num_vr_values; i++)
4396 {
4397 if (vr_value[i])
4398 {
4399 print_generic_expr (file, ssa_name (i), 0);
4400 fprintf (file, ": ");
4401 dump_value_range (file, vr_value[i]);
4402 fprintf (file, "\n");
4403 }
4404 }
4405
4406 fprintf (file, "\n");
4407 }
4408
4409
4410 /* Dump all value ranges to stderr. */
4411
DEBUG_FUNCTION void
debug_all_value_ranges (void)
{
  /* Convenience wrapper for use from the debugger.  */
  dump_all_value_ranges (stderr);
}
4417
4418
4419 /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
4420 create a new SSA name N and return the assertion assignment
4421 'V = ASSERT_EXPR <V, V OP W>'. */
4422
static gimple
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gimple assertion;

  /* Assertions only make sense for SSA names constrained by a
     comparison predicate.  */
  gcc_assert (TREE_CODE (v) == SSA_NAME
	      && COMPARISON_CLASS_P (cond));

  /* Build 'ASSERT_EXPR <V, COND>' and wrap it in an assignment whose
     LHS is left NULL for now; the new definition is created below.  */
  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR, creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
  create_new_def_for (v, assertion, NULL);

  return assertion;
}
4443
4444
4445 /* Return false if EXPR is a predicate expression involving floating
4446 point values. */
4447
4448 static inline bool
4449 fp_predicate (gimple stmt)
4450 {
4451 GIMPLE_CHECK (stmt, GIMPLE_COND);
4452
4453 return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
4454 }
4455
4456
4457 /* If the range of values taken by OP can be inferred after STMT executes,
4458 return the comparison code (COMP_CODE_P) and value (VAL_P) that
4459 describes the inferred range. Return true if a range could be
4460 inferred. */
4461
4462 static bool
4463 infer_value_range (gimple stmt, tree op, enum tree_code *comp_code_p, tree *val_p)
4464 {
4465 *val_p = NULL_TREE;
4466 *comp_code_p = ERROR_MARK;
4467
4468 /* Do not attempt to infer anything in names that flow through
4469 abnormal edges. */
4470 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
4471 return false;
4472
4473 /* Similarly, don't infer anything from statements that may throw
4474 exceptions. */
4475 if (stmt_could_throw_p (stmt))
4476 return false;
4477
4478 /* If STMT is the last statement of a basic block with no
4479 successors, there is no point inferring anything about any of its
4480 operands. We would not be able to find a proper insertion point
4481 for the assertion, anyway. */
4482 if (stmt_ends_bb_p (stmt) && EDGE_COUNT (gimple_bb (stmt)->succs) == 0)
4483 return false;
4484
4485 /* We can only assume that a pointer dereference will yield
4486 non-NULL if -fdelete-null-pointer-checks is enabled. */
4487 if (flag_delete_null_pointer_checks
4488 && POINTER_TYPE_P (TREE_TYPE (op))
4489 && gimple_code (stmt) != GIMPLE_ASM)
4490 {
4491 unsigned num_uses, num_loads, num_stores;
4492
4493 count_uses_and_derefs (op, stmt, &num_uses, &num_loads, &num_stores);
4494 if (num_loads + num_stores > 0)
4495 {
4496 *val_p = build_int_cst (TREE_TYPE (op), 0);
4497 *comp_code_p = NE_EXPR;
4498 return true;
4499 }
4500 }
4501
4502 return false;
4503 }
4504
4505
4506 void dump_asserts_for (FILE *, tree);
4507 void debug_asserts_for (tree);
4508 void dump_all_asserts (FILE *);
4509 void debug_all_asserts (void);
4510
4511 /* Dump all the registered assertions for NAME to FILE. */
4512
4513 void
4514 dump_asserts_for (FILE *file, tree name)
4515 {
4516 assert_locus_t loc;
4517
4518 fprintf (file, "Assertions to be inserted for ");
4519 print_generic_expr (file, name, 0);
4520 fprintf (file, "\n");
4521
4522 loc = asserts_for[SSA_NAME_VERSION (name)];
4523 while (loc)
4524 {
4525 fprintf (file, "\t");
4526 print_gimple_stmt (file, gsi_stmt (loc->si), 0, 0);
4527 fprintf (file, "\n\tBB #%d", loc->bb->index);
4528 if (loc->e)
4529 {
4530 fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
4531 loc->e->dest->index);
4532 dump_edge_info (file, loc->e, dump_flags, 0);
4533 }
4534 fprintf (file, "\n\tPREDICATE: ");
4535 print_generic_expr (file, name, 0);
4536 fprintf (file, " %s ", tree_code_name[(int)loc->comp_code]);
4537 print_generic_expr (file, loc->val, 0);
4538 fprintf (file, "\n\n");
4539 loc = loc->next;
4540 }
4541
4542 fprintf (file, "\n");
4543 }
4544
4545
4546 /* Dump all the registered assertions for NAME to stderr. */
4547
DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  /* Convenience wrapper for use from the debugger.  */
  dump_asserts_for (stderr, name);
}
4553
4554
4555 /* Dump all the registered assertions for all the names to FILE. */
4556
4557 void
4558 dump_all_asserts (FILE *file)
4559 {
4560 unsigned i;
4561 bitmap_iterator bi;
4562
4563 fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
4564 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
4565 dump_asserts_for (file, ssa_name (i));
4566 fprintf (file, "\n");
4567 }
4568
4569
4570 /* Dump all the registered assertions for all the names to stderr. */
4571
DEBUG_FUNCTION void
debug_all_asserts (void)
{
  /* Convenience wrapper for use from the debugger.  */
  dump_all_asserts (stderr);
}
4577
4578
4579 /* If NAME doesn't have an ASSERT_EXPR registered for asserting
4580 'EXPR COMP_CODE VAL' at a location that dominates block BB or
4581 E->DEST, then register this location as a possible insertion point
4582 for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
4583
4584 BB, E and SI provide the exact insertion point for the new
4585 ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
4586 on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
4587 BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
4588 must not be NULL. */
4589
static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus_t n, loc, last_loc;
  basic_block dest_bb;

  /* Exactly one of BB and E identifies the insertion point.  */
  gcc_checking_assert (bb == NULL || e == NULL);

  /* Control statements must use edge insertion: an ASSERT_EXPR cannot
     be placed after a COND/SWITCH inside the block itself.  */
  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_CODE (val) == INTEGER_CST
      && TREE_OVERFLOW (val))
    /* Rebuild the constant with the same value but without the
       overflow flag.  */
    val = build_int_cst_wide (TREE_TYPE (val),
			      TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val));

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      /* Look for an existing entry with the same predicate
	 (COMP_CODE, VAL, EXPR).  */
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus_d);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  /* Append to the list, or start a new one if NAME had none.  */
  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  /* Record that NAME now needs assertion insertion.  */
  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
4691
4692 /* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
4693 Extract a suitable test code and value and store them into *CODE_P and
4694 *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
4695
4696 If no extraction was possible, return FALSE, otherwise return TRUE.
4697
4698 If INVERT is true, then we invert the result stored into *CODE_P. */
4699
4700 static bool
4701 extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
4702 tree cond_op0, tree cond_op1,
4703 bool invert, enum tree_code *code_p,
4704 tree *val_p)
4705 {
4706 enum tree_code comp_code;
4707 tree val;
4708
4709 /* Otherwise, we have a comparison of the form NAME COMP VAL
4710 or VAL COMP NAME. */
4711 if (name == cond_op1)
4712 {
4713 /* If the predicate is of the form VAL COMP NAME, flip
4714 COMP around because we need to register NAME as the
4715 first operand in the predicate. */
4716 comp_code = swap_tree_comparison (cond_code);
4717 val = cond_op0;
4718 }
4719 else
4720 {
4721 /* The comparison is of the form NAME COMP VAL, so the
4722 comparison code remains unchanged. */
4723 comp_code = cond_code;
4724 val = cond_op1;
4725 }
4726
4727 /* Invert the comparison code as necessary. */
4728 if (invert)
4729 comp_code = invert_tree_comparison (comp_code, 0);
4730
4731 /* VRP does not handle float types. */
4732 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (val)))
4733 return false;
4734
4735 /* Do not register always-false predicates.
4736 FIXME: this works around a limitation in fold() when dealing with
4737 enumerations. Given 'enum { N1, N2 } x;', fold will not
4738 fold 'if (x > N2)' to 'if (0)'. */
4739 if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
4740 && INTEGRAL_TYPE_P (TREE_TYPE (val)))
4741 {
4742 tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
4743 tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
4744
4745 if (comp_code == GT_EXPR
4746 && (!max
4747 || compare_values (val, max) == 0))
4748 return false;
4749
4750 if (comp_code == LT_EXPR
4751 && (!min
4752 || compare_values (val, min) == 0))
4753 return false;
4754 }
4755 *code_p = comp_code;
4756 *val_p = val;
4757 return true;
4758 }
4759
4760 /* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
4761 (otherwise return VAL). VAL and MASK must be zero-extended for
4762 precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
4763 (to transform signed values into unsigned) and at the end xor
4764 SGNBIT back. */
4765
static double_int
masked_increment (double_int val, double_int mask, double_int sgnbit,
		  unsigned int prec)
{
  double_int bit = double_int_one, res;
  unsigned int i;

  /* Map signed values into unsigned order by flipping the sign bit
     (undone on every return path below).  */
  val ^= sgnbit;
  /* Try each bit position from least to most significant; BIT is the
     single-bit value for position I.  */
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      /* Skip positions not present in MASK — the result may only
	 have bits that MASK allows.  */
      if ((res & bit).is_zero ())
	continue;
      /* Candidate: VAL rounded up past BIT with all bits below BIT
	 cleared...  */
      res = bit - double_int_one;
      res = (val + bit).and_not (res);
      /* ...then restricted to the bits of MASK.  */
      res &= mask;
      /* The first candidate strictly greater than VAL (unsigned
	 compare) is the smallest such value.  */
      if (res.ugt (val))
	return res ^ sgnbit;
    }
  /* No value satisfying the mask exceeds VAL; return VAL itself.  */
  return val ^ sgnbit;
}
4787
4788 /* Try to register an edge assertion for SSA name NAME on edge E for
4789 the condition COND contributing to the conditional jump pointed to by BSI.
4790 Invert the condition COND if INVERT is true.
4791 Return true if an assertion for NAME could be registered. */
4792
4793 static bool
4794 register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
4795 enum tree_code cond_code,
4796 tree cond_op0, tree cond_op1, bool invert)
4797 {
4798 tree val;
4799 enum tree_code comp_code;
4800 bool retval = false;
4801
4802 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
4803 cond_op0,
4804 cond_op1,
4805 invert, &comp_code, &val))
4806 return false;
4807
4808 /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
4809 reachable from E. */
4810 if (live_on_edge (e, name)
4811 && !has_single_use (name))
4812 {
4813 register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
4814 retval = true;
4815 }
4816
4817 /* In the case of NAME <= CST and NAME being defined as
4818 NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
4819 and NAME2 <= CST - CST2. We can do the same for NAME > CST.
4820 This catches range and anti-range tests. */
4821 if ((comp_code == LE_EXPR
4822 || comp_code == GT_EXPR)
4823 && TREE_CODE (val) == INTEGER_CST
4824 && TYPE_UNSIGNED (TREE_TYPE (val)))
4825 {
4826 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4827 tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
4828
4829 /* Extract CST2 from the (optional) addition. */
4830 if (is_gimple_assign (def_stmt)
4831 && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
4832 {
4833 name2 = gimple_assign_rhs1 (def_stmt);
4834 cst2 = gimple_assign_rhs2 (def_stmt);
4835 if (TREE_CODE (name2) == SSA_NAME
4836 && TREE_CODE (cst2) == INTEGER_CST)
4837 def_stmt = SSA_NAME_DEF_STMT (name2);
4838 }
4839
4840 /* Extract NAME2 from the (optional) sign-changing cast. */
4841 if (gimple_assign_cast_p (def_stmt))
4842 {
4843 if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
4844 && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
4845 && (TYPE_PRECISION (gimple_expr_type (def_stmt))
4846 == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
4847 name3 = gimple_assign_rhs1 (def_stmt);
4848 }
4849
4850 /* If name3 is used later, create an ASSERT_EXPR for it. */
4851 if (name3 != NULL_TREE
4852 && TREE_CODE (name3) == SSA_NAME
4853 && (cst2 == NULL_TREE
4854 || TREE_CODE (cst2) == INTEGER_CST)
4855 && INTEGRAL_TYPE_P (TREE_TYPE (name3))
4856 && live_on_edge (e, name3)
4857 && !has_single_use (name3))
4858 {
4859 tree tmp;
4860
4861 /* Build an expression for the range test. */
4862 tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
4863 if (cst2 != NULL_TREE)
4864 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4865
4866 if (dump_file)
4867 {
4868 fprintf (dump_file, "Adding assert for ");
4869 print_generic_expr (dump_file, name3, 0);
4870 fprintf (dump_file, " from ");
4871 print_generic_expr (dump_file, tmp, 0);
4872 fprintf (dump_file, "\n");
4873 }
4874
4875 register_new_assert_for (name3, tmp, comp_code, val, NULL, e, bsi);
4876
4877 retval = true;
4878 }
4879
4880 /* If name2 is used later, create an ASSERT_EXPR for it. */
4881 if (name2 != NULL_TREE
4882 && TREE_CODE (name2) == SSA_NAME
4883 && TREE_CODE (cst2) == INTEGER_CST
4884 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4885 && live_on_edge (e, name2)
4886 && !has_single_use (name2))
4887 {
4888 tree tmp;
4889
4890 /* Build an expression for the range test. */
4891 tmp = name2;
4892 if (TREE_TYPE (name) != TREE_TYPE (name2))
4893 tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
4894 if (cst2 != NULL_TREE)
4895 tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
4896
4897 if (dump_file)
4898 {
4899 fprintf (dump_file, "Adding assert for ");
4900 print_generic_expr (dump_file, name2, 0);
4901 fprintf (dump_file, " from ");
4902 print_generic_expr (dump_file, tmp, 0);
4903 fprintf (dump_file, "\n");
4904 }
4905
4906 register_new_assert_for (name2, tmp, comp_code, val, NULL, e, bsi);
4907
4908 retval = true;
4909 }
4910 }
4911
4912 /* In the case of post-in/decrement tests like if (i++) ... and uses
4913 of the in/decremented value on the edge the extra name we want to
4914 assert for is not on the def chain of the name compared. Instead
4915 it is in the set of use stmts. */
4916 if ((comp_code == NE_EXPR
4917 || comp_code == EQ_EXPR)
4918 && TREE_CODE (val) == INTEGER_CST)
4919 {
4920 imm_use_iterator ui;
4921 gimple use_stmt;
4922 FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
4923 {
4924 /* Cut off to use-stmts that are in the predecessor. */
4925 if (gimple_bb (use_stmt) != e->src)
4926 continue;
4927
4928 if (!is_gimple_assign (use_stmt))
4929 continue;
4930
4931 enum tree_code code = gimple_assign_rhs_code (use_stmt);
4932 if (code != PLUS_EXPR
4933 && code != MINUS_EXPR)
4934 continue;
4935
4936 tree cst = gimple_assign_rhs2 (use_stmt);
4937 if (TREE_CODE (cst) != INTEGER_CST)
4938 continue;
4939
4940 tree name2 = gimple_assign_lhs (use_stmt);
4941 if (live_on_edge (e, name2))
4942 {
4943 cst = int_const_binop (code, val, cst);
4944 register_new_assert_for (name2, name2, comp_code, cst,
4945 NULL, e, bsi);
4946 retval = true;
4947 }
4948 }
4949 }
4950
4951 if (TREE_CODE_CLASS (comp_code) == tcc_comparison
4952 && TREE_CODE (val) == INTEGER_CST)
4953 {
4954 gimple def_stmt = SSA_NAME_DEF_STMT (name);
4955 tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
4956 tree val2 = NULL_TREE;
4957 double_int mask = double_int_zero;
4958 unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
4959 unsigned int nprec = prec;
4960 enum tree_code rhs_code = ERROR_MARK;
4961
4962 if (is_gimple_assign (def_stmt))
4963 rhs_code = gimple_assign_rhs_code (def_stmt);
4964
4965 /* Add asserts for NAME cmp CST and NAME being defined
4966 as NAME = (int) NAME2. */
4967 if (!TYPE_UNSIGNED (TREE_TYPE (val))
4968 && (comp_code == LE_EXPR || comp_code == LT_EXPR
4969 || comp_code == GT_EXPR || comp_code == GE_EXPR)
4970 && gimple_assign_cast_p (def_stmt))
4971 {
4972 name2 = gimple_assign_rhs1 (def_stmt);
4973 if (CONVERT_EXPR_CODE_P (rhs_code)
4974 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
4975 && TYPE_UNSIGNED (TREE_TYPE (name2))
4976 && prec == TYPE_PRECISION (TREE_TYPE (name2))
4977 && (comp_code == LE_EXPR || comp_code == GT_EXPR
4978 || !tree_int_cst_equal (val,
4979 TYPE_MIN_VALUE (TREE_TYPE (val))))
4980 && live_on_edge (e, name2)
4981 && !has_single_use (name2))
4982 {
4983 tree tmp, cst;
4984 enum tree_code new_comp_code = comp_code;
4985
4986 cst = fold_convert (TREE_TYPE (name2),
4987 TYPE_MIN_VALUE (TREE_TYPE (val)));
4988 /* Build an expression for the range test. */
4989 tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
4990 cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
4991 fold_convert (TREE_TYPE (name2), val));
4992 if (comp_code == LT_EXPR || comp_code == GE_EXPR)
4993 {
4994 new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
4995 cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
4996 build_int_cst (TREE_TYPE (name2), 1));
4997 }
4998
4999 if (dump_file)
5000 {
5001 fprintf (dump_file, "Adding assert for ");
5002 print_generic_expr (dump_file, name2, 0);
5003 fprintf (dump_file, " from ");
5004 print_generic_expr (dump_file, tmp, 0);
5005 fprintf (dump_file, "\n");
5006 }
5007
5008 register_new_assert_for (name2, tmp, new_comp_code, cst, NULL,
5009 e, bsi);
5010
5011 retval = true;
5012 }
5013 }
5014
5015 /* Add asserts for NAME cmp CST and NAME being defined as
5016 NAME = NAME2 >> CST2.
5017
5018 Extract CST2 from the right shift. */
5019 if (rhs_code == RSHIFT_EXPR)
5020 {
5021 name2 = gimple_assign_rhs1 (def_stmt);
5022 cst2 = gimple_assign_rhs2 (def_stmt);
5023 if (TREE_CODE (name2) == SSA_NAME
5024 && host_integerp (cst2, 1)
5025 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5026 && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1)
5027 && prec <= HOST_BITS_PER_DOUBLE_INT
5028 && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
5029 && live_on_edge (e, name2)
5030 && !has_single_use (name2))
5031 {
5032 mask = double_int::mask (tree_low_cst (cst2, 1));
5033 val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
5034 }
5035 }
5036 if (val2 != NULL_TREE
5037 && TREE_CODE (val2) == INTEGER_CST
5038 && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
5039 TREE_TYPE (val),
5040 val2, cst2), val))
5041 {
5042 enum tree_code new_comp_code = comp_code;
5043 tree tmp, new_val;
5044
5045 tmp = name2;
5046 if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
5047 {
5048 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5049 {
5050 tree type = build_nonstandard_integer_type (prec, 1);
5051 tmp = build1 (NOP_EXPR, type, name2);
5052 val2 = fold_convert (type, val2);
5053 }
5054 tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
5055 new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
5056 new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
5057 }
5058 else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
5059 {
5060 double_int minval
5061 = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5062 new_val = val2;
5063 if (minval == tree_to_double_int (new_val))
5064 new_val = NULL_TREE;
5065 }
5066 else
5067 {
5068 double_int maxval
5069 = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
5070 mask |= tree_to_double_int (val2);
5071 if (mask == maxval)
5072 new_val = NULL_TREE;
5073 else
5074 new_val = double_int_to_tree (TREE_TYPE (val2), mask);
5075 }
5076
5077 if (new_val)
5078 {
5079 if (dump_file)
5080 {
5081 fprintf (dump_file, "Adding assert for ");
5082 print_generic_expr (dump_file, name2, 0);
5083 fprintf (dump_file, " from ");
5084 print_generic_expr (dump_file, tmp, 0);
5085 fprintf (dump_file, "\n");
5086 }
5087
5088 register_new_assert_for (name2, tmp, new_comp_code, new_val,
5089 NULL, e, bsi);
5090 retval = true;
5091 }
5092 }
5093
5094 /* Add asserts for NAME cmp CST and NAME being defined as
5095 NAME = NAME2 & CST2.
5096
5097 Extract CST2 from the and.
5098
5099 Also handle
5100 NAME = (unsigned) NAME2;
5101 casts where NAME's type is unsigned and has smaller precision
5102 than NAME2's type as if it was NAME = NAME2 & MASK. */
5103 names[0] = NULL_TREE;
5104 names[1] = NULL_TREE;
5105 cst2 = NULL_TREE;
5106 if (rhs_code == BIT_AND_EXPR
5107 || (CONVERT_EXPR_CODE_P (rhs_code)
5108 && TREE_CODE (TREE_TYPE (val)) == INTEGER_TYPE
5109 && TYPE_UNSIGNED (TREE_TYPE (val))
5110 && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
5111 > prec
5112 && !retval))
5113 {
5114 name2 = gimple_assign_rhs1 (def_stmt);
5115 if (rhs_code == BIT_AND_EXPR)
5116 cst2 = gimple_assign_rhs2 (def_stmt);
5117 else
5118 {
5119 cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
5120 nprec = TYPE_PRECISION (TREE_TYPE (name2));
5121 }
5122 if (TREE_CODE (name2) == SSA_NAME
5123 && INTEGRAL_TYPE_P (TREE_TYPE (name2))
5124 && TREE_CODE (cst2) == INTEGER_CST
5125 && !integer_zerop (cst2)
5126 && nprec <= HOST_BITS_PER_DOUBLE_INT
5127 && (nprec > 1
5128 || TYPE_UNSIGNED (TREE_TYPE (val))))
5129 {
5130 gimple def_stmt2 = SSA_NAME_DEF_STMT (name2);
5131 if (gimple_assign_cast_p (def_stmt2))
5132 {
5133 names[1] = gimple_assign_rhs1 (def_stmt2);
5134 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
5135 || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
5136 || (TYPE_PRECISION (TREE_TYPE (name2))
5137 != TYPE_PRECISION (TREE_TYPE (names[1])))
5138 || !live_on_edge (e, names[1])
5139 || has_single_use (names[1]))
5140 names[1] = NULL_TREE;
5141 }
5142 if (live_on_edge (e, name2)
5143 && !has_single_use (name2))
5144 names[0] = name2;
5145 }
5146 }
5147 if (names[0] || names[1])
5148 {
5149 double_int minv, maxv = double_int_zero, valv, cst2v;
5150 double_int tem, sgnbit;
5151 bool valid_p = false, valn = false, cst2n = false;
5152 enum tree_code ccode = comp_code;
5153
5154 valv = tree_to_double_int (val).zext (nprec);
5155 cst2v = tree_to_double_int (cst2).zext (nprec);
5156 if (!TYPE_UNSIGNED (TREE_TYPE (val)))
5157 {
5158 valn = valv.sext (nprec).is_negative ();
5159 cst2n = cst2v.sext (nprec).is_negative ();
5160 }
5161 /* If CST2 doesn't have most significant bit set,
5162 but VAL is negative, we have comparison like
5163 if ((x & 0x123) > -4) (always true). Just give up. */
5164 if (!cst2n && valn)
5165 ccode = ERROR_MARK;
5166 if (cst2n)
5167 sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5168 else
5169 sgnbit = double_int_zero;
5170 minv = valv & cst2v;
5171 switch (ccode)
5172 {
5173 case EQ_EXPR:
5174 /* Minimum unsigned value for equality is VAL & CST2
5175 (should be equal to VAL, otherwise we probably should
5176 have folded the comparison into false) and
5177 maximum unsigned value is VAL | ~CST2. */
5178 maxv = valv | ~cst2v;
5179 maxv = maxv.zext (nprec);
5180 valid_p = true;
5181 break;
5182 case NE_EXPR:
5183 tem = valv | ~cst2v;
5184 tem = tem.zext (nprec);
5185 /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
5186 if (valv.is_zero ())
5187 {
5188 cst2n = false;
5189 sgnbit = double_int_zero;
5190 goto gt_expr;
5191 }
5192 /* If (VAL | ~CST2) is all ones, handle it as
5193 (X & CST2) < VAL. */
5194 if (tem == double_int::mask (nprec))
5195 {
5196 cst2n = false;
5197 valn = false;
5198 sgnbit = double_int_zero;
5199 goto lt_expr;
5200 }
5201 if (!cst2n
5202 && cst2v.sext (nprec).is_negative ())
5203 sgnbit
5204 = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
5205 if (!sgnbit.is_zero ())
5206 {
5207 if (valv == sgnbit)
5208 {
5209 cst2n = true;
5210 valn = true;
5211 goto gt_expr;
5212 }
5213 if (tem == double_int::mask (nprec - 1))
5214 {
5215 cst2n = true;
5216 goto lt_expr;
5217 }
5218 if (!cst2n)
5219 sgnbit = double_int_zero;
5220 }
5221 break;
5222 case GE_EXPR:
5223 /* Minimum unsigned value for >= if (VAL & CST2) == VAL
5224 is VAL and maximum unsigned value is ~0. For signed
5225 comparison, if CST2 doesn't have most significant bit
5226 set, handle it similarly. If CST2 has MSB set,
5227 the minimum is the same, and maximum is ~0U/2. */
5228 if (minv != valv)
5229 {
5230 /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
5231 VAL. */
5232 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5233 if (minv == valv)
5234 break;
5235 }
5236 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5237 valid_p = true;
5238 break;
5239 case GT_EXPR:
5240 gt_expr:
5241 /* Find out smallest MINV where MINV > VAL
5242 && (MINV & CST2) == MINV, if any. If VAL is signed and
5243 CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
5244 minv = masked_increment (valv, cst2v, sgnbit, nprec);
5245 if (minv == valv)
5246 break;
5247 maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
5248 valid_p = true;
5249 break;
5250 case LE_EXPR:
5251 /* Minimum unsigned value for <= is 0 and maximum
5252 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
5253 Otherwise, find smallest VAL2 where VAL2 > VAL
5254 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5255 as maximum.
5256 For signed comparison, if CST2 doesn't have most
5257 significant bit set, handle it similarly. If CST2 has
5258 MSB set, the maximum is the same and minimum is INT_MIN. */
5259 if (minv == valv)
5260 maxv = valv;
5261 else
5262 {
5263 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5264 if (maxv == valv)
5265 break;
5266 maxv -= double_int_one;
5267 }
5268 maxv |= ~cst2v;
5269 maxv = maxv.zext (nprec);
5270 minv = sgnbit;
5271 valid_p = true;
5272 break;
5273 case LT_EXPR:
5274 lt_expr:
5275 /* Minimum unsigned value for < is 0 and maximum
5276 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
5277 Otherwise, find smallest VAL2 where VAL2 > VAL
5278 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
5279 as maximum.
5280 For signed comparison, if CST2 doesn't have most
5281 significant bit set, handle it similarly. If CST2 has
5282 MSB set, the maximum is the same and minimum is INT_MIN. */
5283 if (minv == valv)
5284 {
5285 if (valv == sgnbit)
5286 break;
5287 maxv = valv;
5288 }
5289 else
5290 {
5291 maxv = masked_increment (valv, cst2v, sgnbit, nprec);
5292 if (maxv == valv)
5293 break;
5294 }
5295 maxv -= double_int_one;
5296 maxv |= ~cst2v;
5297 maxv = maxv.zext (nprec);
5298 minv = sgnbit;
5299 valid_p = true;
5300 break;
5301 default:
5302 break;
5303 }
5304 if (valid_p
5305 && (maxv - minv).zext (nprec) != double_int::mask (nprec))
5306 {
5307 tree tmp, new_val, type;
5308 int i;
5309
5310 for (i = 0; i < 2; i++)
5311 if (names[i])
5312 {
5313 double_int maxv2 = maxv;
5314 tmp = names[i];
5315 type = TREE_TYPE (names[i]);
5316 if (!TYPE_UNSIGNED (type))
5317 {
5318 type = build_nonstandard_integer_type (nprec, 1);
5319 tmp = build1 (NOP_EXPR, type, names[i]);
5320 }
5321 if (!minv.is_zero ())
5322 {
5323 tmp = build2 (PLUS_EXPR, type, tmp,
5324 double_int_to_tree (type, -minv));
5325 maxv2 = maxv - minv;
5326 }
5327 new_val = double_int_to_tree (type, maxv2);
5328
5329 if (dump_file)
5330 {
5331 fprintf (dump_file, "Adding assert for ");
5332 print_generic_expr (dump_file, names[i], 0);
5333 fprintf (dump_file, " from ");
5334 print_generic_expr (dump_file, tmp, 0);
5335 fprintf (dump_file, "\n");
5336 }
5337
5338 register_new_assert_for (names[i], tmp, LE_EXPR,
5339 new_val, NULL, e, bsi);
5340 retval = true;
5341 }
5342 }
5343 }
5344 }
5345
5346 return retval;
5347 }
5348
5349 /* OP is an operand of a truth value expression which is known to have
5350 a particular value. Register any asserts for OP and for any
5351 operands in OP's defining statement.
5352
5353 If CODE is EQ_EXPR, then we want to register OP is zero (false),
5354 if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
5355
5356 static bool
5357 register_edge_assert_for_1 (tree op, enum tree_code code,
5358 edge e, gimple_stmt_iterator bsi)
5359 {
5360 bool retval = false;
5361 gimple op_def;
5362 tree val;
5363 enum tree_code rhs_code;
5364
5365 /* We only care about SSA_NAMEs. */
5366 if (TREE_CODE (op) != SSA_NAME)
5367 return false;
5368
5369 /* We know that OP will have a zero or nonzero value. If OP is used
5370 more than once go ahead and register an assert for OP.
5371
5372 The FOUND_IN_SUBGRAPH support is not helpful in this situation as
5373 it will always be set for OP (because OP is used in a COND_EXPR in
5374 the subgraph). */
5375 if (!has_single_use (op))
5376 {
5377 val = build_int_cst (TREE_TYPE (op), 0);
5378 register_new_assert_for (op, op, code, val, NULL, e, bsi);
5379 retval = true;
5380 }
5381
5382 /* Now look at how OP is set. If it's set from a comparison,
5383 a truth operation or some bit operations, then we may be able
5384 to register information about the operands of that assignment. */
5385 op_def = SSA_NAME_DEF_STMT (op);
5386 if (gimple_code (op_def) != GIMPLE_ASSIGN)
5387 return retval;
5388
5389 rhs_code = gimple_assign_rhs_code (op_def);
5390
5391 if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
5392 {
5393 bool invert = (code == EQ_EXPR ? true : false);
5394 tree op0 = gimple_assign_rhs1 (op_def);
5395 tree op1 = gimple_assign_rhs2 (op_def);
5396
5397 if (TREE_CODE (op0) == SSA_NAME)
5398 retval |= register_edge_assert_for_2 (op0, e, bsi, rhs_code, op0, op1,
5399 invert);
5400 if (TREE_CODE (op1) == SSA_NAME)
5401 retval |= register_edge_assert_for_2 (op1, e, bsi, rhs_code, op0, op1,
5402 invert);
5403 }
5404 else if ((code == NE_EXPR
5405 && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
5406 || (code == EQ_EXPR
5407 && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
5408 {
5409 /* Recurse on each operand. */
5410 tree op0 = gimple_assign_rhs1 (op_def);
5411 tree op1 = gimple_assign_rhs2 (op_def);
5412 if (TREE_CODE (op0) == SSA_NAME
5413 && has_single_use (op0))
5414 retval |= register_edge_assert_for_1 (op0, code, e, bsi);
5415 if (TREE_CODE (op1) == SSA_NAME
5416 && has_single_use (op1))
5417 retval |= register_edge_assert_for_1 (op1, code, e, bsi);
5418 }
5419 else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
5420 && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
5421 {
5422 /* Recurse, flipping CODE. */
5423 code = invert_tree_comparison (code, false);
5424 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5425 code, e, bsi);
5426 }
5427 else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
5428 {
5429 /* Recurse through the copy. */
5430 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5431 code, e, bsi);
5432 }
5433 else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
5434 {
5435 /* Recurse through the type conversion. */
5436 retval |= register_edge_assert_for_1 (gimple_assign_rhs1 (op_def),
5437 code, e, bsi);
5438 }
5439
5440 return retval;
5441 }
5442
5443 /* Try to register an edge assertion for SSA name NAME on edge E for
5444 the condition COND contributing to the conditional jump pointed to by SI.
5445 Return true if an assertion for NAME could be registered. */
5446
5447 static bool
5448 register_edge_assert_for (tree name, edge e, gimple_stmt_iterator si,
5449 enum tree_code cond_code, tree cond_op0,
5450 tree cond_op1)
5451 {
5452 tree val;
5453 enum tree_code comp_code;
5454 bool retval = false;
5455 bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
5456
5457 /* Do not attempt to infer anything in names that flow through
5458 abnormal edges. */
5459 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
5460 return false;
5461
5462 if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
5463 cond_op0, cond_op1,
5464 is_else_edge,
5465 &comp_code, &val))
5466 return false;
5467
5468 /* Register ASSERT_EXPRs for name. */
5469 retval |= register_edge_assert_for_2 (name, e, si, cond_code, cond_op0,
5470 cond_op1, is_else_edge);
5471
5472
5473 /* If COND is effectively an equality test of an SSA_NAME against
5474 the value zero or one, then we may be able to assert values
5475 for SSA_NAMEs which flow into COND. */
5476
5477 /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
5478 statement of NAME we can assert both operands of the BIT_AND_EXPR
5479 have nonzero value. */
5480 if (((comp_code == EQ_EXPR && integer_onep (val))
5481 || (comp_code == NE_EXPR && integer_zerop (val))))
5482 {
5483 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5484
5485 if (is_gimple_assign (def_stmt)
5486 && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
5487 {
5488 tree op0 = gimple_assign_rhs1 (def_stmt);
5489 tree op1 = gimple_assign_rhs2 (def_stmt);
5490 retval |= register_edge_assert_for_1 (op0, NE_EXPR, e, si);
5491 retval |= register_edge_assert_for_1 (op1, NE_EXPR, e, si);
5492 }
5493 }
5494
5495 /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
5496 statement of NAME we can assert both operands of the BIT_IOR_EXPR
5497 have zero value. */
5498 if (((comp_code == EQ_EXPR && integer_zerop (val))
5499 || (comp_code == NE_EXPR && integer_onep (val))))
5500 {
5501 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5502
5503 /* For BIT_IOR_EXPR only if NAME == 0 both operands have
5504 necessarily zero value, or if type-precision is one. */
5505 if (is_gimple_assign (def_stmt)
5506 && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
5507 && (TYPE_PRECISION (TREE_TYPE (name)) == 1
5508 || comp_code == EQ_EXPR)))
5509 {
5510 tree op0 = gimple_assign_rhs1 (def_stmt);
5511 tree op1 = gimple_assign_rhs2 (def_stmt);
5512 retval |= register_edge_assert_for_1 (op0, EQ_EXPR, e, si);
5513 retval |= register_edge_assert_for_1 (op1, EQ_EXPR, e, si);
5514 }
5515 }
5516
5517 return retval;
5518 }
5519
5520
5521 /* Determine whether the outgoing edges of BB should receive an
5522 ASSERT_EXPR for each of the operands of BB's LAST statement.
5523 The last statement of BB must be a COND_EXPR.
5524
5525 If any of the sub-graphs rooted at BB have an interesting use of
5526 the predicate operands, an assert location node is added to the
5527 list of assertions for the corresponding operands. */
5528
5529 static bool
5530 find_conditional_asserts (basic_block bb, gimple last)
5531 {
5532 bool need_assert;
5533 gimple_stmt_iterator bsi;
5534 tree op;
5535 edge_iterator ei;
5536 edge e;
5537 ssa_op_iter iter;
5538
5539 need_assert = false;
5540 bsi = gsi_for_stmt (last);
5541
5542 /* Look for uses of the operands in each of the sub-graphs
5543 rooted at BB. We need to check each of the outgoing edges
5544 separately, so that we know what kind of ASSERT_EXPR to
5545 insert. */
5546 FOR_EACH_EDGE (e, ei, bb->succs)
5547 {
5548 if (e->dest == bb)
5549 continue;
5550
5551 /* Register the necessary assertions for each operand in the
5552 conditional predicate. */
5553 FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
5554 {
5555 need_assert |= register_edge_assert_for (op, e, bsi,
5556 gimple_cond_code (last),
5557 gimple_cond_lhs (last),
5558 gimple_cond_rhs (last));
5559 }
5560 }
5561
5562 return need_assert;
5563 }
5564
/* Helper record for find_switch_asserts: pairs one case label of a
   GIMPLE_SWITCH with the basic block that label transfers control to,
   so labels can be sorted and grouped by destination.  */
struct case_info
{
  tree expr;        /* The CASE_LABEL_EXPR for this case.  */
  basic_block bb;   /* Destination block of the label.  */
};
5570
5571 /* Compare two case labels sorting first by the destination bb index
5572 and then by the case value. */
5573
5574 static int
5575 compare_case_labels (const void *p1, const void *p2)
5576 {
5577 const struct case_info *ci1 = (const struct case_info *) p1;
5578 const struct case_info *ci2 = (const struct case_info *) p2;
5579 int idx1 = ci1->bb->index;
5580 int idx2 = ci2->bb->index;
5581
5582 if (idx1 < idx2)
5583 return -1;
5584 else if (idx1 == idx2)
5585 {
5586 /* Make sure the default label is first in a group. */
5587 if (!CASE_LOW (ci1->expr))
5588 return -1;
5589 else if (!CASE_LOW (ci2->expr))
5590 return 1;
5591 else
5592 return tree_int_cst_compare (CASE_LOW (ci1->expr),
5593 CASE_LOW (ci2->expr));
5594 }
5595 else
5596 return 1;
5597 }
5598
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static bool
find_switch_asserts (basic_block bb, gimple last)
{
  bool need_assert;
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  need_assert = false;
  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  /* Only an SSA name index can carry an ASSERT_EXPR.  */
  if (TREE_CODE (op) != SSA_NAME)
    return false;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
    }
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.
	 Sorting guarantees such labels are adjacent.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range; a label with
	     no CASE_HIGH covers the single value CASE_LOW.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Nothing to do if the range includes the default label until we
	 can register anti-ranges.  (The default label has min == NULL,
	 and sorts first in its group, so this skips the whole group.)  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR: OP == MIN for a singleton label, otherwise the
	 pair MIN <= OP (here) and OP <= MAX (below).  */
      need_assert |= register_edge_assert_for (op, e, bsi,
					       max ? GE_EXPR : EQ_EXPR,
					       op,
					       fold_convert (TREE_TYPE (op),
							     min));
      if (max)
	{
	  need_assert |= register_edge_assert_for (op, e, bsi, LE_EXPR,
						   op,
						   fold_convert (TREE_TYPE (op),
								 max));
	}
    }

  XDELETEVEC (ci);
  return need_assert;
}
5691
5692
5693 /* Traverse all the statements in block BB looking for statements that
5694 may generate useful assertions for the SSA names in their operand.
5695 If a statement produces a useful assertion A for name N_i, then the
5696 list of assertions already generated for N_i is scanned to
5697 determine if A is actually needed.
5698
5699 If N_i already had the assertion A at a location dominating the
5700 current location, then nothing needs to be done. Otherwise, the
5701 new location for A is recorded instead.
5702
5703 1- For every statement S in BB, all the variables used by S are
5704 added to bitmap FOUND_IN_SUBGRAPH.
5705
5706 2- If statement S uses an operand N in a way that exposes a known
5707 value range for N, then if N was not already generated by an
5708 ASSERT_EXPR, create a new assert location for N. For instance,
5709 if N is a pointer and the statement dereferences it, we can
5710 assume that N is not NULL.
5711
5712 3- COND_EXPRs are a special case of #2. We can derive range
5713 information from the predicate but need to insert different
5714 ASSERT_EXPRs for each of the sub-graphs rooted at the
5715 conditional block. If the last statement of BB is a conditional
5716 expression of the form 'X op Y', then
5717
5718 a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
5719
5720 b) If the conditional is the only entry point to the sub-graph
5721 corresponding to the THEN_CLAUSE, recurse into it. On
5722 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
5723 an ASSERT_EXPR is added for the corresponding variable.
5724
5725 c) Repeat step (b) on the ELSE_CLAUSE.
5726
5727 d) Mark X and Y in FOUND_IN_SUBGRAPH.
5728
5729 For instance,
5730
5731 if (a == 9)
5732 b = a;
5733 else
5734 b = c + 1;
5735
5736 In this case, an assertion on the THEN clause is useful to
5737 determine that 'a' is always 9 on that edge. However, an assertion
5738 on the ELSE clause would be unnecessary.
5739
5740 4- If BB does not end in a conditional expression, then we recurse
5741 into BB's dominator children.
5742
5743 At the end of the recursive traversal, every SSA name will have a
5744 list of locations where ASSERT_EXPRs should be added. When a new
5745 location for name N is found, it is registered by calling
5746 register_new_assert_for. That function keeps track of all the
5747 registered assertions to prevent adding unnecessary assertions.
5748 For instance, if a pointer P_4 is dereferenced more than once in a
5749 dominator tree, only the location dominating all the dereference of
5750 P_4 will receive an ASSERT_EXPR.
5751
5752 If this function returns true, then it means that there are names
5753 for which we need to generate ASSERT_EXPRs. Those assertions are
5754 inserted by process_assert_insertions. */
5755
static bool
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple_stmt_iterator si;
  gimple last;
  bool need_assert;

  need_assert = false;
  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_conditional_asserts (bb, last);

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    need_assert |= find_switch_asserts (bb, last);

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.
     The walk is backwards so that LIVE reflects liveness below each
     statement as it is visited.  */
  for (si = gsi_last_bb (bb); !gsi_end_p (si); gsi_prev (&si))
    {
      gimple stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      /* Debug statements must not affect the generated assertions.  */
      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple def_stmt = SSA_NAME_DEF_STMT (t);

		  /* Follow chains of pointer-to-pointer conversions.  */
		  while (is_gimple_assign (def_stmt)
			 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR
			 && TREE_CODE
			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (! has_single_use (t))
			{
			  register_new_assert_for (t, t, comp_code, value,
						   bb, NULL, si);
			  need_assert = true;
			}
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	      need_assert = true;
	    }
	}

      /* Update live: walking backwards, uses become live above this
	 statement and the definition stops being live above it.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gimple phi = gsi_stmt (si);
      tree res = gimple_phi_result (phi);

      /* Virtual operands carry no value-range information.  */
      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }

  return need_assert;
}
5878
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.
   Returns true if there are assert expressions to be inserted.  */

static bool
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block);
  int *bb_rpo = XNEWVEC (int, last_basic_block);
  int *last_rpo = XCNEWVEC (int, last_basic_block);
  int rpo_cnt, i;
  bool need_asserts;

  live = XCNEWVEC (sbitmap, last_basic_block);
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  /* bb_rpo maps a block index back to its position in RPO order.  */
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  need_asserts = false;
  /* Visit blocks in reverse of the RPO order, so (forward-edge)
     successors are processed before their predecessors and liveness
     can be propagated backwards.  */
  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK (rpo[i]);
      edge e;
      edge_iterator ei;

      /* Lazily allocate the liveness bitmap for this block.  */
      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
	 this block.  */
      need_asserts |= find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      /* Liveness is not propagated along back edges or from
		 the entry block.  */
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  /* Free any liveness bitmaps that are still outstanding.  */
  for (i = 0; i < last_basic_block; ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);

  return need_asserts;
}
5966
5967 /* Create an ASSERT_EXPR for NAME and insert it in the location
5968 indicated by LOC. Return true if we made any edge insertions. */
5969
5970 static bool
5971 process_assert_insertions_for (tree name, assert_locus_t loc)
5972 {
5973 /* Build the comparison expression NAME_i COMP_CODE VAL. */
5974 gimple stmt;
5975 tree cond;
5976 gimple assert_stmt;
5977 edge_iterator ei;
5978 edge e;
5979
5980 /* If we have X <=> X do not insert an assert expr for that. */
5981 if (loc->expr == loc->val)
5982 return false;
5983
5984 cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
5985 assert_stmt = build_assert_expr_for (cond, name);
5986 if (loc->e)
5987 {
5988 /* We have been asked to insert the assertion on an edge. This
5989 is used only by COND_EXPR and SWITCH_EXPR assertions. */
5990 gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
5991 || (gimple_code (gsi_stmt (loc->si))
5992 == GIMPLE_SWITCH));
5993
5994 gsi_insert_on_edge (loc->e, assert_stmt);
5995 return true;
5996 }
5997
5998 /* Otherwise, we can insert right after LOC->SI iff the
5999 statement must not be the last statement in the block. */
6000 stmt = gsi_stmt (loc->si);
6001 if (!stmt_ends_bb_p (stmt))
6002 {
6003 gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
6004 return false;
6005 }
6006
6007 /* If STMT must be the last statement in BB, we can only insert new
6008 assertions on the non-abnormal edge out of BB. Note that since
6009 STMT is not control flow, there may only be one non-abnormal edge
6010 out of BB. */
6011 FOR_EACH_EDGE (e, ei, loc->bb->succs)
6012 if (!(e->flags & EDGE_ABNORMAL))
6013 {
6014 gsi_insert_on_edge (e, assert_stmt);
6015 return true;
6016 }
6017
6018 gcc_unreachable ();
6019 }
6020
6021
6022 /* Process all the insertions registered for every name N_i registered
6023 in NEED_ASSERT_FOR. The list of assertions to be inserted are
6024 found in ASSERTS_FOR[i]. */
6025
6026 static void
6027 process_assert_insertions (void)
6028 {
6029 unsigned i;
6030 bitmap_iterator bi;
6031 bool update_edges_p = false;
6032 int num_asserts = 0;
6033
6034 if (dump_file && (dump_flags & TDF_DETAILS))
6035 dump_all_asserts (dump_file);
6036
6037 EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
6038 {
6039 assert_locus_t loc = asserts_for[i];
6040 gcc_assert (loc);
6041
6042 while (loc)
6043 {
6044 assert_locus_t next = loc->next;
6045 update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
6046 free (loc);
6047 loc = next;
6048 num_asserts++;
6049 }
6050 }
6051
6052 if (update_edges_p)
6053 gsi_commit_edge_inserts ();
6054
6055 statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
6056 num_asserts);
6057 }
6058
6059
6060 /* Traverse the flowgraph looking for conditional jumps to insert range
6061 expressions. These range expressions are meant to provide information
6062 to optimizations that need to reason in terms of value ranges. They
6063 will not be expanded into RTL. For instance, given:
6064
6065 x = ...
6066 y = ...
6067 if (x < y)
6068 y = x - 2;
6069 else
6070 x = y + 3;
6071
6072 this pass will transform the code into:
6073
6074 x = ...
6075 y = ...
6076 if (x < y)
6077 {
6078 x = ASSERT_EXPR <x, x < y>
6079 y = x - 2
6080 }
6081 else
6082 {
6083 y = ASSERT_EXPR <y, x <= y>
6084 x = y + 3
6085 }
6086
6087 The idea is that once copy and constant propagation have run, other
6088 optimizations will be able to determine what ranges of values can 'x'
6089 take in different paths of the code, simply by checking the reaching
6090 definition of 'x'. */
6091
static void
insert_range_assertions (void)
{
  /* Global state consumed by register_new_assert_for and
     process_assert_insertions.  */
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus_t, num_ssa_names);

  /* Dominance info is needed by register_new_assert_for to dedupe
     assertions that are already implied at a dominating location.  */
  calculate_dominance_info (CDI_DOMINATORS);

  if (find_assert_locations ())
    {
      process_assert_insertions ();
      /* The inserted ASSERT_EXPRs create new definitions; bring the
	 SSA form up to date (no new PHIs are required).  */
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
6115
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the array
   subscript is a constant, check if it is outside valid range.  If the
   array subscript is a RANGE, warn if it is non-overlapping with valid
   range.  IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a
   ADDR_EXPR.  */

static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
  value_range_t* vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;
  tree base;

  /* Warn at most once per tree node.  */
  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  /* Can not check flexible arrays.  */
  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST)
    return;

  /* Accesses to trailing arrays via pointers may access storage
     beyond the type's array bounds.  */
  base = get_base_address (ref);
  if (base && TREE_CODE (base) == MEM_REF)
    {
      tree cref, next = NULL_TREE;

      if (TREE_CODE (TREE_OPERAND (ref, 0)) != COMPONENT_REF)
	return;

      /* Look for a field following the one being accessed; if none
	 exists the array is trailing and may legitimately extend.  */
      cref = TREE_OPERAND (ref, 0);
      if (TREE_CODE (TREE_TYPE (TREE_OPERAND (cref, 0))) == RECORD_TYPE)
	for (next = DECL_CHAIN (TREE_OPERAND (cref, 1));
	     next && TREE_CODE (next) != FIELD_DECL;
	     next = DECL_CHAIN (next))
	  ;

      /* If this is the last field in a struct type or a field in a
	 union type do not warn.  */
      if (!next)
	return;
    }

  low_bound = array_ref_low_bound (ref);
  up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);

  /* If the subscript is an SSA name, use its recorded value range to
     derive the extreme subscript values to check.  For a VR_RANGE,
     LOW_SUB gets the range maximum and UP_SUB the minimum.  */
  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
        {
          low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
          up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
        }
    }

  if (vr && vr->type == VR_ANTI_RANGE)
    {
      /* For an anti-range, warn only when the excluded interval
	 covers the entire valid index range.  */
      if (TREE_CODE (up_sub) == INTEGER_CST
          && tree_int_cst_lt (up_bound, up_sub)
          && TREE_CODE (low_sub) == INTEGER_CST
          && tree_int_cst_lt (low_sub, low_bound))
	{
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is outside array bounds");
	  TREE_NO_WARNING (ref) = 1;
	}
    }
  else if (TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? (tree_int_cst_lt (up_bound, up_sub)
		  && !tree_int_cst_equal (up_bound_p1, up_sub))
	       : (tree_int_cst_lt (up_bound, up_sub)
		  || tree_int_cst_equal (up_bound_p1, up_sub))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is above array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
           && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warning_at (location, OPT_Warray_bounds,
		  "array subscript is below array bounds");
      TREE_NO_WARNING (ref) = 1;
    }
}
6221
/* Searches if the expression T, located at LOCATION, computes the
   address of an ARRAY_REF, and calls check_array_ref on it.  */

static void
search_for_addr_array (tree t, location_t location)
{
  /* Look through trivial single-operand copies so we see the address
     computation T ultimately refers to.  */
  while (TREE_CODE (t) == SSA_NAME)
    {
      gimple g = SSA_NAME_DEF_STMT (t);

      if (gimple_code (g) != GIMPLE_ASSIGN)
	return;

      if (get_gimple_rhs_class (gimple_assign_rhs_code (g))
	  != GIMPLE_SINGLE_RHS)
	return;

      t = gimple_assign_rhs1 (g);
    }


  /* We are only interested in addresses of ARRAY_REF's.  */
  if (TREE_CODE (t) != ADDR_EXPR)
    return;

  /* Check each ARRAY_REF in the reference chain. */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t));

  if (TREE_CODE (t) == MEM_REF
      && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
      && !TREE_NO_WARNING (t))
    {
      /* The base is &ARRAY with a constant byte offset; check the
	 offset against the array domain.  */
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
      double_int idx;
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
	  || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
	  || !TYPE_DOMAIN (TREE_TYPE (tem)))
	return;

      low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
      el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
      if (!low_bound
	  || TREE_CODE (low_bound) != INTEGER_CST
	  || !up_bound
	  || TREE_CODE (up_bound) != INTEGER_CST
	  || !el_sz
	  || TREE_CODE (el_sz) != INTEGER_CST)
	return;

      /* Convert the byte offset into an element index.  */
      idx = mem_ref_offset (t);
      idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
      if (idx.slt (double_int_zero))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is below array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
      else if (idx.sgt (tree_to_double_int (up_bound)
			- tree_to_double_int (low_bound)
			+ double_int_one))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Array bound warning for ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	      fprintf (dump_file, "\n");
	    }
	  warning_at (location, OPT_Warray_bounds,
		      "array subscript is above array bounds");
	  TREE_NO_WARNING (t) = 1;
	}
    }
}
6310
/* walk_tree() callback that checks if *TP is
   an ARRAY_REF inside an ADDR_EXPR (in which an array
   subscript one outside the valid range is allowed).  Call
   check_array_ref for each ARRAY_REF found.  The fallback location
   is passed in DATA (a location_t via walk_stmt_info::info).  */

static tree
check_array_bounds (tree *tp, int *walk_subtree, void *data)
{
  tree t = *tp;
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  location_t location;

  /* Prefer the expression's own location; fall back to the statement
     location supplied by the caller.  */
  if (EXPR_HAS_LOCATION (t))
    location = EXPR_LOCATION (t);
  else
    {
      location_t *locp = (location_t *) wi->info;
      location = *locp;
    }

  *walk_subtree = TRUE;

  if (TREE_CODE (t) == ARRAY_REF)
    check_array_ref (location, t, false /*ignore_off_by_one*/);

  if (TREE_CODE (t) == MEM_REF
      || (TREE_CODE (t) == RETURN_EXPR && TREE_OPERAND (t, 0)))
    search_for_addr_array (TREE_OPERAND (t, 0), location);

  /* Do not walk into ADDR_EXPR operands here; subscripts in address
     computations allow one element past the end.  */
  if (TREE_CODE (t) == ADDR_EXPR)
    *walk_subtree = FALSE;

  return NULL_TREE;
}
6346
/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  */

static void
check_all_array_refs (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;
      edge e;
      bool executable = false;

      /* Skip blocks that were found to be unreachable.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
	executable |= !!(e->flags & EDGE_EXECUTABLE);
      if (!executable)
	continue;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);
	  struct walk_stmt_info wi;
	  /* Without a location there is nowhere useful to warn.  */
	  if (!gimple_has_location (stmt))
	    continue;

	  if (is_gimple_call (stmt))
	    {
	      /* For calls, look for array addresses in each argument.  */
	      size_t i;
	      size_t n = gimple_call_num_args (stmt);
	      for (i = 0; i < n; i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  search_for_addr_array (arg, gimple_location (stmt));
		}
	    }
	  else
	    {
	      memset (&wi, 0, sizeof (wi));
	      /* Pass the statement location as fallback for operands
		 that carry no location of their own.  */
	      wi.info = CONST_CAST (void *, (const void *)
				    gimple_location_ptr (stmt));

	      walk_gimple_op (gsi_stmt (si),
			      check_array_bounds,
			      &wi);
	    }
	}
    }
}
6398
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;

  /* Note that the iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current iterator.  */
  FOR_EACH_BB (bb)
    for (si = gsi_start_bb (bb); !gsi_end_p (si);)
      {
	gimple stmt = gsi_stmt (si);
	gimple use_stmt;

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;
	    tree cond = fold (ASSERT_EXPR_COND (rhs));
	    use_operand_p use_p;
	    imm_use_iterator iter;

	    /* A statically-false assertion condition would mean this
	       code is unreachable; that must not happen here.  */
	    gcc_assert (cond != boolean_false_node);

	    /* Propagate the RHS into every use of the LHS.  */
	    var = ASSERT_EXPR_VAR (rhs);
	    FOR_EACH_IMM_USE_STMT (use_stmt, iter,
				   gimple_assign_lhs (stmt))
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		{
		  SET_USE (use_p, var);
		  gcc_assert (TREE_CODE (var) == SSA_NAME);
		}

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  gsi_next (&si);
      }
}
6467
6468
6469 /* Return true if STMT is interesting for VRP. */
6470
6471 static bool
6472 stmt_interesting_for_vrp (gimple stmt)
6473 {
6474 if (gimple_code (stmt) == GIMPLE_PHI)
6475 {
6476 tree res = gimple_phi_result (stmt);
6477 return (!virtual_operand_p (res)
6478 && (INTEGRAL_TYPE_P (TREE_TYPE (res))
6479 || POINTER_TYPE_P (TREE_TYPE (res))));
6480 }
6481 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
6482 {
6483 tree lhs = gimple_get_lhs (stmt);
6484
6485 /* In general, assignments with virtual operands are not useful
6486 for deriving ranges, with the obvious exception of calls to
6487 builtin functions. */
6488 if (lhs && TREE_CODE (lhs) == SSA_NAME
6489 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6490 || POINTER_TYPE_P (TREE_TYPE (lhs)))
6491 && ((is_gimple_call (stmt)
6492 && gimple_call_fndecl (stmt) != NULL_TREE
6493 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
6494 || !gimple_vuse (stmt)))
6495 return true;
6496 }
6497 else if (gimple_code (stmt) == GIMPLE_COND
6498 || gimple_code (stmt) == GIMPLE_SWITCH)
6499 return true;
6500
6501 return false;
6502 }
6503
6504
/* Initialize local data structures for VRP.  */

static void
vrp_initialize (void)
{
  basic_block bb;

  values_propagated = false;
  num_vr_values = num_ssa_names;
  vr_value = XCNEWVEC (value_range_t *, num_vr_values);
  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);

  FOR_EACH_BB (bb)
    {
      gimple_stmt_iterator si;

      /* Mark uninteresting PHIs as VARYING and not to be simulated
	 again; everything else will be revisited by the propagator.  */
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple phi = gsi_stmt (si);
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      tree lhs = PHI_RESULT (phi);
	      set_value_range_to_varying (get_value_range (lhs));
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple stmt = gsi_stmt (si);

	  /* If the statement is a control insn, then we do not
	     want to avoid simulating the statement once.  Failure
	     to do so means that those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      /* All definitions of an uninteresting statement are
		 immediately VARYING.  */
	      ssa_op_iter i;
	      tree def;
	      FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
		set_value_range_to_varying (get_value_range (def));
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
6556
6557 /* Return the singleton value-range for NAME or NAME. */
6558
6559 static inline tree
6560 vrp_valueize (tree name)
6561 {
6562 if (TREE_CODE (name) == SSA_NAME)
6563 {
6564 value_range_t *vr = get_value_range (name);
6565 if (vr->type == VR_RANGE
6566 && (vr->min == vr->max
6567 || operand_equal_p (vr->min, vr->max, 0)))
6568 return vr->min;
6569 }
6570 return name;
6571 }
6572
/* Visit assignment STMT.  If it produces an interesting range, record
   the SSA name in *OUTPUT_P.  */

static enum ssa_prop_result
vrp_visit_assignment_or_call (gimple stmt, tree *output_p)
{
  tree def, lhs;
  ssa_op_iter iter;
  enum gimple_code code = gimple_code (stmt);
  lhs = gimple_get_lhs (stmt);

  /* We only keep track of ranges in integral and pointer types.  */
  if (TREE_CODE (lhs) == SSA_NAME
      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	   /* It is valid to have NULL MIN/MAX values on a type.  See
	      build_range_type.  */
	   && TYPE_MIN_VALUE (TREE_TYPE (lhs))
	   && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
	  || POINTER_TYPE_P (TREE_TYPE (lhs))))
    {
      value_range_t new_vr = VR_INITIALIZER;

      /* Try folding the statement to a constant first.  */
      tree tem = gimple_fold_stmt_to_constant (stmt, vrp_valueize);
      if (tem && !is_overflow_infinity (tem))
	set_value_range (&new_vr, VR_RANGE, tem, tem, NULL);
      /* Then dispatch to value-range extracting functions.  */
      else if (code == GIMPLE_CALL)
	extract_range_basic (&new_vr, stmt);
      else
	extract_range_from_assignment (&new_vr, stmt);

      /* update_value_range returns true only when the range for LHS
	 actually changed; only then do dependents need revisiting.  */
      if (update_value_range (lhs, &new_vr))
	{
	  *output_p = lhs;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &new_vr);
	      fprintf (dump_file, "\n\n");
	    }

	  if (new_vr.type == VR_VARYING)
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}

      return SSA_PROP_NOT_INTERESTING;
    }

  /* Every other statement produces no useful ranges.  */
  FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
    set_value_range_to_varying (get_value_range (def));

  return SSA_PROP_VARYING;
}
6633
6634 /* Helper that gets the value range of the SSA_NAME with version I
6635 or a symbolic range containing the SSA_NAME only if the value range
6636 is varying or undefined. */
6637
6638 static inline value_range_t
6639 get_vr_for_comparison (int i)
6640 {
6641 value_range_t vr = *get_value_range (ssa_name (i));
6642
6643 /* If name N_i does not have a valid range, use N_i as its own
6644 range. This allows us to compare against names that may
6645 have N_i in their ranges. */
6646 if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
6647 {
6648 vr.type = VR_RANGE;
6649 vr.min = ssa_name (i);
6650 vr.max = ssa_name (i);
6651 }
6652
6653 return vr;
6654 }
6655
/* Compare all the value ranges for names equivalent to VAR with VAL
   using comparison code COMP.  Return the same value returned by
   compare_range_with_value, including the setting of
   *STRICT_OVERFLOW_P.  */

static tree
compare_name_with_value (enum tree_code comp, tree var, tree val,
			 bool *strict_overflow_p)
{
  bitmap_iterator bi;
  unsigned i;
  bitmap e;
  tree retval, t;
  int used_strict_overflow;
  bool sop;
  value_range_t equiv_vr;

  /* Get the set of equivalences for VAR.  */
  e = get_value_range (var)->equiv;

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Compare vars' value range with val.  */
  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
  sop = false;
  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
  if (retval)
    used_strict_overflow = sop ? 1 : 0;

  /* If the equiv set is empty we have done all work we need to do.  */
  if (e == NULL)
    {
      if (retval
	  && used_strict_overflow > 0)
	*strict_overflow_p = true;
      return retval;
    }

  /* Otherwise compare VAL against the range of every equivalent name;
     all answers obtained must agree.  */
  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
    {
      equiv_vr = get_vr_for_comparison (i);
      sop = false;
      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
      if (t)
	{
	  /* If we get different answers from different members
	     of the equivalence set this check must be in a dead
	     code region.  Folding it to a trap representation
	     would be correct here.  For now just return don't-know.  */
	  if (retval != NULL
	      && t != retval)
	    {
	      retval = NULL_TREE;
	      break;
	    }
	  retval = t;

	  /* Track whether any (or all) of the successful comparisons
	     relied on undefined signed overflow.  */
	  if (!sop)
	    used_strict_overflow = 0;
	  else if (used_strict_overflow < 0)
	    used_strict_overflow = 1;
	}
    }

  if (retval
      && used_strict_overflow > 0)
    *strict_overflow_p = true;

  return retval;
}
6728
6729
/* Given a comparison code COMP and names N1 and N2, compare all the
   ranges equivalent to N1 against all the ranges equivalent to N2
   to determine the value of N1 COMP N2.  Return the same value
   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
   whether we relied on an overflow infinity in the comparison.  */


static tree
compare_names (enum tree_code comp, tree n1, tree n2,
	       bool *strict_overflow_p)
{
  tree t, retval;
  bitmap e1, e2;
  bitmap_iterator bi1, bi2;
  unsigned i1, i2;
  int used_strict_overflow;
  static bitmap_obstack *s_obstack = NULL;
  static bitmap s_e1 = NULL, s_e2 = NULL;

  /* Compare the ranges of every name equivalent to N1 against the
     ranges of every name equivalent to N2.  */
  e1 = get_value_range (n1)->equiv;
  e2 = get_value_range (n2)->equiv;

  /* Use the fake bitmaps if e1 or e2 are not available.  These are
     allocated lazily and reused across calls.  */
  if (s_obstack == NULL)
    {
      s_obstack = XNEW (bitmap_obstack);
      bitmap_obstack_initialize (s_obstack);
      s_e1 = BITMAP_ALLOC (s_obstack);
      s_e2 = BITMAP_ALLOC (s_obstack);
    }
  if (e1 == NULL)
    e1 = s_e1;
  if (e2 == NULL)
    e2 = s_e2;

  /* Add N1 and N2 to their own set of equivalences to avoid
     duplicating the body of the loop just to check N1 and N2
     ranges.  NOTE: these temporary bits must be cleared again on
     every exit path below.  */
  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));

  /* If the equivalence sets have a common intersection, then the two
     names can be compared without checking their ranges.  */
  if (bitmap_intersect_p (e1, e2))
    {
      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));

      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
	     ? boolean_true_node
	     : boolean_false_node;
    }

  /* Start at -1.  Set it to 0 if we do a comparison without relying
     on overflow, or 1 if all comparisons rely on overflow.  */
  used_strict_overflow = -1;

  /* Otherwise, compare all the equivalent ranges.  First, add N1 and
     N2 to their own set of equivalences to avoid duplicating the body
     of the loop just to check N1 and N2 ranges.  */
  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
    {
      value_range_t vr1 = get_vr_for_comparison (i1);

      t = retval = NULL_TREE;
      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
	{
	  bool sop = false;

	  value_range_t vr2 = get_vr_for_comparison (i2);

	  t = compare_ranges (comp, &vr1, &vr2, &sop);
	  if (t)
	    {
	      /* If we get different answers from different members
		 of the equivalence set this check must be in a dead
		 code region.  Folding it to a trap representation
		 would be correct here.  For now just return don't-know.  */
	      if (retval != NULL
		  && t != retval)
		{
		  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
		  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
		  return NULL_TREE;
		}
	      retval = t;

	      if (!sop)
		used_strict_overflow = 0;
	      else if (used_strict_overflow < 0)
		used_strict_overflow = 1;
	    }
	}

      if (retval)
	{
	  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
	  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
	  if (used_strict_overflow > 0)
	    *strict_overflow_p = true;
	  return retval;
	}
    }

  /* None of the equivalent ranges are useful in computing this
     comparison.  */
  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
  return NULL_TREE;
}
6842
6843 /* Helper function for vrp_evaluate_conditional_warnv. */
6844
6845 static tree
6846 vrp_evaluate_conditional_warnv_with_ops_using_ranges (enum tree_code code,
6847 tree op0, tree op1,
6848 bool * strict_overflow_p)
6849 {
6850 value_range_t *vr0, *vr1;
6851
6852 vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
6853 vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
6854
6855 if (vr0 && vr1)
6856 return compare_ranges (code, vr0, vr1, strict_overflow_p);
6857 else if (vr0 && vr1 == NULL)
6858 return compare_range_with_value (code, vr0, op1, strict_overflow_p);
6859 else if (vr0 == NULL && vr1)
6860 return (compare_range_with_value
6861 (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
6862 return NULL;
6863 }
6864
6865 /* Helper function for vrp_evaluate_conditional_warnv. */
6866
6867 static tree
6868 vrp_evaluate_conditional_warnv_with_ops (enum tree_code code, tree op0,
6869 tree op1, bool use_equiv_p,
6870 bool *strict_overflow_p, bool *only_ranges)
6871 {
6872 tree ret;
6873 if (only_ranges)
6874 *only_ranges = true;
6875
6876 /* We only deal with integral and pointer types. */
6877 if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
6878 && !POINTER_TYPE_P (TREE_TYPE (op0)))
6879 return NULL_TREE;
6880
6881 if (use_equiv_p)
6882 {
6883 if (only_ranges
6884 && (ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
6885 (code, op0, op1, strict_overflow_p)))
6886 return ret;
6887 *only_ranges = false;
6888 if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME)
6889 return compare_names (code, op0, op1, strict_overflow_p);
6890 else if (TREE_CODE (op0) == SSA_NAME)
6891 return compare_name_with_value (code, op0, op1, strict_overflow_p);
6892 else if (TREE_CODE (op1) == SSA_NAME)
6893 return (compare_name_with_value
6894 (swap_tree_comparison (code), op1, op0, strict_overflow_p));
6895 }
6896 else
6897 return vrp_evaluate_conditional_warnv_with_ops_using_ranges (code, op0, op1,
6898 strict_overflow_p);
6899 return NULL_TREE;
6900 }
6901
/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
   information.  Return NULL if the conditional can not be evaluated.
   The ranges of all the names equivalent with the operands in COND
   will be used when trying to compute the value.  If the result is
   based on undefined signed overflow, issue a warning if
   appropriate.  */

static tree
vrp_evaluate_conditional (enum tree_code code, tree op0, tree op1, gimple stmt)
{
  bool sop;
  tree ret;
  bool only_ranges;

  /* Some passes and foldings leak constants with overflow flag set
     into the IL.  Avoid doing wrong things with these and bail out.  */
  if ((TREE_CODE (op0) == INTEGER_CST
       && TREE_OVERFLOW (op0))
      || (TREE_CODE (op1) == INTEGER_CST
	  && TREE_OVERFLOW (op1)))
    return NULL_TREE;

  sop = false;
  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
  						 &only_ranges);

  /* If the answer depended on assuming signed overflow is undefined,
     consider warning about it (-Wstrict-overflow).  */
  if (ret && sop)
    {
      enum warn_strict_overflow_code wc;
      const char* warnmsg;

      if (is_gimple_min_invariant (ret))
	{
	  wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional to constant");
	}
      else
	{
	  wc = WARN_STRICT_OVERFLOW_COMPARISON;
	  warnmsg = G_("assuming signed overflow does not occur when "
		       "simplifying conditional");
	}

      if (issue_strict_overflow_warning (wc))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);
	  warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
	}
    }

  if (warn_type_limits
      && ret && only_ranges
      && TREE_CODE_CLASS (code) == tcc_comparison
      && TREE_CODE (op0) == SSA_NAME)
    {
      /* If the comparison is being folded and the operand on the LHS
	 is being compared against a constant value that is outside of
	 the natural range of OP0's type, then the predicate will
	 always fold regardless of the value of OP0.  If -Wtype-limits
	 was specified, emit a warning.  */
      tree type = TREE_TYPE (op0);
      value_range_t *vr0 = get_value_range (op0);

      if (vr0->type != VR_VARYING
	  && INTEGRAL_TYPE_P (type)
	  && vrp_val_is_min (vr0->min)
	  && vrp_val_is_max (vr0->max)
	  && is_gimple_min_invariant (op1))
	{
	  location_t location;

	  if (!gimple_has_location (stmt))
	    location = input_location;
	  else
	    location = gimple_location (stmt);

	  warning_at (location, OPT_Wtype_limits,
		      integer_zerop (ret)
		      ? G_("comparison always false "
			   "due to limited range of data type")
		      : G_("comparison always true "
			   "due to limited range of data type"));
	}
    }

  return ret;
}
6995
6996
/* Visit conditional statement STMT.  If we can determine which edge
   will be taken out of STMT's basic block, record it in
   *TAKEN_EDGE_P and return SSA_PROP_INTERESTING.  Otherwise, return
   SSA_PROP_VARYING.  */

static enum ssa_prop_result
vrp_visit_cond_stmt (gimple stmt, edge *taken_edge_p)
{
  tree val;
  bool sop;

  *taken_edge_p = NULL;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      tree use;
      ssa_op_iter i;

      fprintf (dump_file, "\nVisiting conditional with predicate: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\nWith known ranges\n");

      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
	{
	  fprintf (dump_file, "\t");
	  print_generic_expr (dump_file, use, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
	}

      fprintf (dump_file, "\n");
    }

  /* Compute the value of the predicate COND by checking the known
     ranges of each of its operands.

     Note that we cannot evaluate all the equivalent ranges here
     because those ranges may not yet be final and with the current
     propagation strategy, we cannot determine when the value ranges
     of the names in the equivalence set have changed.

     For instance, given the following code fragment

        i_5 = PHI <8, i_13>
	...
     	i_14 = ASSERT_EXPR <i_5, i_5 != 0>
	if (i_14 == 1)
	  ...

     Assume that on the first visit to i_14, i_5 has the temporary
     range [8, 8] because the second argument to the PHI function is
     not yet executable.  We derive the range ~[0, 0] for i_14 and the
     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
     the first time, since i_14 is equivalent to the range [8, 8], we
     determine that the predicate is always false.

     On the next round of propagation, i_13 is determined to be
     VARYING, which causes i_5 to drop down to VARYING.  So, another
     visit to i_14 is scheduled.  In this second visit, we compute the
     exact same range and equivalence set for i_14, namely ~[0, 0] and
     { i_5 }.  But we did not have the previous range for i_5
     registered, so vrp_visit_assignment thinks that the range for
     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
     is not visited again, which stops propagation from visiting
     statements in the THEN clause of that if().

     To properly fix this we would need to keep the previous range
     value for the names in the equivalence set.  This way we would've
     discovered that from one visit to the other i_5 changed from
     range [8, 8] to VR_VARYING.

     However, fixing this apparent limitation may not be worth the
     additional checking.  Testing on several code bases (GCC, DLV,
     MICO, TRAMP3D and SPEC2000) showed that doing this results in
     4 more predicates folded in SPEC.  */
  sop = false;

  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
						 gimple_cond_lhs (stmt),
						 gimple_cond_rhs (stmt),
						 false, &sop, NULL);
  if (val)
    {
      if (!sop)
	*taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
      else
	{
	  /* Do not fold predicates that rely on undefined signed
	     overflow at this point.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "\nIgnoring predicate evaluation because "
		     "it assumes that signed overflow is undefined");
	  val = NULL_TREE;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nPredicate evaluates to: ");
      if (val == NULL_TREE)
	fprintf (dump_file, "DON'T KNOW\n");
      else
	print_generic_stmt (dump_file, val, 0);
    }

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
7103
/* Searches the case label vector of switch statement STMT for the
   index *IDX of the CASE_LABEL that includes the value VAL.  The
   search is restricted to the range [START_IDX, n - 1] where n is the
   number of labels.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
   returned.  */

static bool
find_case_label_index (gimple stmt, size_t start_idx, tree val, size_t *idx)
{
  size_t n = gimple_switch_num_labels (stmt);
  size_t low, high;

  /* Find case label for minimum of the value range or the next one.
     At each iteration we are searching in [low, high - 1]. */

  for (low = start_idx, high = n; high != low; )
    {
      tree t;
      int cmp;
      /* Note that i != high, so we never ask for n. */
      size_t i = (high + low) / 2;
      t = gimple_switch_label (stmt, i);

      /* Cache the result of comparing CASE_LOW and val. */
      cmp = tree_int_cst_compare (CASE_LOW (t), val);

      if (cmp == 0)
	{
	  /* Ranges cannot be empty. */
	  *idx = i;
	  return true;
	}
      else if (cmp > 0)
	high = i;
      else
	{
	  low = i + 1;
	  /* VAL may still fall within label I's CASE_LOW..CASE_HIGH
	     range even though it is above CASE_LOW.  */
	  if (CASE_HIGH (t) != NULL
	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
	    {
	      *idx = i;
	      return true;
	    }
       }
    }

  *idx = high;
  return false;
}
7160
/* Searches the case label vector of switch statement STMT for the
   range of CASE_LABELs that is used for values between MIN and MAX.
   The first index is placed in MIN_IDX.  The last index is placed in
   MAX_IDX.  If the range of CASE_LABELs is empty then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gimple stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
         Return an empty range. */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      /* When MAX itself is not covered, J points at the first label
	 above MAX; step back to the last label within [MIN, MAX].  */
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  /* A gap between consecutive labels means some values in
	     [MIN, MAX] fall through to the default label.  */
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
7217
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
   Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gimple stmt, value_range_t *vr, size_t *min_idx1,
			size_t *max_idx1, size_t *min_idx2,
			size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (stmt);
  bool take_default;
  tree case_low, case_high;
  tree min = vr->min, max = vr->max;

  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);

  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (vr->type == VR_RANGE)
    {
      /* A plain range maps onto the single label span [i, j].  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* VR is an anti-range: the reachable labels are those NOT covered by
     [MIN, MAX].  Set first range to all case labels and then carve out
     the covered span below.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  /* No label is covered by [MIN, MAX]; all labels are reachable.  */
  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  Labels only partially covered stay reachable.  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (case_low, min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (max, case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* Right part is empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* Left part is empty; promote the right part to the first range.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
7295
7296 /* Visit switch statement STMT. If we can determine which edge
7297 will be taken out of STMT's basic block, record it in
7298 *TAKEN_EDGE_P and return SSA_PROP_INTERESTING. Otherwise, return
7299 SSA_PROP_VARYING. */
7300
7301 static enum ssa_prop_result
7302 vrp_visit_switch_stmt (gimple stmt, edge *taken_edge_p)
7303 {
7304 tree op, val;
7305 value_range_t *vr;
7306 size_t i = 0, j = 0, k, l;
7307 bool take_default;
7308
7309 *taken_edge_p = NULL;
7310 op = gimple_switch_index (stmt);
7311 if (TREE_CODE (op) != SSA_NAME)
7312 return SSA_PROP_VARYING;
7313
7314 vr = get_value_range (op);
7315 if (dump_file && (dump_flags & TDF_DETAILS))
7316 {
7317 fprintf (dump_file, "\nVisiting switch expression with operand ");
7318 print_generic_expr (dump_file, op, 0);
7319 fprintf (dump_file, " with known range ");
7320 dump_value_range (dump_file, vr);
7321 fprintf (dump_file, "\n");
7322 }
7323
7324 if ((vr->type != VR_RANGE
7325 && vr->type != VR_ANTI_RANGE)
7326 || symbolic_range_p (vr))
7327 return SSA_PROP_VARYING;
7328
7329 /* Find the single edge that is taken from the switch expression. */
7330 take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
7331
7332 /* Check if the range spans no CASE_LABEL. If so, we only reach the default
7333 label */
7334 if (j < i)
7335 {
7336 gcc_assert (take_default);
7337 val = gimple_switch_default_label (stmt);
7338 }
7339 else
7340 {
7341 /* Check if labels with index i to j and maybe the default label
7342 are all reaching the same label. */
7343
7344 val = gimple_switch_label (stmt, i);
7345 if (take_default
7346 && CASE_LABEL (gimple_switch_default_label (stmt))
7347 != CASE_LABEL (val))
7348 {
7349 if (dump_file && (dump_flags & TDF_DETAILS))
7350 fprintf (dump_file, " not a single destination for this "
7351 "range\n");
7352 return SSA_PROP_VARYING;
7353 }
7354 for (++i; i <= j; ++i)
7355 {
7356 if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
7357 {
7358 if (dump_file && (dump_flags & TDF_DETAILS))
7359 fprintf (dump_file, " not a single destination for this "
7360 "range\n");
7361 return SSA_PROP_VARYING;
7362 }
7363 }
7364 for (; k <= l; ++k)
7365 {
7366 if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
7367 {
7368 if (dump_file && (dump_flags & TDF_DETAILS))
7369 fprintf (dump_file, " not a single destination for this "
7370 "range\n");
7371 return SSA_PROP_VARYING;
7372 }
7373 }
7374 }
7375
7376 *taken_edge_p = find_edge (gimple_bb (stmt),
7377 label_to_block (CASE_LABEL (val)));
7378
7379 if (dump_file && (dump_flags & TDF_DETAILS))
7380 {
7381 fprintf (dump_file, " will take edge to ");
7382 print_generic_stmt (dump_file, CASE_LABEL (val), 0);
7383 }
7384
7385 return SSA_PROP_INTERESTING;
7386 }
7387
7388
7389 /* Evaluate statement STMT. If the statement produces a useful range,
7390 return SSA_PROP_INTERESTING and record the SSA name with the
7391 interesting range into *OUTPUT_P.
7392
7393 If STMT is a conditional branch and we can determine its truth
7394 value, the taken edge is recorded in *TAKEN_EDGE_P.
7395
7396 If STMT produces a varying value, return SSA_PROP_VARYING. */
7397
7398 static enum ssa_prop_result
7399 vrp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
7400 {
7401 tree def;
7402 ssa_op_iter iter;
7403
7404 if (dump_file && (dump_flags & TDF_DETAILS))
7405 {
7406 fprintf (dump_file, "\nVisiting statement:\n");
7407 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
7408 fprintf (dump_file, "\n");
7409 }
7410
7411 if (!stmt_interesting_for_vrp (stmt))
7412 gcc_assert (stmt_ends_bb_p (stmt));
7413 else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
7414 {
7415 /* In general, assignments with virtual operands are not useful
7416 for deriving ranges, with the obvious exception of calls to
7417 builtin functions. */
7418 if ((is_gimple_call (stmt)
7419 && gimple_call_fndecl (stmt) != NULL_TREE
7420 && DECL_BUILT_IN (gimple_call_fndecl (stmt)))
7421 || !gimple_vuse (stmt))
7422 return vrp_visit_assignment_or_call (stmt, output_p);
7423 }
7424 else if (gimple_code (stmt) == GIMPLE_COND)
7425 return vrp_visit_cond_stmt (stmt, taken_edge_p);
7426 else if (gimple_code (stmt) == GIMPLE_SWITCH)
7427 return vrp_visit_switch_stmt (stmt, taken_edge_p);
7428
7429 /* All other statements produce nothing of interest for VRP, so mark
7430 their outputs varying and prevent further simulation. */
7431 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF)
7432 set_value_range_to_varying (get_value_range (def));
7433
7434 return SSA_PROP_VARYING;
7435 }
7436
7437 /* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
7438 { VR1TYPE, VR0MIN, VR0MAX } and store the result
7439 in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
7440 possible such range. The resulting range is not canonicalized. */
7441
7442 static void
7443 union_ranges (enum value_range_type *vr0type,
7444 tree *vr0min, tree *vr0max,
7445 enum value_range_type vr1type,
7446 tree vr1min, tree vr1max)
7447 {
7448 bool mineq = operand_equal_p (*vr0min, vr1min, 0);
7449 bool maxeq = operand_equal_p (*vr0max, vr1max, 0);
7450
7451 /* [] is vr0, () is vr1 in the following classification comments. */
7452 if (mineq && maxeq)
7453 {
7454 /* [( )] */
7455 if (*vr0type == vr1type)
7456 /* Nothing to do for equal ranges. */
7457 ;
7458 else if ((*vr0type == VR_RANGE
7459 && vr1type == VR_ANTI_RANGE)
7460 || (*vr0type == VR_ANTI_RANGE
7461 && vr1type == VR_RANGE))
7462 {
7463 /* For anti-range with range union the result is varying. */
7464 goto give_up;
7465 }
7466 else
7467 gcc_unreachable ();
7468 }
7469 else if (operand_less_p (*vr0max, vr1min) == 1
7470 || operand_less_p (vr1max, *vr0min) == 1)
7471 {
7472 /* [ ] ( ) or ( ) [ ]
7473 If the ranges have an empty intersection, result of the union
7474 operation is the anti-range or if both are anti-ranges
7475 it covers all. */
7476 if (*vr0type == VR_ANTI_RANGE
7477 && vr1type == VR_ANTI_RANGE)
7478 goto give_up;
7479 else if (*vr0type == VR_ANTI_RANGE
7480 && vr1type == VR_RANGE)
7481 ;
7482 else if (*vr0type == VR_RANGE
7483 && vr1type == VR_ANTI_RANGE)
7484 {
7485 *vr0type = vr1type;
7486 *vr0min = vr1min;
7487 *vr0max = vr1max;
7488 }
7489 else if (*vr0type == VR_RANGE
7490 && vr1type == VR_RANGE)
7491 {
7492 /* The result is the convex hull of both ranges. */
7493 if (operand_less_p (*vr0max, vr1min) == 1)
7494 {
7495 /* If the result can be an anti-range, create one. */
7496 if (TREE_CODE (*vr0max) == INTEGER_CST
7497 && TREE_CODE (vr1min) == INTEGER_CST
7498 && vrp_val_is_min (*vr0min)
7499 && vrp_val_is_max (vr1max))
7500 {
7501 tree min = int_const_binop (PLUS_EXPR,
7502 *vr0max, integer_one_node);
7503 tree max = int_const_binop (MINUS_EXPR,
7504 vr1min, integer_one_node);
7505 if (!operand_less_p (max, min))
7506 {
7507 *vr0type = VR_ANTI_RANGE;
7508 *vr0min = min;
7509 *vr0max = max;
7510 }
7511 else
7512 *vr0max = vr1max;
7513 }
7514 else
7515 *vr0max = vr1max;
7516 }
7517 else
7518 {
7519 /* If the result can be an anti-range, create one. */
7520 if (TREE_CODE (vr1max) == INTEGER_CST
7521 && TREE_CODE (*vr0min) == INTEGER_CST
7522 && vrp_val_is_min (vr1min)
7523 && vrp_val_is_max (*vr0max))
7524 {
7525 tree min = int_const_binop (PLUS_EXPR,
7526 vr1max, integer_one_node);
7527 tree max = int_const_binop (MINUS_EXPR,
7528 *vr0min, integer_one_node);
7529 if (!operand_less_p (max, min))
7530 {
7531 *vr0type = VR_ANTI_RANGE;
7532 *vr0min = min;
7533 *vr0max = max;
7534 }
7535 else
7536 *vr0min = vr1min;
7537 }
7538 else
7539 *vr0min = vr1min;
7540 }
7541 }
7542 else
7543 gcc_unreachable ();
7544 }
7545 else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
7546 && (mineq || operand_less_p (*vr0min, vr1min) == 1))
7547 {
7548 /* [ ( ) ] or [( ) ] or [ ( )] */
7549 if (*vr0type == VR_RANGE
7550 && vr1type == VR_RANGE)
7551 ;
7552 else if (*vr0type == VR_ANTI_RANGE
7553 && vr1type == VR_ANTI_RANGE)
7554 {
7555 *vr0type = vr1type;
7556 *vr0min = vr1min;
7557 *vr0max = vr1max;
7558 }
7559 else if (*vr0type == VR_ANTI_RANGE
7560 && vr1type == VR_RANGE)
7561 {
7562 /* Arbitrarily choose the right or left gap. */
7563 if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
7564 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7565 else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
7566 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7567 else
7568 goto give_up;
7569 }
7570 else if (*vr0type == VR_RANGE
7571 && vr1type == VR_ANTI_RANGE)
7572 /* The result covers everything. */
7573 goto give_up;
7574 else
7575 gcc_unreachable ();
7576 }
7577 else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
7578 && (mineq || operand_less_p (vr1min, *vr0min) == 1))
7579 {
7580 /* ( [ ] ) or ([ ] ) or ( [ ]) */
7581 if (*vr0type == VR_RANGE
7582 && vr1type == VR_RANGE)
7583 {
7584 *vr0type = vr1type;
7585 *vr0min = vr1min;
7586 *vr0max = vr1max;
7587 }
7588 else if (*vr0type == VR_ANTI_RANGE
7589 && vr1type == VR_ANTI_RANGE)
7590 ;
7591 else if (*vr0type == VR_RANGE
7592 && vr1type == VR_ANTI_RANGE)
7593 {
7594 *vr0type = VR_ANTI_RANGE;
7595 if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
7596 {
7597 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7598 *vr0min = vr1min;
7599 }
7600 else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
7601 {
7602 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7603 *vr0max = vr1max;
7604 }
7605 else
7606 goto give_up;
7607 }
7608 else if (*vr0type == VR_ANTI_RANGE
7609 && vr1type == VR_RANGE)
7610 /* The result covers everything. */
7611 goto give_up;
7612 else
7613 gcc_unreachable ();
7614 }
7615 else if ((operand_less_p (vr1min, *vr0max) == 1
7616 || operand_equal_p (vr1min, *vr0max, 0))
7617 && operand_less_p (*vr0min, vr1min) == 1)
7618 {
7619 /* [ ( ] ) or [ ]( ) */
7620 if (*vr0type == VR_RANGE
7621 && vr1type == VR_RANGE)
7622 *vr0max = vr1max;
7623 else if (*vr0type == VR_ANTI_RANGE
7624 && vr1type == VR_ANTI_RANGE)
7625 *vr0min = vr1min;
7626 else if (*vr0type == VR_ANTI_RANGE
7627 && vr1type == VR_RANGE)
7628 {
7629 if (TREE_CODE (vr1min) == INTEGER_CST)
7630 *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
7631 else
7632 goto give_up;
7633 }
7634 else if (*vr0type == VR_RANGE
7635 && vr1type == VR_ANTI_RANGE)
7636 {
7637 if (TREE_CODE (*vr0max) == INTEGER_CST)
7638 {
7639 *vr0type = vr1type;
7640 *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
7641 *vr0max = vr1max;
7642 }
7643 else
7644 goto give_up;
7645 }
7646 else
7647 gcc_unreachable ();
7648 }
7649 else if ((operand_less_p (*vr0min, vr1max) == 1
7650 || operand_equal_p (*vr0min, vr1max, 0))
7651 && operand_less_p (vr1min, *vr0min) == 1)
7652 {
7653 /* ( [ ) ] or ( )[ ] */
7654 if (*vr0type == VR_RANGE
7655 && vr1type == VR_RANGE)
7656 *vr0min = vr1min;
7657 else if (*vr0type == VR_ANTI_RANGE
7658 && vr1type == VR_ANTI_RANGE)
7659 *vr0max = vr1max;
7660 else if (*vr0type == VR_ANTI_RANGE
7661 && vr1type == VR_RANGE)
7662 {
7663 if (TREE_CODE (vr1max) == INTEGER_CST)
7664 *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
7665 else
7666 goto give_up;
7667 }
7668 else if (*vr0type == VR_RANGE
7669 && vr1type == VR_ANTI_RANGE)
7670 {
7671 if (TREE_CODE (*vr0min) == INTEGER_CST)
7672 {
7673 *vr0type = vr1type;
7674 *vr0min = vr1min;
7675 *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
7676 }
7677 else
7678 goto give_up;
7679 }
7680 else
7681 gcc_unreachable ();
7682 }
7683 else
7684 goto give_up;
7685
7686 return;
7687
7688 give_up:
7689 *vr0type = VR_VARYING;
7690 *vr0min = NULL_TREE;
7691 *vr0max = NULL_TREE;
7692 }
7693
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.
   When no arm below matches, *VR0 is left untouched, which is always a
   conservatively correct intersection result.  */

static void
intersect_ranges (enum value_range_type *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_type vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = operand_equal_p (*vr0min, vr1min, 0);
  bool maxeq = operand_equal_p (*vr0max, vr1max, 0);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Disjoint ranges intersect to the empty set.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
	      else
		*vr0min = vr1max;
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, vr1min,
					   integer_one_node);
	      else
		*vr0max = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) == INTEGER_CST)
		*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					   integer_one_node);
	      else
		*vr0min = *vr0max;
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) == INTEGER_CST)
		*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					   integer_one_node);
	      else
		*vr0max = *vr0min;
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       integer_one_node);
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  /* Note: the new minimum is derived from the old *VR0MAX before
	     it is overwritten below.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       integer_one_node);
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       integer_one_node);
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  /* Note: the new maximum is derived from the old *VR0MIN before
	     it is overwritten below.  */
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       integer_one_node);
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate.  */

  return;
}
7967
7968
7969 /* Intersect the two value-ranges *VR0 and *VR1 and store the result
7970 in *VR0. This may not be the smallest possible such range. */
7971
7972 static void
7973 vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
7974 {
7975 value_range_t saved;
7976
7977 /* If either range is VR_VARYING the other one wins. */
7978 if (vr1->type == VR_VARYING)
7979 return;
7980 if (vr0->type == VR_VARYING)
7981 {
7982 copy_value_range (vr0, vr1);
7983 return;
7984 }
7985
7986 /* When either range is VR_UNDEFINED the resulting range is
7987 VR_UNDEFINED, too. */
7988 if (vr0->type == VR_UNDEFINED)
7989 return;
7990 if (vr1->type == VR_UNDEFINED)
7991 {
7992 set_value_range_to_undefined (vr0);
7993 return;
7994 }
7995
7996 /* Save the original vr0 so we can return it as conservative intersection
7997 result when our worker turns things to varying. */
7998 saved = *vr0;
7999 intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
8000 vr1->type, vr1->min, vr1->max);
8001 /* Make sure to canonicalize the result though as the inversion of a
8002 VR_RANGE can still be a VR_RANGE. */
8003 set_and_canonicalize_value_range (vr0, vr0->type,
8004 vr0->min, vr0->max, vr0->equiv);
8005 /* If that failed, use the saved original VR0. */
8006 if (vr0->type == VR_VARYING)
8007 {
8008 *vr0 = saved;
8009 return;
8010 }
8011 /* If the result is VR_UNDEFINED there is no need to mess with
8012 the equivalencies. */
8013 if (vr0->type == VR_UNDEFINED)
8014 return;
8015
8016 /* The resulting set of equivalences for range intersection is the union of
8017 the two sets. */
8018 if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
8019 bitmap_ior_into (vr0->equiv, vr1->equiv);
8020 else if (vr1->equiv && !vr0->equiv)
8021 bitmap_copy (vr0->equiv, vr1->equiv);
8022 }
8023
8024 static void
8025 vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
8026 {
8027 if (dump_file && (dump_flags & TDF_DETAILS))
8028 {
8029 fprintf (dump_file, "Intersecting\n ");
8030 dump_value_range (dump_file, vr0);
8031 fprintf (dump_file, "\nand\n ");
8032 dump_value_range (dump_file, vr1);
8033 fprintf (dump_file, "\n");
8034 }
8035 vrp_intersect_ranges_1 (vr0, vr1);
8036 if (dump_file && (dump_flags & TDF_DETAILS))
8037 {
8038 fprintf (dump_file, "to\n ");
8039 dump_value_range (dump_file, vr0);
8040 fprintf (dump_file, "\n");
8041 }
8042 }
8043
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

static void
vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
{
  value_range_t saved;

  /* VR_UNDEFINED is the neutral element of the meet: the other
     operand is the result.  */
  if (vr0->type == VR_UNDEFINED)
    {
      set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
      return;
    }

  if (vr1->type == VR_UNDEFINED)
    {
      /* VR0 already has the resulting range.  */
      return;
    }

  /* VR_VARYING absorbs any other range.  */
  if (vr0->type == VR_VARYING)
    {
      /* Nothing to do.  VR0 already has the resulting range.  */
      return;
    }

  if (vr1->type == VR_VARYING)
    {
      set_value_range_to_varying (vr0);
      return;
    }

  /* Keep the original VR0 so a useful fallback can be derived if the
     union degrades to VARYING.  */
  saved = *vr0;
  union_ranges (&vr0->type, &vr0->min, &vr0->max,
		vr1->type, vr1->min, vr1->max);
  if (vr0->type == VR_VARYING)
    {
      /* Failed to find an efficient meet.  Before giving up and setting
	 the result to VARYING, see if we can at least derive a useful
	 anti-range.  FIXME, all this nonsense about distinguishing
	 anti-ranges from ranges is necessary because of the odd
	 semantics of range_includes_zero_p and friends.  */
      if (((saved.type == VR_RANGE
	    && range_includes_zero_p (saved.min, saved.max) == 0)
	   || (saved.type == VR_ANTI_RANGE
	       && range_includes_zero_p (saved.min, saved.max) == 1))
	  && ((vr1->type == VR_RANGE
	       && range_includes_zero_p (vr1->min, vr1->max) == 0)
	      || (vr1->type == VR_ANTI_RANGE
		  && range_includes_zero_p (vr1->min, vr1->max) == 1)))
	{
	  /* Both operands exclude zero, so ~[0, 0] contains their union.  */
	  set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));

	  /* Since this meet operation did not result from the meeting of
	     two equivalent names, VR0 cannot have any equivalences.  */
	  if (vr0->equiv)
	    bitmap_clear (vr0->equiv);
	  return;
	}

      set_value_range_to_varying (vr0);
      return;
    }
  set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
				    vr0->equiv);
  if (vr0->type == VR_VARYING)
    return;

  /* The resulting set of equivalences is always the intersection of
     the two sets.  */
  if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
    bitmap_and_into (vr0->equiv, vr1->equiv);
  else if (vr0->equiv && !vr1->equiv)
    bitmap_clear (vr0->equiv);
}
8120
8121 static void
8122 vrp_meet (value_range_t *vr0, value_range_t *vr1)
8123 {
8124 if (dump_file && (dump_flags & TDF_DETAILS))
8125 {
8126 fprintf (dump_file, "Meeting\n ");
8127 dump_value_range (dump_file, vr0);
8128 fprintf (dump_file, "\nand\n ");
8129 dump_value_range (dump_file, vr1);
8130 fprintf (dump_file, "\n");
8131 }
8132 vrp_meet_1 (vr0, vr1);
8133 if (dump_file && (dump_flags & TDF_DETAILS))
8134 {
8135 fprintf (dump_file, "to\n ");
8136 dump_value_range (dump_file, vr0);
8137 fprintf (dump_file, "\n");
8138 }
8139 }
8140
8141
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

static enum ssa_prop_result
vrp_visit_phi_node (gimple phi)
{
  size_t i;
  tree lhs = PHI_RESULT (phi);
  value_range_t *lhs_vr = get_value_range (lhs);
  value_range_t vr_result = VR_INITIALIZER;
  bool first = true;
  int edges, old_edges;
  struct loop *l;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nVisiting PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, dump_flags);
    }

  /* Meet the ranges of all arguments flowing in over executable edges;
     EDGES counts how many such edges were seen.  */
  edges = 0;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge e = gimple_phi_arg_edge (phi, i);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file,
	      "\n    Argument #%d (%d -> %d %sexecutable)\n",
	      (int) i, e->src->index, e->dest->index,
	      (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
	}

      if (e->flags & EDGE_EXECUTABLE)
	{
	  tree arg = PHI_ARG_DEF (phi, i);
	  value_range_t vr_arg;

	  ++edges;

	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      vr_arg = *(get_value_range (arg));
	      /* Do not allow equivalences or symbolic ranges to leak in from
		 backedges.  That creates invalid equivalencies.
		 See PR53465 and PR54767.  */
	      if (e->flags & EDGE_DFS_BACK
		  && (vr_arg.type == VR_RANGE
		      || vr_arg.type == VR_ANTI_RANGE))
		{
		  vr_arg.equiv = NULL;
		  if (symbolic_range_p (&vr_arg))
		    {
		      vr_arg.type = VR_VARYING;
		      vr_arg.min = NULL_TREE;
		      vr_arg.max = NULL_TREE;
		    }
		}
	    }
	  else
	    {
	      /* A constant argument yields the singleton range [arg, arg].
		 Strip any overflow-infinity marker first.  */
	      if (is_overflow_infinity (arg))
		{
		  arg = copy_node (arg);
		  TREE_OVERFLOW (arg) = 0;
		}

	      vr_arg.type = VR_RANGE;
	      vr_arg.min = arg;
	      vr_arg.max = arg;
	      vr_arg.equiv = NULL;
	    }

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "\t");
	      print_generic_expr (dump_file, arg, dump_flags);
	      fprintf (dump_file, "\n\tValue: ");
	      dump_value_range (dump_file, &vr_arg);
	      fprintf (dump_file, "\n");
	    }

	  if (first)
	    copy_value_range (&vr_result, &vr_arg);
	  else
	    vrp_meet (&vr_result, &vr_arg);
	  first = false;

	  /* Once the meet is VARYING no further argument can refine it.  */
	  if (vr_result.type == VR_VARYING)
	    break;
	}
    }

  if (vr_result.type == VR_VARYING)
    goto varying;
  else if (vr_result.type == VR_UNDEFINED)
    goto update_range;

  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;

  /* To prevent infinite iterations in the algorithm, derive ranges
     when the new value is slightly bigger or smaller than the
     previous one.  We don't do this if we have seen a new executable
     edge; this helps us avoid an overflow infinity for conditionals
     which are not in a loop.  If the old value-range was VR_UNDEFINED
     use the updated range and iterate one more time.  */
  if (edges > 0
      && gimple_phi_num_args (phi) > 1
      && edges == old_edges
      && lhs_vr->type != VR_UNDEFINED)
    {
      /* compare_values may also return -2 for "unordered"; the
	 "> 0 || < 0" tests below therefore mean "changed in any way".  */
      int cmp_min = compare_values (lhs_vr->min, vr_result.min);
      int cmp_max = compare_values (lhs_vr->max, vr_result.max);

      /* For non VR_RANGE or for pointers fall back to varying if
	 the range changed.  */
      if ((lhs_vr->type != VR_RANGE || vr_result.type != VR_RANGE
	   || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (cmp_min != 0 || cmp_max != 0))
	goto varying;

      /* If the new minimum is smaller or larger than the previous
	 one, go all the way to -INF.  In the first case, to avoid
	 iterating millions of times to reach -INF, and in the
	 other case to avoid infinite bouncing between different
	 minimums.  */
      if (cmp_min > 0 || cmp_min < 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.min))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.min = TYPE_MIN_VALUE (TREE_TYPE (vr_result.min));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.min)))
	    vr_result.min =
		negative_overflow_infinity (TREE_TYPE (vr_result.min));
	}

      /* Similarly, if the new maximum is smaller or larger than
	 the previous one, go all the way to +INF.  */
      if (cmp_max < 0 || cmp_max > 0)
	{
	  if (!needs_overflow_infinity (TREE_TYPE (vr_result.max))
	      || !vrp_var_may_overflow (lhs, phi))
	    vr_result.max = TYPE_MAX_VALUE (TREE_TYPE (vr_result.max));
	  else if (supports_overflow_infinity (TREE_TYPE (vr_result.max)))
	    vr_result.max =
		positive_overflow_infinity (TREE_TYPE (vr_result.max));
	}

      /* If we dropped either bound to +-INF then if this is a loop
	 PHI node SCEV may known more about its value-range.  */
      if ((cmp_min > 0 || cmp_min < 0
	   || cmp_max < 0 || cmp_max > 0)
	  && current_loops
	  && (l = loop_containing_stmt (phi))
	  && l->header == gimple_bb (phi))
	adjust_range_with_scev (&vr_result, l, phi, lhs);

      /* If we will end up with a (-INF, +INF) range, set it to
	 VARYING.  Same if the previous max value was invalid for
	 the type and we end up with vr_result.min > vr_result.max.  */
      if ((vrp_val_is_max (vr_result.max)
	   && vrp_val_is_min (vr_result.min))
	  || compare_values (vr_result.min,
			     vr_result.max) > 0)
	goto varying;
    }

  /* If the new range is different than the previous value, keep
     iterating.  */
update_range:
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs, 0);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n\n");
	}

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;

  /* No match found.  Set the LHS to VARYING.  */
varying:
  set_value_range_to_varying (lhs_vr);
  return SSA_PROP_VARYING;
}
8336
8337 /* Simplify boolean operations if the source is known
8338 to be already a boolean. */
8339 static bool
8340 simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8341 {
8342 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8343 tree lhs, op0, op1;
8344 bool need_conversion;
8345
8346 /* We handle only !=/== case here. */
8347 gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
8348
8349 op0 = gimple_assign_rhs1 (stmt);
8350 if (!op_with_boolean_value_range_p (op0))
8351 return false;
8352
8353 op1 = gimple_assign_rhs2 (stmt);
8354 if (!op_with_boolean_value_range_p (op1))
8355 return false;
8356
8357 /* Reduce number of cases to handle to NE_EXPR. As there is no
8358 BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
8359 if (rhs_code == EQ_EXPR)
8360 {
8361 if (TREE_CODE (op1) == INTEGER_CST)
8362 op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
8363 else
8364 return false;
8365 }
8366
8367 lhs = gimple_assign_lhs (stmt);
8368 need_conversion
8369 = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
8370
8371 /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
8372 if (need_conversion
8373 && !TYPE_UNSIGNED (TREE_TYPE (op0))
8374 && TYPE_PRECISION (TREE_TYPE (op0)) == 1
8375 && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
8376 return false;
8377
8378 /* For A != 0 we can substitute A itself. */
8379 if (integer_zerop (op1))
8380 gimple_assign_set_rhs_with_ops (gsi,
8381 need_conversion
8382 ? NOP_EXPR : TREE_CODE (op0),
8383 op0, NULL_TREE);
8384 /* For A != B we substitute A ^ B. Either with conversion. */
8385 else if (need_conversion)
8386 {
8387 tree tem = make_ssa_name (TREE_TYPE (op0), NULL);
8388 gimple newop = gimple_build_assign_with_ops (BIT_XOR_EXPR, tem, op0, op1);
8389 gsi_insert_before (gsi, newop, GSI_SAME_STMT);
8390 gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem, NULL_TREE);
8391 }
8392 /* Or without. */
8393 else
8394 gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
8395 update_stmt (gsi_stmt (*gsi));
8396
8397 return true;
8398 }
8399
8400 /* Simplify a division or modulo operator to a right shift or
8401 bitwise and if the first operand is unsigned or is greater
8402 than zero and the second operand is an exact power of two. */
8403
8404 static bool
8405 simplify_div_or_mod_using_ranges (gimple stmt)
8406 {
8407 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
8408 tree val = NULL;
8409 tree op0 = gimple_assign_rhs1 (stmt);
8410 tree op1 = gimple_assign_rhs2 (stmt);
8411 value_range_t *vr = get_value_range (gimple_assign_rhs1 (stmt));
8412
8413 if (TYPE_UNSIGNED (TREE_TYPE (op0)))
8414 {
8415 val = integer_one_node;
8416 }
8417 else
8418 {
8419 bool sop = false;
8420
8421 val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
8422
8423 if (val
8424 && sop
8425 && integer_onep (val)
8426 && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
8427 {
8428 location_t location;
8429
8430 if (!gimple_has_location (stmt))
8431 location = input_location;
8432 else
8433 location = gimple_location (stmt);
8434 warning_at (location, OPT_Wstrict_overflow,
8435 "assuming signed overflow does not occur when "
8436 "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
8437 }
8438 }
8439
8440 if (val && integer_onep (val))
8441 {
8442 tree t;
8443
8444 if (rhs_code == TRUNC_DIV_EXPR)
8445 {
8446 t = build_int_cst (integer_type_node, tree_log2 (op1));
8447 gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
8448 gimple_assign_set_rhs1 (stmt, op0);
8449 gimple_assign_set_rhs2 (stmt, t);
8450 }
8451 else
8452 {
8453 t = build_int_cst (TREE_TYPE (op1), 1);
8454 t = int_const_binop (MINUS_EXPR, op1, t);
8455 t = fold_convert (TREE_TYPE (op0), t);
8456
8457 gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
8458 gimple_assign_set_rhs1 (stmt, op0);
8459 gimple_assign_set_rhs2 (stmt, t);
8460 }
8461
8462 update_stmt (stmt);
8463 return true;
8464 }
8465
8466 return false;
8467 }
8468
/* If the operand to an ABS_EXPR is >= 0, then eliminate the
   ABS_EXPR.  If the operand is <= 0, then simplify the
   ABS_EXPR into a NEGATE_EXPR.  */

static bool
simplify_abs_using_ranges (gimple stmt)
{
  /* VAL encodes the outcome: integer_one_node means the operand is
     known <= 0 (use NEGATE_EXPR), integer_zero_node means it is
     known >= 0 (ABS is the identity), NULL means unknown.  */
  tree val = NULL;
  tree op = gimple_assign_rhs1 (stmt);
  tree type = TREE_TYPE (op);
  value_range_t *vr = get_value_range (op);

  if (TYPE_UNSIGNED (type))
    {
      /* Unsigned values are trivially non-negative; select the
	 identity case below.  */
      val = integer_zero_node;
    }
  else if (vr)
    {
      bool sop = false;

      /* First test whether the operand is known to be <= 0.  */
      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
      if (!val)
	{
	  /* Inconclusive; test for >= 0 instead and invert the answer
	     so VAL keeps the meaning described above.  */
	  sop = false;
	  val = compare_range_with_value (GE_EXPR, vr, integer_zero_node,
					  &sop);

	  if (val)
	    {
	      if (integer_zerop (val))
		val = integer_one_node;
	      else if (integer_onep (val))
		val = integer_zero_node;
	    }
	}

      if (val
	  && (integer_onep (val) || integer_zerop (val)))
	{
	  /* The answer may have relied on signed overflow being
	     undefined (SOP set); warn if the user asked for that.  */
	  if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
	    {
	      location_t location;

	      if (!gimple_has_location (stmt))
		location = input_location;
	      else
		location = gimple_location (stmt);
	      warning_at (location, OPT_Wstrict_overflow,
			  "assuming signed overflow does not occur when "
			  "simplifying %<abs (X)%> to %<X%> or %<-X%>");
	    }

	  gimple_assign_set_rhs1 (stmt, op);
	  if (integer_onep (val))
	    /* Operand known <= 0: ABS (X) -> -X.  */
	    gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
	  else
	    /* Operand known >= 0: ABS (X) -> X, expressed as a plain
	       SSA_NAME copy on the RHS.  */
	    gimple_assign_set_rhs_code (stmt, SSA_NAME);
	  update_stmt (stmt);
	  return true;
	}
    }

  return false;
}
8533
8534 /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
8535 If all the bits that are being cleared by & are already
8536 known to be zero from VR, or all the bits that are being
8537 set by | are already known to be one from VR, the bit
8538 operation is redundant. */
8539
8540 static bool
8541 simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
8542 {
8543 tree op0 = gimple_assign_rhs1 (stmt);
8544 tree op1 = gimple_assign_rhs2 (stmt);
8545 tree op = NULL_TREE;
8546 value_range_t vr0 = VR_INITIALIZER;
8547 value_range_t vr1 = VR_INITIALIZER;
8548 double_int may_be_nonzero0, may_be_nonzero1;
8549 double_int must_be_nonzero0, must_be_nonzero1;
8550 double_int mask;
8551
8552 if (TREE_CODE (op0) == SSA_NAME)
8553 vr0 = *(get_value_range (op0));
8554 else if (is_gimple_min_invariant (op0))
8555 set_value_range_to_value (&vr0, op0, NULL);
8556 else
8557 return false;
8558
8559 if (TREE_CODE (op1) == SSA_NAME)
8560 vr1 = *(get_value_range (op1));
8561 else if (is_gimple_min_invariant (op1))
8562 set_value_range_to_value (&vr1, op1, NULL);
8563 else
8564 return false;
8565
8566 if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
8567 return false;
8568 if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
8569 return false;
8570
8571 switch (gimple_assign_rhs_code (stmt))
8572 {
8573 case BIT_AND_EXPR:
8574 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8575 if (mask.is_zero ())
8576 {
8577 op = op0;
8578 break;
8579 }
8580 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8581 if (mask.is_zero ())
8582 {
8583 op = op1;
8584 break;
8585 }
8586 break;
8587 case BIT_IOR_EXPR:
8588 mask = may_be_nonzero0.and_not (must_be_nonzero1);
8589 if (mask.is_zero ())
8590 {
8591 op = op1;
8592 break;
8593 }
8594 mask = may_be_nonzero1.and_not (must_be_nonzero0);
8595 if (mask.is_zero ())
8596 {
8597 op = op0;
8598 break;
8599 }
8600 break;
8601 default:
8602 gcc_unreachable ();
8603 }
8604
8605 if (op == NULL_TREE)
8606 return false;
8607
8608 gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op, NULL);
8609 update_stmt (gsi_stmt (*gsi));
8610 return true;
8611 }
8612
8613 /* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
8614 a known value range VR.
8615
8616 If there is one and only one value which will satisfy the
8617 conditional, then return that value. Else return NULL. */
8618
8619 static tree
8620 test_for_singularity (enum tree_code cond_code, tree op0,
8621 tree op1, value_range_t *vr)
8622 {
8623 tree min = NULL;
8624 tree max = NULL;
8625
8626 /* Extract minimum/maximum values which satisfy the
8627 the conditional as it was written. */
8628 if (cond_code == LE_EXPR || cond_code == LT_EXPR)
8629 {
8630 /* This should not be negative infinity; there is no overflow
8631 here. */
8632 min = TYPE_MIN_VALUE (TREE_TYPE (op0));
8633
8634 max = op1;
8635 if (cond_code == LT_EXPR && !is_overflow_infinity (max))
8636 {
8637 tree one = build_int_cst (TREE_TYPE (op0), 1);
8638 max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
8639 if (EXPR_P (max))
8640 TREE_NO_WARNING (max) = 1;
8641 }
8642 }
8643 else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
8644 {
8645 /* This should not be positive infinity; there is no overflow
8646 here. */
8647 max = TYPE_MAX_VALUE (TREE_TYPE (op0));
8648
8649 min = op1;
8650 if (cond_code == GT_EXPR && !is_overflow_infinity (min))
8651 {
8652 tree one = build_int_cst (TREE_TYPE (op0), 1);
8653 min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
8654 if (EXPR_P (min))
8655 TREE_NO_WARNING (min) = 1;
8656 }
8657 }
8658
8659 /* Now refine the minimum and maximum values using any
8660 value range information we have for op0. */
8661 if (min && max)
8662 {
8663 if (compare_values (vr->min, min) == 1)
8664 min = vr->min;
8665 if (compare_values (vr->max, max) == -1)
8666 max = vr->max;
8667
8668 /* If the new min/max values have converged to a single value,
8669 then there is only one value which can satisfy the condition,
8670 return that value. */
8671 if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
8672 return min;
8673 }
8674 return NULL;
8675 }
8676
/* Return whether the value range *VR fits in an integer type specified
   by PRECISION and UNSIGNED_P.  */

static bool
range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
{
  tree src_type;
  unsigned src_precision;
  double_int tem;

  /* We can only handle integral and pointer types.  */
  src_type = TREE_TYPE (vr->min);
  if (!INTEGRAL_TYPE_P (src_type)
      && !POINTER_TYPE_P (src_type))
    return false;

  /* An extension is fine unless VR is signed and unsigned_p,
     and so is an identity transform.  In those cases no bits can be
     lost, so we can answer without looking at the bounds.  */
  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
  if ((src_precision < precision
       && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
      || (src_precision == precision
	  && TYPE_UNSIGNED (src_type) == unsigned_p))
    return true;

  /* Now we can only handle ranges with constant bounds.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* For sign changes, the MSB of the double_int has to be clear.
     An unsigned value with its MSB set cannot be represented by
     a signed double_int, while a negative value cannot be represented
     by an unsigned double_int.  */
  if (TYPE_UNSIGNED (src_type) != unsigned_p
      && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
    return false;

  /* Then we can perform the conversion on both ends and compare
     the result for equality.  Truncating to PRECISION bits and
     extending back must be the identity on both bounds.  */
  tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->min) != tem)
    return false;
  tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
  if (tree_to_double_int (vr->max) != tem)
    return false;

  return true;
}
8727
/* Simplify a conditional using a relational operator to an equality
   test if the range information indicates only one value can satisfy
   the original conditional.  */

static bool
simplify_cond_using_ranges (gimple stmt)
{
  tree op0 = gimple_cond_lhs (stmt);
  tree op1 = gimple_cond_rhs (stmt);
  enum tree_code cond_code = gimple_cond_code (stmt);

  /* For a relational comparison of an integral SSA name against an
     invariant, try to reduce the condition to an equality test.  */
  if (cond_code != NE_EXPR
      && cond_code != EQ_EXPR
      && TREE_CODE (op0) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && is_gimple_min_invariant (op1))
    {
      value_range_t *vr = get_value_range (op0);

      /* If we have range information for OP0, then we might be
	 able to simplify this conditional.  */
      if (vr->type == VR_RANGE)
	{
	  tree new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      /* Exactly one value of OP0 satisfies the condition, so
		 rewrite it as OP0 == NEW_TREE.  */
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, EQ_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }

	  /* Try again after inverting the condition.  We only deal
	     with integral types here, so no need to worry about
	     issues with inverting FP comparisons.  */
	  cond_code = invert_tree_comparison (cond_code, false);
	  new_tree = test_for_singularity (cond_code, op0, op1, vr);

	  if (new_tree)
	    {
	      /* Exactly one value of OP0 fails the original condition,
		 so it is equivalent to OP0 != NEW_TREE.  */
	      if (dump_file)
		{
		  fprintf (dump_file, "Simplified relational ");
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, " into ");
		}

	      gimple_cond_set_code (stmt, NE_EXPR);
	      gimple_cond_set_lhs (stmt, op0);
	      gimple_cond_set_rhs (stmt, new_tree);

	      update_stmt (stmt);

	      if (dump_file)
		{
		  print_gimple_stmt (dump_file, stmt, 0, 0);
		  fprintf (dump_file, "\n");
		}

	      return true;
	    }
	}
    }

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (def_stmt)
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
	return false;

      innerop = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop)))
	{
	  value_range_t *vr = get_value_range (innerop);

	  if (range_int_cst_p (vr)
	      && range_fits_type_p (vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_UNSIGNED (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop))
	      /* The range must not have overflowed, or if it did overflow
		 we must not be wrapping/trapping overflow and optimizing
		 with strict overflow semantics.  */
	      && ((!is_negative_overflow_infinity (vr->min)
		   && !is_positive_overflow_infinity (vr->max))
		  || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (innerop))))
	    {
	      /* If the range overflowed and the user has asked for warnings
		 when strict overflow semantics were used to optimize code,
		 issue an appropriate warning.  */
	      if ((is_negative_overflow_infinity (vr->min)
		   || is_positive_overflow_infinity (vr->max))
		  && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_CONDITIONAL))
		{
		  location_t location;

		  if (!gimple_has_location (stmt))
		    location = input_location;
		  else
		    location = gimple_location (stmt);
		  warning_at (location, OPT_Wstrict_overflow,
			      "assuming signed overflow does not occur when "
			      "simplifying conditional");
		}

	      /* Compare INNEROP directly against the constant converted
		 to the inner type.  */
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      gimple_cond_set_lhs (stmt, innerop);
	      gimple_cond_set_rhs (stmt, newconst);
	      return true;
	    }
	}
    }

  return false;
}
8874
/* Simplify a switch statement using the value range of the switch
   argument.  */

static bool
simplify_switch_using_ranges (gimple stmt)
{
  tree op = gimple_switch_index (stmt);
  value_range_t *vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the index ranges of reachable case labels.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      vr = get_value_range (op);

      /* We can only handle integer ranges.  */
      if ((vr->type != VR_RANGE
	   && vr->type != VR_ANTI_RANGE)
	  || symbolic_range_p (vr))
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      /* A constant index selects at most a single case label.  */
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* No label matched; make [i, j] an empty range.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (stmt);

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);

  /* Mark needed edges.  The aux field serves as a temporary marker
     and is reset in the loop below.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (stmt),
		     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* Edge is reachable; clear the marker and keep it.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (e);
      e->flags &= ~EDGE_EXECUTABLE;
    }

  /* And queue an update for the stmt.  The actual rewrite is applied
     later from the queued vector, so report no change now.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (su);
  return false;
}
8973
/* Simplify an integral conversion from an SSA name in STMT.  */

static bool
simplify_conversion_using_ranges (gimple stmt)
{
  tree innerop, middleop, finaltype;
  gimple def_stmt;
  value_range_t *innervr;
  bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
  unsigned inner_prec, middle_prec, final_prec;
  double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;

  /* Look for a chain (final)(middle)(inner) of two integral
     conversions whose middle step can be elided.  */
  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
  if (!INTEGRAL_TYPE_P (finaltype))
    return false;
  middleop = gimple_assign_rhs1 (stmt);
  def_stmt = SSA_NAME_DEF_STMT (middleop);
  if (!is_gimple_assign (def_stmt)
      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
    return false;
  innerop = gimple_assign_rhs1 (def_stmt);
  if (TREE_CODE (innerop) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
    return false;

  /* Get the value-range of the inner operand.  */
  innervr = get_value_range (innerop);
  if (innervr->type != VR_RANGE
      || TREE_CODE (innervr->min) != INTEGER_CST
      || TREE_CODE (innervr->max) != INTEGER_CST)
    return false;

  /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
  innermin = tree_to_double_int (innervr->min);
  innermax = tree_to_double_int (innervr->max);

  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
  final_prec = TYPE_PRECISION (finaltype);

  /* If the first conversion is not injective, the second must not
     be widening.  */
  if ((innermax - innermin).ugt (double_int::mask (middle_prec))
      && middle_prec < final_prec)
    return false;
  /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.  */
  inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
  if (inner_unsigned_p)
    innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
  else
    innermed = double_int_zero;
  /* Only use the medium value if it actually lies strictly inside
     the [min, max] interval.  */
  if (innermin.cmp (innermed, inner_unsigned_p) >= 0
      || innermed.cmp (innermax, inner_unsigned_p) >= 0)
    innermed = innermin;

  /* Apply the middle conversion to all three sample values.  */
  middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
  middlemin = innermin.ext (middle_prec, middle_unsigned_p);
  middlemed = innermed.ext (middle_prec, middle_unsigned_p);
  middlemax = innermax.ext (middle_prec, middle_unsigned_p);

  /* Require that the final conversion applied to both the original
     and the intermediate range produces the same result.  */
  final_unsigned_p = TYPE_UNSIGNED (finaltype);
  if (middlemin.ext (final_prec, final_unsigned_p)
      != innermin.ext (final_prec, final_unsigned_p)
      || middlemed.ext (final_prec, final_unsigned_p)
	 != innermed.ext (final_prec, final_unsigned_p)
      || middlemax.ext (final_prec, final_unsigned_p)
	 != innermax.ext (final_prec, final_unsigned_p))
    return false;

  /* The middle conversion is redundant; convert from INNEROP
     directly.  */
  gimple_assign_set_rhs1 (stmt, innerop);
  update_stmt (stmt);
  return true;
}
9051
/* Simplify a conversion from integral SSA name to float in STMT.  */

static bool
simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  value_range_t *vr = get_value_range (rhs1);
  enum machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  enum machine_mode mode;
  tree tem;
  gimple conv;

  /* We can only handle constant ranges.  */
  if (vr->type != VR_RANGE
      || TREE_CODE (vr->min) != INTEGER_CST
      || TREE_CODE (vr->max) != INTEGER_CST)
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
	  != CODE_FOR_nothing)
      && range_fits_type_p (vr, GET_MODE_PRECISION
			    (TYPE_MODE (TREE_TYPE (rhs1))), 0))
    mode = TYPE_MODE (TREE_TYPE (rhs1));
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
      do
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
	    break;

	  mode = GET_MODE_WIDER_MODE (mode);
	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
      while (mode != VOIDmode);
      if (mode == VOIDmode)
	return false;
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (build_nonstandard_integer_type
		       (GET_MODE_PRECISION (mode), 0), NULL);
  conv = gimple_build_assign_with_ops (NOP_EXPR, tem, rhs1, NULL_TREE);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (stmt, tem);
  update_stmt (stmt);

  return true;
}
9117
9118 /* Simplify STMT using ranges if possible. */
9119
9120 static bool
9121 simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
9122 {
9123 gimple stmt = gsi_stmt (*gsi);
9124 if (is_gimple_assign (stmt))
9125 {
9126 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
9127 tree rhs1 = gimple_assign_rhs1 (stmt);
9128
9129 switch (rhs_code)
9130 {
9131 case EQ_EXPR:
9132 case NE_EXPR:
9133 /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
9134 if the RHS is zero or one, and the LHS are known to be boolean
9135 values. */
9136 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9137 return simplify_truth_ops_using_ranges (gsi, stmt);
9138 break;
9139
9140 /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
9141 and BIT_AND_EXPR respectively if the first operand is greater
9142 than zero and the second operand is an exact power of two. */
9143 case TRUNC_DIV_EXPR:
9144 case TRUNC_MOD_EXPR:
9145 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
9146 && integer_pow2p (gimple_assign_rhs2 (stmt)))
9147 return simplify_div_or_mod_using_ranges (stmt);
9148 break;
9149
9150 /* Transform ABS (X) into X or -X as appropriate. */
9151 case ABS_EXPR:
9152 if (TREE_CODE (rhs1) == SSA_NAME
9153 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9154 return simplify_abs_using_ranges (stmt);
9155 break;
9156
9157 case BIT_AND_EXPR:
9158 case BIT_IOR_EXPR:
9159 /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
9160 if all the bits being cleared are already cleared or
9161 all the bits being set are already set. */
9162 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9163 return simplify_bit_ops_using_ranges (gsi, stmt);
9164 break;
9165
9166 CASE_CONVERT:
9167 if (TREE_CODE (rhs1) == SSA_NAME
9168 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9169 return simplify_conversion_using_ranges (stmt);
9170 break;
9171
9172 case FLOAT_EXPR:
9173 if (TREE_CODE (rhs1) == SSA_NAME
9174 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
9175 return simplify_float_conversion_using_ranges (gsi, stmt);
9176 break;
9177
9178 default:
9179 break;
9180 }
9181 }
9182 else if (gimple_code (stmt) == GIMPLE_COND)
9183 return simplify_cond_using_ranges (stmt);
9184 else if (gimple_code (stmt) == GIMPLE_SWITCH)
9185 return simplify_switch_using_ranges (stmt);
9186
9187 return false;
9188 }
9189
9190 /* If the statement pointed by SI has a predicate whose value can be
9191 computed using the value range information computed by VRP, compute
9192 its value and return true. Otherwise, return false. */
9193
9194 static bool
9195 fold_predicate_in (gimple_stmt_iterator *si)
9196 {
9197 bool assignment_p = false;
9198 tree val;
9199 gimple stmt = gsi_stmt (*si);
9200
9201 if (is_gimple_assign (stmt)
9202 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
9203 {
9204 assignment_p = true;
9205 val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
9206 gimple_assign_rhs1 (stmt),
9207 gimple_assign_rhs2 (stmt),
9208 stmt);
9209 }
9210 else if (gimple_code (stmt) == GIMPLE_COND)
9211 val = vrp_evaluate_conditional (gimple_cond_code (stmt),
9212 gimple_cond_lhs (stmt),
9213 gimple_cond_rhs (stmt),
9214 stmt);
9215 else
9216 return false;
9217
9218 if (val)
9219 {
9220 if (assignment_p)
9221 val = fold_convert (gimple_expr_type (stmt), val);
9222
9223 if (dump_file)
9224 {
9225 fprintf (dump_file, "Folding predicate ");
9226 print_gimple_expr (dump_file, stmt, 0, 0);
9227 fprintf (dump_file, " to ");
9228 print_generic_expr (dump_file, val, 0);
9229 fprintf (dump_file, "\n");
9230 }
9231
9232 if (is_gimple_assign (stmt))
9233 gimple_assign_set_rhs_from_tree (si, val);
9234 else
9235 {
9236 gcc_assert (gimple_code (stmt) == GIMPLE_COND);
9237 if (integer_zerop (val))
9238 gimple_cond_make_false (stmt);
9239 else if (integer_onep (val))
9240 gimple_cond_make_true (stmt);
9241 else
9242 gcc_unreachable ();
9243 }
9244
9245 return true;
9246 }
9247
9248 return false;
9249 }
9250
9251 /* Callback for substitute_and_fold folding the stmt at *SI. */
9252
9253 static bool
9254 vrp_fold_stmt (gimple_stmt_iterator *si)
9255 {
9256 if (fold_predicate_in (si))
9257 return true;
9258
9259 return simplify_stmt_using_ranges (si);
9260 }
9261
/* Stack of dest,src equivalency pairs that need to be restored after
   each attempt to thread a block's incoming edge to an outgoing edge.

   A NULL entry is used to mark the end of pairs which need to be
   restored.  Allocated in identify_jump_threads.  */
static vec<tree> equiv_stack;
9268
9269 /* A trivial wrapper so that we can present the generic jump threading
9270 code with a simple API for simplifying statements. STMT is the
9271 statement we want to simplify, WITHIN_STMT provides the location
9272 for any overflow warnings. */
9273
9274 static tree
9275 simplify_stmt_for_jump_threading (gimple stmt, gimple within_stmt)
9276 {
9277 if (gimple_code (stmt) == GIMPLE_COND)
9278 return vrp_evaluate_conditional (gimple_cond_code (stmt),
9279 gimple_cond_lhs (stmt),
9280 gimple_cond_rhs (stmt), within_stmt);
9281
9282 if (gimple_code (stmt) == GIMPLE_ASSIGN)
9283 {
9284 value_range_t new_vr = VR_INITIALIZER;
9285 tree lhs = gimple_assign_lhs (stmt);
9286
9287 if (TREE_CODE (lhs) == SSA_NAME
9288 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
9289 || POINTER_TYPE_P (TREE_TYPE (lhs))))
9290 {
9291 extract_range_from_assignment (&new_vr, stmt);
9292 if (range_int_cst_singleton_p (&new_vr))
9293 return new_vr.min;
9294 }
9295 }
9296
9297 return NULL_TREE;
9298 }
9299
9300 /* Blocks which have more than one predecessor and more than
9301 one successor present jump threading opportunities, i.e.,
9302 when the block is reached from a specific predecessor, we
9303 may be able to determine which of the outgoing edges will
9304 be traversed. When this optimization applies, we are able
9305 to avoid conditionals at runtime and we may expose secondary
9306 optimization opportunities.
9307
9308 This routine is effectively a driver for the generic jump
9309 threading code. It basically just presents the generic code
9310 with edges that may be suitable for jump threading.
9311
9312 Unlike DOM, we do not iterate VRP if jump threading was successful.
9313 While iterating may expose new opportunities for VRP, it is expected
9314 those opportunities would be very limited and the compile time cost
9315 to expose those opportunities would be significant.
9316
9317 As jump threading opportunities are discovered, they are registered
9318 for later realization. */
9319
9320 static void
9321 identify_jump_threads (void)
9322 {
9323 basic_block bb;
9324 gimple dummy;
9325 int i;
9326 edge e;
9327
9328 /* Ugh. When substituting values earlier in this pass we can
9329 wipe the dominance information. So rebuild the dominator
9330 information as we need it within the jump threading code. */
9331 calculate_dominance_info (CDI_DOMINATORS);
9332
9333 /* We do not allow VRP information to be used for jump threading
9334 across a back edge in the CFG. Otherwise it becomes too
9335 difficult to avoid eliminating loop exit tests. Of course
9336 EDGE_DFS_BACK is not accurate at this time so we have to
9337 recompute it. */
9338 mark_dfs_back_edges ();
9339
9340 /* Do not thread across edges we are about to remove. Just marking
9341 them as EDGE_DFS_BACK will do. */
9342 FOR_EACH_VEC_ELT (to_remove_edges, i, e)
9343 e->flags |= EDGE_DFS_BACK;
9344
9345 /* Allocate our unwinder stack to unwind any temporary equivalences
9346 that might be recorded. */
9347 equiv_stack.create (20);
9348
9349 /* To avoid lots of silly node creation, we create a single
9350 conditional and just modify it in-place when attempting to
9351 thread jumps. */
9352 dummy = gimple_build_cond (EQ_EXPR,
9353 integer_zero_node, integer_zero_node,
9354 NULL, NULL);
9355
9356 /* Walk through all the blocks finding those which present a
9357 potential jump threading opportunity. We could set this up
9358 as a dominator walker and record data during the walk, but
9359 I doubt it's worth the effort for the classes of jump
9360 threading opportunities we are trying to identify at this
9361 point in compilation. */
9362 FOR_EACH_BB (bb)
9363 {
9364 gimple last;
9365
9366 /* If the generic jump threading code does not find this block
9367 interesting, then there is nothing to do. */
9368 if (! potentially_threadable_block (bb))
9369 continue;
9370
9371 /* We only care about blocks ending in a COND_EXPR. While there
9372 may be some value in handling SWITCH_EXPR here, I doubt it's
9373 terribly important. */
9374 last = gsi_stmt (gsi_last_bb (bb));
9375
9376 /* We're basically looking for a switch or any kind of conditional with
9377 integral or pointer type arguments. Note the type of the second
9378 argument will be the same as the first argument, so no need to
9379 check it explicitly. */
9380 if (gimple_code (last) == GIMPLE_SWITCH
9381 || (gimple_code (last) == GIMPLE_COND
9382 && TREE_CODE (gimple_cond_lhs (last)) == SSA_NAME
9383 && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (last)))
9384 || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (last))))
9385 && (TREE_CODE (gimple_cond_rhs (last)) == SSA_NAME
9386 || is_gimple_min_invariant (gimple_cond_rhs (last)))))
9387 {
9388 edge_iterator ei;
9389
9390 /* We've got a block with multiple predecessors and multiple
9391 successors which also ends in a suitable conditional or
9392 switch statement. For each predecessor, see if we can thread
9393 it to a specific successor. */
9394 FOR_EACH_EDGE (e, ei, bb->preds)
9395 {
9396 /* Do not thread across back edges or abnormal edges
9397 in the CFG. */
9398 if (e->flags & (EDGE_DFS_BACK | EDGE_COMPLEX))
9399 continue;
9400
9401 thread_across_edge (dummy, e, true, &equiv_stack,
9402 simplify_stmt_for_jump_threading);
9403 }
9404 }
9405 }
9406
9407 /* We do not actually update the CFG or SSA graphs at this point as
9408 ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
9409 handle ASSERT_EXPRs gracefully. */
9410 }
9411
/* We identified all the jump threading opportunities earlier, but could
   not transform the CFG at that time.  This routine transforms the
   CFG and arranges for the dominator tree to be rebuilt if necessary.

   Note the SSA graph update will occur during the normal TODO
   processing by the pass manager.  */
static void
finalize_jump_threads (void)
{
  /* Realize the jump threads registered by identify_jump_threads.
     The FALSE argument selects the non-loop-header-copying mode;
     see thread_through_all_blocks for its exact semantics.  */
  thread_through_all_blocks (false);
  /* Release the temporary-equivalence unwinder stack allocated in
     identify_jump_threads.  */
  equiv_stack.release ();
}
9424
9425
/* Traverse all the blocks folding conditionals with known ranges.
   Also records computed ranges on non-pointer SSA_NAMEs for later
   passes, identifies jump threading opportunities, and releases all
   VRP data structures.  */

static void
vrp_finalize (void)
{
  size_t i;

  values_propagated = true;

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Replace SSA names that have singleton ranges with their constant
     value and fold statements using the computed ranges.  */
  substitute_and_fold (op_with_constant_singleton_value_range,
		       vrp_fold_stmt, false);

  if (warn_array_bounds)
    check_all_array_refs ();

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads ();

  /* Set value range to non pointer SSA_NAMEs.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	tree name = ssa_name (i);

	/* Skip SSA names that no longer exist, pointers (range info
	   is only stored for integral names here), and ranges that
	   carry no information.  */
	if (!name
	    || POINTER_TYPE_P (TREE_TYPE (name))
	    || (vr_value[i]->type == VR_VARYING)
	    || (vr_value[i]->type == VR_UNDEFINED))
	  continue;

	if ((TREE_CODE (vr_value[i]->min) == INTEGER_CST)
	    && (TREE_CODE (vr_value[i]->max) == INTEGER_CST))
	  {
	    if (vr_value[i]->type == VR_RANGE)
	      set_range_info (name,
			      tree_to_double_int (vr_value[i]->min),
			      tree_to_double_int (vr_value[i]->max));
	    else if (vr_value[i]->type == VR_ANTI_RANGE)
	      {
		/* VR_ANTI_RANGE ~[min, max] is encoded compactly as
		   [max + 1, min - 1] without additional attributes.
		   When min value > max value, we know that it is
		   VR_ANTI_RANGE; it is VR_RANGE otherwise.  */

		/* ~[0,0] anti-range is represented as
		   range.  For an unsigned type, min - 1 in the
		   generic encoding below would not wrap in double_int
		   arithmetic, so build the equivalent [1, TYPE_MAX]
		   range explicitly instead.  */
		if (TYPE_UNSIGNED (TREE_TYPE (name))
		    && integer_zerop (vr_value[i]->min)
		    && integer_zerop (vr_value[i]->max))
		  set_range_info (name,
				  double_int_one,
				  double_int::max_value
				  (TYPE_PRECISION (TREE_TYPE (name)), true));
		else
		  set_range_info (name,
				  tree_to_double_int (vr_value[i]->max)
				  + double_int_one,
				  tree_to_double_int (vr_value[i]->min)
				  - double_int_one);
	      }
	  }
      }

  /* Free allocated memory.  */
  for (i = 0; i < num_vr_values; i++)
    if (vr_value[i])
      {
	BITMAP_FREE (vr_value[i]->equiv);
	free (vr_value[i]);
      }

  free (vr_value);
  free (vr_phi_edge_counts);

  /* So that we can distinguish between VRP data being available
     and not available.  */
  vr_value = NULL;
  vr_phi_edge_counts = NULL;
}
9513
9514
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA name
   have equivalent, though distinct ranges.  For instance,

     1	x_9 = p_3->a;
     2	p_4 = ASSERT_EXPR <p_3, p_3 != 0>
     3	if (p_4 == q_2)
     4	  p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
     5	endif
     6	if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO, the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */

static unsigned int
execute_vrp (void)
{
  int i;
  edge e;
  switch_update *su;

  /* VRP relies on loop information and scalar evolutions for number
     of iteration bounds, so set both up first.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ??? This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  to_remove_edges.create (10);
  to_update_switch_stmts.create (5);
  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  /* The core propagation: iterate ranges to a fixed point, then fold
     and clean up in vrp_finalize (which also registers jump
     threading opportunities).  */
  vrp_initialize ();
  ssa_propagate (vrp_visit_stmt, vrp_visit_phi_node);
  vrp_finalize ();

  free_numbers_of_iterations_estimates ();

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  finalize_jump_threads ();

  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);
  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (su->stmt, n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (su->stmt, j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (su->stmt, 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Removing edges invalidates dominators and may leave loops in need
     of fixing.  */
  if (to_remove_edges.length () > 0)
    {
      free_dominance_info (CDI_DOMINATORS);
      if (current_loops)
	loops_state_set (LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
9638
/* Gate for the VRP pass: run only when -ftree-vrp is enabled.  */

static bool
gate_vrp (void)
{
  return flag_tree_vrp != 0;
}
9644
namespace {

/* Pass descriptor for VRP.  Fields are positional; see pass_data in
   tree-pass.h for their meaning.  */
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

/* Pass-manager wrapper around gate_vrp/execute_vrp.  */
class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  bool gate () { return gate_vrp (); }
  unsigned int execute () { return execute_vrp (); }

}; // class pass_vrp

} // anon namespace
9679
/* Factory used by the pass manager to instantiate the VRP pass.
   Caller (the pass manager) owns the returned object.  */

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}