1 /* Code for range operators.
2 Copyright (C) 2017-2020 Free Software Foundation, Inc.
3 Contributed by Andrew MacLeod <amacleod@redhat.com>
4 and Aldy Hernandez <aldyh@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
26 #include "insn-codes.h"
31 #include "tree-pass.h"
33 #include "optabs-tree.h"
34 #include "gimple-pretty-print.h"
35 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
41 #include "gimple-fold.h"
43 #include "gimple-iterator.h"
44 #include "gimple-walk.h"
49 // Return the upper limit for a type.
51 static inline wide_int
52 max_limit (const_tree type
)
54 return wi::max_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
57 // Return the lower limit for a type.
59 static inline wide_int
60 min_limit (const_tree type
)
62 return wi::min_value (TYPE_PRECISION (type
) , TYPE_SIGN (type
));
65 // If the range of either op1 or op2 is undefined, set the result to
66 // varying and return TRUE. If the caller truely cares about a result,
67 // they should pass in a varying if it has an undefined that it wants
68 // treated as a varying.
71 empty_range_varying (irange
&r
, tree type
,
72 const irange
&op1
, const irange
& op2
)
74 if (op1
.undefined_p () || op2
.undefined_p ())
83 // Return TRUE if shifting by OP is undefined behavior, and set R to
84 // the appropriate range.
87 undefined_shift_range_check (irange
&r
, tree type
, const irange
&op
)
89 if (op
.undefined_p ())
95 // Shifting by any values outside [0..prec-1], gets undefined
96 // behavior from the shift operation. We cannot even trust
97 // SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
98 // shifts, and the operation at the tree level may be widened.
99 if (wi::lt_p (op
.lower_bound (), 0, TYPE_SIGN (op
.type ()))
100 || wi::ge_p (op
.upper_bound (),
101 TYPE_PRECISION (type
), TYPE_SIGN (op
.type ())))
103 r
.set_varying (type
);
109 // Return TRUE if 0 is within [WMIN, WMAX].
112 wi_includes_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
114 signop sign
= TYPE_SIGN (type
);
115 return wi::le_p (wmin
, 0, sign
) && wi::ge_p (wmax
, 0, sign
);
118 // Return TRUE if [WMIN, WMAX] is the singleton 0.
121 wi_zero_p (tree type
, const wide_int
&wmin
, const wide_int
&wmax
)
123 unsigned prec
= TYPE_PRECISION (type
);
124 return wmin
== wmax
&& wi::eq_p (wmin
, wi::zero (prec
));
127 // Default wide_int fold operation returns [MIN, MAX].
130 range_operator::wi_fold (irange
&r
, tree type
,
131 const wide_int
&lh_lb ATTRIBUTE_UNUSED
,
132 const wide_int
&lh_ub ATTRIBUTE_UNUSED
,
133 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
134 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
136 gcc_checking_assert (irange::supports_type_p (type
));
137 r
.set_varying (type
);
140 // The default for fold is to break all ranges into sub-ranges and
141 // invoke the wi_fold method on each sub-range pair.
144 range_operator::fold_range (irange
&r
, tree type
,
146 const irange
&rh
) const
148 gcc_checking_assert (irange::supports_type_p (type
));
149 if (empty_range_varying (r
, type
, lh
, rh
))
152 unsigned num_lh
= lh
.num_pairs ();
153 unsigned num_rh
= rh
.num_pairs ();
155 // If both ranges are single pairs, fold directly into the result range.
156 if (num_lh
== 1 && num_rh
== 1)
158 wi_fold (r
, type
, lh
.lower_bound (0), lh
.upper_bound (0),
159 rh
.lower_bound (0), rh
.upper_bound (0));
165 for (unsigned x
= 0; x
< num_lh
; ++x
)
166 for (unsigned y
= 0; y
< num_rh
; ++y
)
168 wide_int lh_lb
= lh
.lower_bound (x
);
169 wide_int lh_ub
= lh
.upper_bound (x
);
170 wide_int rh_lb
= rh
.lower_bound (y
);
171 wide_int rh_ub
= rh
.upper_bound (y
);
172 wi_fold (tmp
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
180 // The default for op1_range is to return false.
183 range_operator::op1_range (irange
&r ATTRIBUTE_UNUSED
,
184 tree type ATTRIBUTE_UNUSED
,
185 const irange
&lhs ATTRIBUTE_UNUSED
,
186 const irange
&op2 ATTRIBUTE_UNUSED
) const
191 // The default for op2_range is to return false.
194 range_operator::op2_range (irange
&r ATTRIBUTE_UNUSED
,
195 tree type ATTRIBUTE_UNUSED
,
196 const irange
&lhs ATTRIBUTE_UNUSED
,
197 const irange
&op1 ATTRIBUTE_UNUSED
) const
203 // Create and return a range from a pair of wide-ints that are known
204 // to have overflowed (or underflowed).
207 value_range_from_overflowed_bounds (irange
&r
, tree type
,
208 const wide_int
&wmin
,
209 const wide_int
&wmax
)
211 const signop sgn
= TYPE_SIGN (type
);
212 const unsigned int prec
= TYPE_PRECISION (type
);
214 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
215 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
220 if (wi::cmp (tmin
, tmax
, sgn
) < 0)
223 if (wi::cmp (tmax
, tem
, sgn
) > 0)
226 // If the anti-range would cover nothing, drop to varying.
227 // Likewise if the anti-range bounds are outside of the types
229 if (covers
|| wi::cmp (tmin
, tmax
, sgn
) > 0)
230 r
.set_varying (type
);
233 tree tree_min
= wide_int_to_tree (type
, tmin
);
234 tree tree_max
= wide_int_to_tree (type
, tmax
);
235 r
.set (tree_min
, tree_max
, VR_ANTI_RANGE
);
239 // Create and return a range from a pair of wide-ints. MIN_OVF and
240 // MAX_OVF describe any overflow that might have occurred while
241 // calculating WMIN and WMAX respectively.
244 value_range_with_overflow (irange
&r
, tree type
,
245 const wide_int
&wmin
, const wide_int
&wmax
,
246 wi::overflow_type min_ovf
= wi::OVF_NONE
,
247 wi::overflow_type max_ovf
= wi::OVF_NONE
)
249 const signop sgn
= TYPE_SIGN (type
);
250 const unsigned int prec
= TYPE_PRECISION (type
);
251 const bool overflow_wraps
= TYPE_OVERFLOW_WRAPS (type
);
253 // For one bit precision if max != min, then the range covers all
255 if (prec
== 1 && wi::ne_p (wmax
, wmin
))
257 r
.set_varying (type
);
263 // If overflow wraps, truncate the values and adjust the range,
264 // kind, and bounds appropriately.
265 if ((min_ovf
!= wi::OVF_NONE
) == (max_ovf
!= wi::OVF_NONE
))
267 wide_int tmin
= wide_int::from (wmin
, prec
, sgn
);
268 wide_int tmax
= wide_int::from (wmax
, prec
, sgn
);
269 // If the limits are swapped, we wrapped around and cover
271 if (wi::gt_p (tmin
, tmax
, sgn
))
272 r
.set_varying (type
);
274 // No overflow or both overflow or underflow. The range
275 // kind stays normal.
276 r
.set (wide_int_to_tree (type
, tmin
),
277 wide_int_to_tree (type
, tmax
));
281 if ((min_ovf
== wi::OVF_UNDERFLOW
&& max_ovf
== wi::OVF_NONE
)
282 || (max_ovf
== wi::OVF_OVERFLOW
&& min_ovf
== wi::OVF_NONE
))
283 value_range_from_overflowed_bounds (r
, type
, wmin
, wmax
);
285 // Other underflow and/or overflow, drop to VR_VARYING.
286 r
.set_varying (type
);
290 // If overflow does not wrap, saturate to [MIN, MAX].
291 wide_int new_lb
, new_ub
;
292 if (min_ovf
== wi::OVF_UNDERFLOW
)
293 new_lb
= wi::min_value (prec
, sgn
);
294 else if (min_ovf
== wi::OVF_OVERFLOW
)
295 new_lb
= wi::max_value (prec
, sgn
);
299 if (max_ovf
== wi::OVF_UNDERFLOW
)
300 new_ub
= wi::min_value (prec
, sgn
);
301 else if (max_ovf
== wi::OVF_OVERFLOW
)
302 new_ub
= wi::max_value (prec
, sgn
);
306 r
.set (wide_int_to_tree (type
, new_lb
),
307 wide_int_to_tree (type
, new_ub
));
311 // Create and return a range from a pair of wide-ints. Canonicalize
312 // the case where the bounds are swapped. In which case, we transform
313 // [10,5] into [MIN,5][10,MAX].
316 create_possibly_reversed_range (irange
&r
, tree type
,
317 const wide_int
&new_lb
, const wide_int
&new_ub
)
319 signop s
= TYPE_SIGN (type
);
320 // If the bounds are swapped, treat the result as if an overflow occured.
321 if (wi::gt_p (new_lb
, new_ub
, s
))
322 value_range_from_overflowed_bounds (r
, type
, new_lb
, new_ub
);
324 // Otherwise it's just a normal range.
325 r
.set (wide_int_to_tree (type
, new_lb
), wide_int_to_tree (type
, new_ub
));
328 // Return an irange instance that is a boolean TRUE.
330 static inline int_range
<1>
331 range_true (tree type
)
333 unsigned prec
= TYPE_PRECISION (type
);
334 return int_range
<1> (type
, wi::one (prec
), wi::one (prec
));
337 // Return an irange instance that is a boolean FALSE.
339 static inline int_range
<1>
340 range_false (tree type
)
342 unsigned prec
= TYPE_PRECISION (type
);
343 return int_range
<1> (type
, wi::zero (prec
), wi::zero (prec
));
346 // Return an irange that covers both true and false.
348 static inline int_range
<1>
349 range_true_and_false (tree type
)
351 unsigned prec
= TYPE_PRECISION (type
);
352 return int_range
<1> (type
, wi::zero (prec
), wi::one (prec
));
355 enum bool_range_state
{ BRS_FALSE
, BRS_TRUE
, BRS_EMPTY
, BRS_FULL
};
357 // Return the summary information about boolean range LHS. Return an
358 // "interesting" range in R. For EMPTY or FULL, return the equivalent
359 // range for TYPE, for BRS_TRUE and BRS false, return the negation of
362 static bool_range_state
363 get_bool_state (irange
&r
, const irange
&lhs
, tree val_type
)
365 // If there is no result, then this is unexecutable.
366 if (lhs
.undefined_p ())
375 // For TRUE, we can't just test for [1,1] because Ada can have
376 // multi-bit booleans, and TRUE values can be: [1, MAX], ~[0], etc.
377 if (lhs
.contains_p (build_zero_cst (lhs
.type ())))
379 r
.set_varying (val_type
);
386 class operator_equal
: public range_operator
389 virtual bool fold_range (irange
&r
, tree type
,
391 const irange
&op2
) const;
392 virtual bool op1_range (irange
&r
, tree type
,
394 const irange
&val
) const;
395 virtual bool op2_range (irange
&r
, tree type
,
397 const irange
&val
) const;
401 operator_equal::fold_range (irange
&r
, tree type
,
403 const irange
&op2
) const
405 if (empty_range_varying (r
, type
, op1
, op2
))
408 // We can be sure the values are always equal or not if both ranges
409 // consist of a single value, and then compare them.
410 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
411 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
413 if (wi::eq_p (op1
.lower_bound (), op2
.upper_bound()))
414 r
= range_true (type
);
416 r
= range_false (type
);
420 // If ranges do not intersect, we know the range is not equal,
421 // otherwise we don't know anything for sure.
424 if (r
.undefined_p ())
425 r
= range_false (type
);
427 r
= range_true_and_false (type
);
433 operator_equal::op1_range (irange
&r
, tree type
,
435 const irange
&op2
) const
437 switch (get_bool_state (r
, lhs
, type
))
440 // If the result is false, the only time we know anything is
441 // if OP2 is a constant.
442 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
448 r
.set_varying (type
);
452 // If it's true, the result is the same as OP2.
463 operator_equal::op2_range (irange
&r
, tree type
,
465 const irange
&op1
) const
467 return operator_equal::op1_range (r
, type
, lhs
, op1
);
471 class operator_not_equal
: public range_operator
474 virtual bool fold_range (irange
&r
, tree type
,
476 const irange
&op2
) const;
477 virtual bool op1_range (irange
&r
, tree type
,
479 const irange
&op2
) const;
480 virtual bool op2_range (irange
&r
, tree type
,
482 const irange
&op1
) const;
486 operator_not_equal::fold_range (irange
&r
, tree type
,
488 const irange
&op2
) const
490 if (empty_range_varying (r
, type
, op1
, op2
))
493 // We can be sure the values are always equal or not if both ranges
494 // consist of a single value, and then compare them.
495 if (wi::eq_p (op1
.lower_bound (), op1
.upper_bound ())
496 && wi::eq_p (op2
.lower_bound (), op2
.upper_bound ()))
498 if (wi::ne_p (op1
.lower_bound (), op2
.upper_bound()))
499 r
= range_true (type
);
501 r
= range_false (type
);
505 // If ranges do not intersect, we know the range is not equal,
506 // otherwise we don't know anything for sure.
509 if (r
.undefined_p ())
510 r
= range_true (type
);
512 r
= range_true_and_false (type
);
518 operator_not_equal::op1_range (irange
&r
, tree type
,
520 const irange
&op2
) const
522 switch (get_bool_state (r
, lhs
, type
))
525 // If the result is true, the only time we know anything is if
526 // OP2 is a constant.
527 if (wi::eq_p (op2
.lower_bound(), op2
.upper_bound()))
533 r
.set_varying (type
);
537 // If its true, the result is the same as OP2.
549 operator_not_equal::op2_range (irange
&r
, tree type
,
551 const irange
&op1
) const
553 return operator_not_equal::op1_range (r
, type
, lhs
, op1
);
556 // (X < VAL) produces the range of [MIN, VAL - 1].
559 build_lt (irange
&r
, tree type
, const wide_int
&val
)
561 wi::overflow_type ov
;
562 wide_int lim
= wi::sub (val
, 1, TYPE_SIGN (type
), &ov
);
564 // If val - 1 underflows, check if X < MIN, which is an empty range.
568 r
= int_range
<1> (type
, min_limit (type
), lim
);
571 // (X <= VAL) produces the range of [MIN, VAL].
574 build_le (irange
&r
, tree type
, const wide_int
&val
)
576 r
= int_range
<1> (type
, min_limit (type
), val
);
579 // (X > VAL) produces the range of [VAL + 1, MAX].
582 build_gt (irange
&r
, tree type
, const wide_int
&val
)
584 wi::overflow_type ov
;
585 wide_int lim
= wi::add (val
, 1, TYPE_SIGN (type
), &ov
);
586 // If val + 1 overflows, check is for X > MAX, which is an empty range.
590 r
= int_range
<1> (type
, lim
, max_limit (type
));
593 // (X >= val) produces the range of [VAL, MAX].
596 build_ge (irange
&r
, tree type
, const wide_int
&val
)
598 r
= int_range
<1> (type
, val
, max_limit (type
));
602 class operator_lt
: public range_operator
605 virtual bool fold_range (irange
&r
, tree type
,
607 const irange
&op2
) const;
608 virtual bool op1_range (irange
&r
, tree type
,
610 const irange
&op2
) const;
611 virtual bool op2_range (irange
&r
, tree type
,
613 const irange
&op1
) const;
617 operator_lt::fold_range (irange
&r
, tree type
,
619 const irange
&op2
) const
621 if (empty_range_varying (r
, type
, op1
, op2
))
624 signop sign
= TYPE_SIGN (op1
.type ());
625 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
627 if (wi::lt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
628 r
= range_true (type
);
629 else if (!wi::lt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
630 r
= range_false (type
);
632 r
= range_true_and_false (type
);
637 operator_lt::op1_range (irange
&r
, tree type
,
639 const irange
&op2
) const
641 switch (get_bool_state (r
, lhs
, type
))
644 build_lt (r
, type
, op2
.upper_bound ());
648 build_ge (r
, type
, op2
.lower_bound ());
658 operator_lt::op2_range (irange
&r
, tree type
,
660 const irange
&op1
) const
662 switch (get_bool_state (r
, lhs
, type
))
665 build_le (r
, type
, op1
.upper_bound ());
669 build_gt (r
, type
, op1
.lower_bound ());
679 class operator_le
: public range_operator
682 virtual bool fold_range (irange
&r
, tree type
,
684 const irange
&op2
) const;
685 virtual bool op1_range (irange
&r
, tree type
,
687 const irange
&op2
) const;
688 virtual bool op2_range (irange
&r
, tree type
,
690 const irange
&op1
) const;
694 operator_le::fold_range (irange
&r
, tree type
,
696 const irange
&op2
) const
698 if (empty_range_varying (r
, type
, op1
, op2
))
701 signop sign
= TYPE_SIGN (op1
.type ());
702 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
704 if (wi::le_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
705 r
= range_true (type
);
706 else if (!wi::le_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
707 r
= range_false (type
);
709 r
= range_true_and_false (type
);
714 operator_le::op1_range (irange
&r
, tree type
,
716 const irange
&op2
) const
718 switch (get_bool_state (r
, lhs
, type
))
721 build_le (r
, type
, op2
.upper_bound ());
725 build_gt (r
, type
, op2
.lower_bound ());
735 operator_le::op2_range (irange
&r
, tree type
,
737 const irange
&op1
) const
739 switch (get_bool_state (r
, lhs
, type
))
742 build_lt (r
, type
, op1
.upper_bound ());
746 build_ge (r
, type
, op1
.lower_bound ());
756 class operator_gt
: public range_operator
759 virtual bool fold_range (irange
&r
, tree type
,
761 const irange
&op2
) const;
762 virtual bool op1_range (irange
&r
, tree type
,
764 const irange
&op2
) const;
765 virtual bool op2_range (irange
&r
, tree type
,
767 const irange
&op1
) const;
771 operator_gt::fold_range (irange
&r
, tree type
,
772 const irange
&op1
, const irange
&op2
) const
774 if (empty_range_varying (r
, type
, op1
, op2
))
777 signop sign
= TYPE_SIGN (op1
.type ());
778 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
780 if (wi::gt_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
781 r
= range_true (type
);
782 else if (!wi::gt_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
783 r
= range_false (type
);
785 r
= range_true_and_false (type
);
790 operator_gt::op1_range (irange
&r
, tree type
,
791 const irange
&lhs
, const irange
&op2
) const
793 switch (get_bool_state (r
, lhs
, type
))
796 build_gt (r
, type
, op2
.lower_bound ());
800 build_le (r
, type
, op2
.upper_bound ());
810 operator_gt::op2_range (irange
&r
, tree type
,
812 const irange
&op1
) const
814 switch (get_bool_state (r
, lhs
, type
))
817 build_ge (r
, type
, op1
.lower_bound ());
821 build_lt (r
, type
, op1
.upper_bound ());
831 class operator_ge
: public range_operator
834 virtual bool fold_range (irange
&r
, tree type
,
836 const irange
&op2
) const;
837 virtual bool op1_range (irange
&r
, tree type
,
839 const irange
&op2
) const;
840 virtual bool op2_range (irange
&r
, tree type
,
842 const irange
&op1
) const;
846 operator_ge::fold_range (irange
&r
, tree type
,
848 const irange
&op2
) const
850 if (empty_range_varying (r
, type
, op1
, op2
))
853 signop sign
= TYPE_SIGN (op1
.type ());
854 gcc_checking_assert (sign
== TYPE_SIGN (op2
.type ()));
856 if (wi::ge_p (op1
.lower_bound (), op2
.upper_bound (), sign
))
857 r
= range_true (type
);
858 else if (!wi::ge_p (op1
.upper_bound (), op2
.lower_bound (), sign
))
859 r
= range_false (type
);
861 r
= range_true_and_false (type
);
866 operator_ge::op1_range (irange
&r
, tree type
,
868 const irange
&op2
) const
870 switch (get_bool_state (r
, lhs
, type
))
873 build_ge (r
, type
, op2
.lower_bound ());
877 build_lt (r
, type
, op2
.upper_bound ());
887 operator_ge::op2_range (irange
&r
, tree type
,
889 const irange
&op1
) const
891 switch (get_bool_state (r
, lhs
, type
))
894 build_gt (r
, type
, op1
.lower_bound ());
898 build_le (r
, type
, op1
.upper_bound ());
908 class operator_plus
: public range_operator
911 virtual bool op1_range (irange
&r
, tree type
,
913 const irange
&op2
) const;
914 virtual bool op2_range (irange
&r
, tree type
,
916 const irange
&op1
) const;
917 virtual void wi_fold (irange
&r
, tree type
,
918 const wide_int
&lh_lb
,
919 const wide_int
&lh_ub
,
920 const wide_int
&rh_lb
,
921 const wide_int
&rh_ub
) const;
925 operator_plus::wi_fold (irange
&r
, tree type
,
926 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
927 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
929 wi::overflow_type ov_lb
, ov_ub
;
930 signop s
= TYPE_SIGN (type
);
931 wide_int new_lb
= wi::add (lh_lb
, rh_lb
, s
, &ov_lb
);
932 wide_int new_ub
= wi::add (lh_ub
, rh_ub
, s
, &ov_ub
);
933 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
937 operator_plus::op1_range (irange
&r
, tree type
,
939 const irange
&op2
) const
941 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
945 operator_plus::op2_range (irange
&r
, tree type
,
947 const irange
&op1
) const
949 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op1
);
953 class operator_minus
: public range_operator
956 virtual bool op1_range (irange
&r
, tree type
,
958 const irange
&op2
) const;
959 virtual bool op2_range (irange
&r
, tree type
,
961 const irange
&op1
) const;
962 virtual void wi_fold (irange
&r
, tree type
,
963 const wide_int
&lh_lb
,
964 const wide_int
&lh_ub
,
965 const wide_int
&rh_lb
,
966 const wide_int
&rh_ub
) const;
970 operator_minus::wi_fold (irange
&r
, tree type
,
971 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
972 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
974 wi::overflow_type ov_lb
, ov_ub
;
975 signop s
= TYPE_SIGN (type
);
976 wide_int new_lb
= wi::sub (lh_lb
, rh_ub
, s
, &ov_lb
);
977 wide_int new_ub
= wi::sub (lh_ub
, rh_lb
, s
, &ov_ub
);
978 value_range_with_overflow (r
, type
, new_lb
, new_ub
, ov_lb
, ov_ub
);
982 operator_minus::op1_range (irange
&r
, tree type
,
984 const irange
&op2
) const
986 return range_op_handler (PLUS_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
990 operator_minus::op2_range (irange
&r
, tree type
,
992 const irange
&op1
) const
994 return fold_range (r
, type
, op1
, lhs
);
998 class operator_min
: public range_operator
1001 virtual void wi_fold (irange
&r
, tree type
,
1002 const wide_int
&lh_lb
,
1003 const wide_int
&lh_ub
,
1004 const wide_int
&rh_lb
,
1005 const wide_int
&rh_ub
) const;
1009 operator_min::wi_fold (irange
&r
, tree type
,
1010 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1011 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1013 signop s
= TYPE_SIGN (type
);
1014 wide_int new_lb
= wi::min (lh_lb
, rh_lb
, s
);
1015 wide_int new_ub
= wi::min (lh_ub
, rh_ub
, s
);
1016 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1020 class operator_max
: public range_operator
1023 virtual void wi_fold (irange
&r
, tree type
,
1024 const wide_int
&lh_lb
,
1025 const wide_int
&lh_ub
,
1026 const wide_int
&rh_lb
,
1027 const wide_int
&rh_ub
) const;
1031 operator_max::wi_fold (irange
&r
, tree type
,
1032 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1033 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1035 signop s
= TYPE_SIGN (type
);
1036 wide_int new_lb
= wi::max (lh_lb
, rh_lb
, s
);
1037 wide_int new_ub
= wi::max (lh_ub
, rh_ub
, s
);
1038 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
1042 class cross_product_operator
: public range_operator
1045 // Perform an operation between two wide-ints and place the result
1046 // in R. Return true if the operation overflowed.
1047 virtual bool wi_op_overflows (wide_int
&r
,
1050 const wide_int
&) const = 0;
1052 // Calculate the cross product of two sets of sub-ranges and return it.
1053 void wi_cross_product (irange
&r
, tree type
,
1054 const wide_int
&lh_lb
,
1055 const wide_int
&lh_ub
,
1056 const wide_int
&rh_lb
,
1057 const wide_int
&rh_ub
) const;
1060 // Calculate the cross product of two sets of ranges and return it.
1062 // Multiplications, divisions and shifts are a bit tricky to handle,
1063 // depending on the mix of signs we have in the two ranges, we need to
1064 // operate on different values to get the minimum and maximum values
1065 // for the new range. One approach is to figure out all the
1066 // variations of range combinations and do the operations.
1068 // However, this involves several calls to compare_values and it is
1069 // pretty convoluted. It's simpler to do the 4 operations (MIN0 OP
1070 // MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX0 OP MAX1) and then
1071 // figure the smallest and largest values to form the new range.
1074 cross_product_operator::wi_cross_product (irange
&r
, tree type
,
1075 const wide_int
&lh_lb
,
1076 const wide_int
&lh_ub
,
1077 const wide_int
&rh_lb
,
1078 const wide_int
&rh_ub
) const
1080 wide_int cp1
, cp2
, cp3
, cp4
;
1081 // Default to varying.
1082 r
.set_varying (type
);
1084 // Compute the 4 cross operations, bailing if we get an overflow we
1086 if (wi_op_overflows (cp1
, type
, lh_lb
, rh_lb
))
1088 if (wi::eq_p (lh_lb
, lh_ub
))
1090 else if (wi_op_overflows (cp3
, type
, lh_ub
, rh_lb
))
1092 if (wi::eq_p (rh_lb
, rh_ub
))
1094 else if (wi_op_overflows (cp2
, type
, lh_lb
, rh_ub
))
1096 if (wi::eq_p (lh_lb
, lh_ub
))
1098 else if (wi_op_overflows (cp4
, type
, lh_ub
, rh_ub
))
1102 signop sign
= TYPE_SIGN (type
);
1103 if (wi::gt_p (cp1
, cp2
, sign
))
1104 std::swap (cp1
, cp2
);
1105 if (wi::gt_p (cp3
, cp4
, sign
))
1106 std::swap (cp3
, cp4
);
1108 // Choose min and max from the ordered pairs.
1109 wide_int res_lb
= wi::min (cp1
, cp3
, sign
);
1110 wide_int res_ub
= wi::max (cp2
, cp4
, sign
);
1111 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
1115 class operator_mult
: public cross_product_operator
1118 virtual void wi_fold (irange
&r
, tree type
,
1119 const wide_int
&lh_lb
,
1120 const wide_int
&lh_ub
,
1121 const wide_int
&rh_lb
,
1122 const wide_int
&rh_ub
) const;
1123 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1124 const wide_int
&w0
, const wide_int
&w1
) const;
1125 virtual bool op1_range (irange
&r
, tree type
,
1127 const irange
&op2
) const;
1128 virtual bool op2_range (irange
&r
, tree type
,
1130 const irange
&op1
) const;
1134 operator_mult::op1_range (irange
&r
, tree type
,
1135 const irange
&lhs
, const irange
&op2
) const
1139 // We can't solve 0 = OP1 * N by dividing by N with a wrapping type.
1140 // For example: For 0 = OP1 * 2, OP1 could be 0, or MAXINT, whereas
1141 // for 4 = OP1 * 2, OP1 could be 2 or 130 (unsigned 8-bit)
1142 if (TYPE_OVERFLOW_WRAPS (type
))
1145 if (op2
.singleton_p (&offset
) && !integer_zerop (offset
))
1146 return range_op_handler (TRUNC_DIV_EXPR
, type
)->fold_range (r
, type
,
1152 operator_mult::op2_range (irange
&r
, tree type
,
1153 const irange
&lhs
, const irange
&op1
) const
1155 return operator_mult::op1_range (r
, type
, lhs
, op1
);
1159 operator_mult::wi_op_overflows (wide_int
&res
, tree type
,
1160 const wide_int
&w0
, const wide_int
&w1
) const
1162 wi::overflow_type overflow
= wi::OVF_NONE
;
1163 signop sign
= TYPE_SIGN (type
);
1164 res
= wi::mul (w0
, w1
, sign
, &overflow
);
1165 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
1167 // For multiplication, the sign of the overflow is given
1168 // by the comparison of the signs of the operands.
1169 if (sign
== UNSIGNED
|| w0
.sign_mask () == w1
.sign_mask ())
1170 res
= wi::max_value (w0
.get_precision (), sign
);
1172 res
= wi::min_value (w0
.get_precision (), sign
);
1179 operator_mult::wi_fold (irange
&r
, tree type
,
1180 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1181 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1183 if (TYPE_OVERFLOW_UNDEFINED (type
))
1185 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1189 // Multiply the ranges when overflow wraps. This is basically fancy
1190 // code so we don't drop to varying with an unsigned
1193 // This test requires 2*prec bits if both operands are signed and
1194 // 2*prec + 2 bits if either is not. Therefore, extend the values
1195 // using the sign of the result to PREC2. From here on out,
1196 // everthing is just signed math no matter what the input types
1199 signop sign
= TYPE_SIGN (type
);
1200 unsigned prec
= TYPE_PRECISION (type
);
1201 widest2_int min0
= widest2_int::from (lh_lb
, sign
);
1202 widest2_int max0
= widest2_int::from (lh_ub
, sign
);
1203 widest2_int min1
= widest2_int::from (rh_lb
, sign
);
1204 widest2_int max1
= widest2_int::from (rh_ub
, sign
);
1205 widest2_int sizem1
= wi::mask
<widest2_int
> (prec
, false);
1206 widest2_int size
= sizem1
+ 1;
1208 // Canonicalize the intervals.
1209 if (sign
== UNSIGNED
)
1211 if (wi::ltu_p (size
, min0
+ max0
))
1216 if (wi::ltu_p (size
, min1
+ max1
))
1223 // Sort the 4 products so that min is in prod0 and max is in
1225 widest2_int prod0
= min0
* min1
;
1226 widest2_int prod1
= min0
* max1
;
1227 widest2_int prod2
= max0
* min1
;
1228 widest2_int prod3
= max0
* max1
;
1230 // min0min1 > max0max1
1232 std::swap (prod0
, prod3
);
1234 // min0max1 > max0min1
1236 std::swap (prod1
, prod2
);
1239 std::swap (prod0
, prod1
);
1242 std::swap (prod2
, prod3
);
1245 prod2
= prod3
- prod0
;
1246 if (wi::geu_p (prod2
, sizem1
))
1247 // The range covers all values.
1248 r
.set_varying (type
);
1251 wide_int new_lb
= wide_int::from (prod0
, prec
, sign
);
1252 wide_int new_ub
= wide_int::from (prod3
, prec
, sign
);
1253 create_possibly_reversed_range (r
, type
, new_lb
, new_ub
);
1258 class operator_div
: public cross_product_operator
1261 operator_div (enum tree_code c
) { code
= c
; }
1262 virtual void wi_fold (irange
&r
, tree type
,
1263 const wide_int
&lh_lb
,
1264 const wide_int
&lh_ub
,
1265 const wide_int
&rh_lb
,
1266 const wide_int
&rh_ub
) const;
1267 virtual bool wi_op_overflows (wide_int
&res
, tree type
,
1268 const wide_int
&, const wide_int
&) const;
1270 enum tree_code code
;
1274 operator_div::wi_op_overflows (wide_int
&res
, tree type
,
1275 const wide_int
&w0
, const wide_int
&w1
) const
1280 wi::overflow_type overflow
= wi::OVF_NONE
;
1281 signop sign
= TYPE_SIGN (type
);
1285 case EXACT_DIV_EXPR
:
1286 // EXACT_DIV_EXPR is implemented as TRUNC_DIV_EXPR in
1287 // operator_exact_divide. No need to handle it here.
1290 case TRUNC_DIV_EXPR
:
1291 res
= wi::div_trunc (w0
, w1
, sign
, &overflow
);
1293 case FLOOR_DIV_EXPR
:
1294 res
= wi::div_floor (w0
, w1
, sign
, &overflow
);
1296 case ROUND_DIV_EXPR
:
1297 res
= wi::div_round (w0
, w1
, sign
, &overflow
);
1300 res
= wi::div_ceil (w0
, w1
, sign
, &overflow
);
1306 if (overflow
&& TYPE_OVERFLOW_UNDEFINED (type
))
1308 // For division, the only case is -INF / -1 = +INF.
1309 res
= wi::max_value (w0
.get_precision (), sign
);
1316 operator_div::wi_fold (irange
&r
, tree type
,
1317 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1318 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1320 // If we know we will divide by zero, return undefined.
1321 if (rh_lb
== 0 && rh_ub
== 0)
1327 const wide_int dividend_min
= lh_lb
;
1328 const wide_int dividend_max
= lh_ub
;
1329 const wide_int divisor_min
= rh_lb
;
1330 const wide_int divisor_max
= rh_ub
;
1331 signop sign
= TYPE_SIGN (type
);
1332 unsigned prec
= TYPE_PRECISION (type
);
1333 wide_int extra_min
, extra_max
;
1335 // If we know we won't divide by zero, just do the division.
1336 if (!wi_includes_zero_p (type
, divisor_min
, divisor_max
))
1338 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
1339 divisor_min
, divisor_max
);
1343 // If flag_non_call_exceptions, we must not eliminate a division by zero.
1344 if (cfun
->can_throw_non_call_exceptions
)
1346 r
.set_varying (type
);
1350 // If we're definitely dividing by zero, there's nothing to do.
1351 if (wi_zero_p (type
, divisor_min
, divisor_max
))
1357 // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
1358 // skip any division by zero.
1360 // First divide by the negative numbers, if any.
1361 if (wi::neg_p (divisor_min
, sign
))
1362 wi_cross_product (r
, type
, dividend_min
, dividend_max
,
1363 divisor_min
, wi::minus_one (prec
));
1367 // Then divide by the non-zero positive numbers, if any.
1368 if (wi::gt_p (divisor_max
, wi::zero (prec
), sign
))
1371 wi_cross_product (tmp
, type
, dividend_min
, dividend_max
,
1372 wi::one (prec
), divisor_max
);
1375 // We shouldn't still have undefined here.
1376 gcc_checking_assert (!r
.undefined_p ());
1379 operator_div
op_trunc_div (TRUNC_DIV_EXPR
);
1380 operator_div
op_floor_div (FLOOR_DIV_EXPR
);
1381 operator_div
op_round_div (ROUND_DIV_EXPR
);
1382 operator_div
op_ceil_div (CEIL_DIV_EXPR
);
1385 class operator_exact_divide
: public operator_div
1388 operator_exact_divide () : operator_div (TRUNC_DIV_EXPR
) { }
1389 virtual bool op1_range (irange
&r
, tree type
,
1391 const irange
&op2
) const;
1396 operator_exact_divide::op1_range (irange
&r
, tree type
,
1398 const irange
&op2
) const
1401 // [2, 4] = op1 / [3,3] since its exact divide, no need to worry about
1402 // remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
1403 // We wont bother trying to enumerate all the in between stuff :-P
1404 // TRUE accuraacy is [6,6][9,9][12,12]. This is unlikely to matter most of
1405 // the time however.
1406 // If op2 is a multiple of 2, we would be able to set some non-zero bits.
1407 if (op2
.singleton_p (&offset
)
1408 && !integer_zerop (offset
))
1409 return range_op_handler (MULT_EXPR
, type
)->fold_range (r
, type
, lhs
, op2
);
1414 class operator_lshift
: public cross_product_operator
1417 virtual bool op1_range (irange
&r
, tree type
,
1419 const irange
&op2
) const;
1420 virtual bool fold_range (irange
&r
, tree type
,
1422 const irange
&op2
) const;
1424 virtual void wi_fold (irange
&r
, tree type
,
1425 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1426 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
1427 virtual bool wi_op_overflows (wide_int
&res
,
1430 const wide_int
&) const;
1434 operator_lshift::fold_range (irange
&r
, tree type
,
1436 const irange
&op2
) const
1438 if (undefined_shift_range_check (r
, type
, op2
))
1441 // Transform left shifts by constants into multiplies.
1442 if (op2
.singleton_p ())
1444 unsigned shift
= op2
.lower_bound ().to_uhwi ();
1445 wide_int tmp
= wi::set_bit_in_zero (shift
, TYPE_PRECISION (type
));
1446 int_range
<1> mult (type
, tmp
, tmp
);
1448 // Force wrapping multiplication.
1449 bool saved_flag_wrapv
= flag_wrapv
;
1450 bool saved_flag_wrapv_pointer
= flag_wrapv_pointer
;
1452 flag_wrapv_pointer
= 1;
1453 bool b
= op_mult
.fold_range (r
, type
, op1
, mult
);
1454 flag_wrapv
= saved_flag_wrapv
;
1455 flag_wrapv_pointer
= saved_flag_wrapv_pointer
;
1459 // Otherwise, invoke the generic fold routine.
1460 return range_operator::fold_range (r
, type
, op1
, op2
);
1464 operator_lshift::wi_fold (irange
&r
, tree type
,
1465 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1466 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1468 signop sign
= TYPE_SIGN (type
);
1469 unsigned prec
= TYPE_PRECISION (type
);
1470 int overflow_pos
= sign
== SIGNED
? prec
- 1 : prec
;
1471 int bound_shift
= overflow_pos
- rh_ub
.to_shwi ();
1472 // If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
1473 // overflow. However, for that to happen, rh.max needs to be zero,
1474 // which means rh is a singleton range of zero, which means it
1475 // should be handled by the lshift fold_range above.
1476 wide_int bound
= wi::set_bit_in_zero (bound_shift
, prec
);
1477 wide_int complement
= ~(bound
- 1);
1478 wide_int low_bound
, high_bound
;
1479 bool in_bounds
= false;
1481 if (sign
== UNSIGNED
)
1484 high_bound
= complement
;
1485 if (wi::ltu_p (lh_ub
, low_bound
))
1487 // [5, 6] << [1, 2] == [10, 24].
1488 // We're shifting out only zeroes, the value increases
1492 else if (wi::ltu_p (high_bound
, lh_lb
))
1494 // [0xffffff00, 0xffffffff] << [1, 2]
1495 // == [0xfffffc00, 0xfffffffe].
1496 // We're shifting out only ones, the value decreases
1503 // [-1, 1] << [1, 2] == [-4, 4]
1504 low_bound
= complement
;
1506 if (wi::lts_p (lh_ub
, high_bound
)
1507 && wi::lts_p (low_bound
, lh_lb
))
1509 // For non-negative numbers, we're shifting out only zeroes,
1510 // the value increases monotonically. For negative numbers,
1511 // we're shifting out only ones, the value decreases
1518 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1520 r
.set_varying (type
);
1524 operator_lshift::wi_op_overflows (wide_int
&res
, tree type
,
1525 const wide_int
&w0
, const wide_int
&w1
) const
1527 signop sign
= TYPE_SIGN (type
);
1530 // It's unclear from the C standard whether shifts can overflow.
1531 // The following code ignores overflow; perhaps a C standard
1532 // interpretation ruling is needed.
1533 res
= wi::rshift (w0
, -w1
, sign
);
1536 res
= wi::lshift (w0
, w1
);
1541 operator_lshift::op1_range (irange
&r
,
1544 const irange
&op2
) const
1547 if (op2
.singleton_p (&shift_amount
))
1549 int_range
<1> shifted (shift_amount
, shift_amount
), ub
, lb
;
1550 const range_operator
*rshift_op
= range_op_handler (RSHIFT_EXPR
, type
);
1551 rshift_op
->fold_range (ub
, type
, lhs
, shifted
);
1552 if (TYPE_UNSIGNED (type
))
1557 // For signed types, we can't just do an arithmetic rshift,
1558 // because that will propagate the sign bit.
1563 // Assuming a 4-bit signed integer, a right shift will result in
1564 // OP1=1111, but OP1 could have also been 0111. What we want is
1565 // a range from 0111 to 1111. That is, a range from the logical
1566 // rshift (0111) to the arithmetic rshift (1111).
1568 // Perform a logical rshift by doing the rshift as unsigned.
1569 tree unsigned_type
= unsigned_type_for (type
);
1570 int_range_max unsigned_lhs
= lhs
;
1571 range_cast (unsigned_lhs
, unsigned_type
);
1572 rshift_op
= range_op_handler (RSHIFT_EXPR
, unsigned_type
);
1573 rshift_op
->fold_range (lb
, unsigned_type
, unsigned_lhs
, shifted
);
1574 range_cast (lb
, type
);
1583 class operator_rshift
: public cross_product_operator
1586 virtual bool fold_range (irange
&r
, tree type
,
1588 const irange
&op2
) const;
1589 virtual void wi_fold (irange
&r
, tree type
,
1590 const wide_int
&lh_lb
,
1591 const wide_int
&lh_ub
,
1592 const wide_int
&rh_lb
,
1593 const wide_int
&rh_ub
) const;
1594 virtual bool wi_op_overflows (wide_int
&res
,
1597 const wide_int
&w1
) const;
1598 virtual bool op1_range (irange
&, tree type
,
1600 const irange
&op2
) const;
1604 operator_rshift::op1_range (irange
&r
,
1607 const irange
&op2
) const
1610 if (op2
.singleton_p (&shift
))
1612 // Folding the original operation may discard some impossible
1613 // ranges from the LHS.
1614 int_range_max lhs_refined
;
1615 op_rshift
.fold_range (lhs_refined
, type
, int_range
<1> (type
), op2
);
1616 lhs_refined
.intersect (lhs
);
1617 if (lhs_refined
.undefined_p ())
1622 int_range_max
shift_range (shift
, shift
);
1623 int_range_max lb
, ub
;
1624 op_lshift
.fold_range (lb
, type
, lhs_refined
, shift_range
);
1626 // 0000 0111 = OP1 >> 3
1628 // OP1 is anything from 0011 1000 to 0011 1111. That is, a
1629 // range from LHS<<3 plus a mask of the 3 bits we shifted on the
1630 // right hand side (0x07).
1631 tree mask
= fold_build1 (BIT_NOT_EXPR
, type
,
1632 fold_build2 (LSHIFT_EXPR
, type
,
1633 build_minus_one_cst (type
),
1635 int_range_max
mask_range (build_zero_cst (type
), mask
);
1636 op_plus
.fold_range (ub
, type
, lb
, mask_range
);
1639 if (!lhs_refined
.contains_p (build_zero_cst (type
)))
1641 mask_range
.invert ();
1642 r
.intersect (mask_range
);
1650 operator_rshift::wi_op_overflows (wide_int
&res
,
1653 const wide_int
&w1
) const
1655 signop sign
= TYPE_SIGN (type
);
1657 res
= wi::lshift (w0
, -w1
);
1660 // It's unclear from the C standard whether shifts can overflow.
1661 // The following code ignores overflow; perhaps a C standard
1662 // interpretation ruling is needed.
1663 res
= wi::rshift (w0
, w1
, sign
);
1669 operator_rshift::fold_range (irange
&r
, tree type
,
1671 const irange
&op2
) const
1673 // Invoke the generic fold routine if not undefined..
1674 if (undefined_shift_range_check (r
, type
, op2
))
1677 return range_operator::fold_range (r
, type
, op1
, op2
);
1681 operator_rshift::wi_fold (irange
&r
, tree type
,
1682 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
1683 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const
1685 wi_cross_product (r
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
);
1689 class operator_cast
: public range_operator
1692 virtual bool fold_range (irange
&r
, tree type
,
1694 const irange
&op2
) const;
1695 virtual bool op1_range (irange
&r
, tree type
,
1697 const irange
&op2
) const;
1699 bool truncating_cast_p (const irange
&inner
, const irange
&outer
) const;
1700 bool inside_domain_p (const wide_int
&min
, const wide_int
&max
,
1701 const irange
&outer
) const;
1702 void fold_pair (irange
&r
, unsigned index
, const irange
&inner
,
1703 const irange
&outer
) const;
1706 // Return TRUE if casting from INNER to OUTER is a truncating cast.
1709 operator_cast::truncating_cast_p (const irange
&inner
,
1710 const irange
&outer
) const
1712 return TYPE_PRECISION (outer
.type ()) < TYPE_PRECISION (inner
.type ());
1715 // Return TRUE if [MIN,MAX] is inside the domain of RANGE's type.
1718 operator_cast::inside_domain_p (const wide_int
&min
,
1719 const wide_int
&max
,
1720 const irange
&range
) const
1722 wide_int domain_min
= wi::to_wide (vrp_val_min (range
.type ()));
1723 wide_int domain_max
= wi::to_wide (vrp_val_max (range
.type ()));
1724 signop domain_sign
= TYPE_SIGN (range
.type ());
1725 return (wi::le_p (min
, domain_max
, domain_sign
)
1726 && wi::le_p (max
, domain_max
, domain_sign
)
1727 && wi::ge_p (min
, domain_min
, domain_sign
)
1728 && wi::ge_p (max
, domain_min
, domain_sign
));
1732 // Helper for fold_range which work on a pair at a time.
1735 operator_cast::fold_pair (irange
&r
, unsigned index
,
1736 const irange
&inner
,
1737 const irange
&outer
) const
1739 tree inner_type
= inner
.type ();
1740 tree outer_type
= outer
.type ();
1741 signop inner_sign
= TYPE_SIGN (inner_type
);
1742 unsigned outer_prec
= TYPE_PRECISION (outer_type
);
1744 // check to see if casting from INNER to OUTER is a conversion that
1745 // fits in the resulting OUTER type.
1746 wide_int inner_lb
= inner
.lower_bound (index
);
1747 wide_int inner_ub
= inner
.upper_bound (index
);
1748 if (truncating_cast_p (inner
, outer
))
1750 // We may be able to accomodate a truncating cast if the
1751 // resulting range can be represented in the target type...
1752 if (wi::rshift (wi::sub (inner_ub
, inner_lb
),
1753 wi::uhwi (outer_prec
, TYPE_PRECISION (inner
.type ())),
1756 r
.set_varying (outer_type
);
1760 // ...but we must still verify that the final range fits in the
1761 // domain. This catches -fstrict-enum restrictions where the domain
1762 // range is smaller than what fits in the underlying type.
1763 wide_int min
= wide_int::from (inner_lb
, outer_prec
, inner_sign
);
1764 wide_int max
= wide_int::from (inner_ub
, outer_prec
, inner_sign
);
1765 if (inside_domain_p (min
, max
, outer
))
1766 create_possibly_reversed_range (r
, outer_type
, min
, max
);
1768 r
.set_varying (outer_type
);
1773 operator_cast::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
1774 const irange
&inner
,
1775 const irange
&outer
) const
1777 if (empty_range_varying (r
, type
, inner
, outer
))
1780 gcc_checking_assert (outer
.varying_p ());
1781 gcc_checking_assert (inner
.num_pairs () > 0);
1783 // Avoid a temporary by folding the first pair directly into the result.
1784 fold_pair (r
, 0, inner
, outer
);
1786 // Then process any additonal pairs by unioning with their results.
1787 for (unsigned x
= 1; x
< inner
.num_pairs (); ++x
)
1790 fold_pair (tmp
, x
, inner
, outer
);
1799 operator_cast::op1_range (irange
&r
, tree type
,
1801 const irange
&op2
) const
1803 tree lhs_type
= lhs
.type ();
1804 gcc_checking_assert (types_compatible_p (op2
.type(), type
));
1806 if (truncating_cast_p (op2
, lhs
))
1808 if (lhs
.varying_p ())
1809 r
.set_varying (type
);
1812 // We want to insert the LHS as an unsigned value since it
1813 // would not trigger the signed bit of the larger type.
1814 int_range_max converted_lhs
= lhs
;
1815 range_cast (converted_lhs
, unsigned_type_for (lhs_type
));
1816 range_cast (converted_lhs
, type
);
1817 // Start by building the positive signed outer range for the type.
1818 wide_int lim
= wi::set_bit_in_zero (TYPE_PRECISION (lhs_type
),
1819 TYPE_PRECISION (type
));
1820 r
= int_range
<1> (type
, lim
, wi::max_value (TYPE_PRECISION (type
),
1822 // For the signed part, we need to simply union the 2 ranges now.
1823 r
.union_ (converted_lhs
);
1825 // Create maximal negative number outside of LHS bits.
1826 lim
= wi::mask (TYPE_PRECISION (lhs_type
), true,
1827 TYPE_PRECISION (type
));
1828 // Add this to the unsigned LHS range(s).
1829 int_range_max
lim_range (type
, lim
, lim
);
1830 int_range_max lhs_neg
;
1831 range_op_handler (PLUS_EXPR
, type
)->fold_range (lhs_neg
,
1835 // And union this with the entire outer types negative range.
1836 int_range_max
neg (type
,
1837 wi::min_value (TYPE_PRECISION (type
),
1840 neg
.union_ (lhs_neg
);
1841 // And finally, munge the signed and unsigned portions.
1844 // And intersect with any known value passed in the extra operand.
1850 if (TYPE_PRECISION (lhs_type
) == TYPE_PRECISION (type
))
1854 // The cast is not truncating, and the range is restricted to
1855 // the range of the RHS by this assignment.
1857 // Cast the range of the RHS to the type of the LHS.
1858 fold_range (tmp
, lhs_type
, int_range
<1> (type
), int_range
<1> (lhs_type
));
1859 // Intersect this with the LHS range will produce the range,
1860 // which will be cast to the RHS type before returning.
1861 tmp
.intersect (lhs
);
1864 // Cast the calculated range to the type of the RHS.
1865 fold_range (r
, type
, tmp
, int_range
<1> (type
));
1870 class operator_logical_and
: public range_operator
1873 virtual bool fold_range (irange
&r
, tree type
,
1875 const irange
&rh
) const;
1876 virtual bool op1_range (irange
&r
, tree type
,
1878 const irange
&op2
) const;
1879 virtual bool op2_range (irange
&r
, tree type
,
1881 const irange
&op1
) const;
1886 operator_logical_and::fold_range (irange
&r
, tree type
,
1888 const irange
&rh
) const
1890 if (empty_range_varying (r
, type
, lh
, rh
))
1893 // 0 && anything is 0.
1894 if ((wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (lh
.upper_bound (), 0))
1895 || (wi::eq_p (lh
.lower_bound (), 0) && wi::eq_p (rh
.upper_bound (), 0)))
1896 r
= range_false (type
);
1897 else if (lh
.contains_p (build_zero_cst (lh
.type ()))
1898 || rh
.contains_p (build_zero_cst (rh
.type ())))
1899 // To reach this point, there must be a logical 1 on each side, and
1900 // the only remaining question is whether there is a zero or not.
1901 r
= range_true_and_false (type
);
1903 r
= range_true (type
);
1908 operator_logical_and::op1_range (irange
&r
, tree type
,
1910 const irange
&op2 ATTRIBUTE_UNUSED
) const
1912 switch (get_bool_state (r
, lhs
, type
))
1915 // A true result means both sides of the AND must be true.
1916 r
= range_true (type
);
1919 // Any other result means only one side has to be false, the
1920 // other side can be anything. So we cannott be sure of any
1922 r
= range_true_and_false (type
);
1929 operator_logical_and::op2_range (irange
&r
, tree type
,
1931 const irange
&op1
) const
1933 return operator_logical_and::op1_range (r
, type
, lhs
, op1
);
1937 class operator_bitwise_and
: public range_operator
1940 virtual bool fold_range (irange
&r
, tree type
,
1942 const irange
&rh
) const;
1943 virtual bool op1_range (irange
&r
, tree type
,
1945 const irange
&op2
) const;
1946 virtual bool op2_range (irange
&r
, tree type
,
1948 const irange
&op1
) const;
1949 virtual void wi_fold (irange
&r
, tree type
,
1950 const wide_int
&lh_lb
,
1951 const wide_int
&lh_ub
,
1952 const wide_int
&rh_lb
,
1953 const wide_int
&rh_ub
) const;
1955 void simple_op1_range_solver (irange
&r
, tree type
,
1957 const irange
&op2
) const;
1958 void remove_impossible_ranges (irange
&r
, const irange
&rh
) const;
1962 unsigned_singleton_p (const irange
&op
)
1965 if (op
.singleton_p (&mask
))
1967 wide_int x
= wi::to_wide (mask
);
1968 return wi::ge_p (x
, 0, TYPE_SIGN (op
.type ()));
1973 // Remove any ranges from R that are known to be impossible when an
1974 // range is ANDed with MASK.
1977 operator_bitwise_and::remove_impossible_ranges (irange
&r
,
1978 const irange
&rmask
) const
1980 if (r
.undefined_p () || !unsigned_singleton_p (rmask
))
1983 wide_int mask
= rmask
.lower_bound ();
1984 tree type
= r
.type ();
1985 int prec
= TYPE_PRECISION (type
);
1986 int leading_zeros
= wi::clz (mask
);
1987 int_range_max impossible_ranges
;
1989 /* We know that starting at the most significant bit, any 0 in the
1990 mask means the resulting range cannot contain a 1 in that same
1991 position. This means the following ranges are impossible:
1995 01xx xxxx [0100 0000, 0111 1111]
1996 001x xxxx [0010 0000, 0011 1111]
1997 0000 01xx [0000 0100, 0000 0111]
1998 0000 0001 [0000 0001, 0000 0001]
2000 wide_int one
= wi::one (prec
);
2001 for (int i
= 0; i
< prec
- leading_zeros
- 1; ++i
)
2002 if (wi::bit_and (mask
, wi::lshift (one
, wi::uhwi (i
, prec
))) == 0)
2004 tree lb
= fold_build2 (LSHIFT_EXPR
, type
,
2005 build_one_cst (type
),
2006 build_int_cst (type
, i
));
2007 tree ub_left
= fold_build1 (BIT_NOT_EXPR
, type
,
2008 fold_build2 (LSHIFT_EXPR
, type
,
2009 build_minus_one_cst (type
),
2010 build_int_cst (type
, i
)));
2011 tree ub_right
= fold_build2 (LSHIFT_EXPR
, type
,
2012 build_one_cst (type
),
2013 build_int_cst (type
, i
));
2014 tree ub
= fold_build2 (BIT_IOR_EXPR
, type
, ub_left
, ub_right
);
2015 impossible_ranges
.union_ (int_range
<1> (lb
, ub
));
2017 if (!impossible_ranges
.undefined_p ())
2019 impossible_ranges
.invert ();
2020 r
.intersect (impossible_ranges
);
2025 operator_bitwise_and::fold_range (irange
&r
, tree type
,
2027 const irange
&rh
) const
2029 if (range_operator::fold_range (r
, type
, lh
, rh
))
2031 // FIXME: This is temporarily disabled because, though it
2032 // generates better ranges, it's noticeably slower for evrp.
2033 // remove_impossible_ranges (r, rh);
2040 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
2041 // possible. Basically, see if we can optimize:
2045 // [LB op Z, UB op Z]
2047 // If the optimization was successful, accumulate the range in R and
2051 wi_optimize_and_or (irange
&r
,
2052 enum tree_code code
,
2054 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2055 const wide_int
&rh_lb
, const wide_int
&rh_ub
)
2057 // Calculate the singleton mask among the ranges, if any.
2058 wide_int lower_bound
, upper_bound
, mask
;
2059 if (wi::eq_p (rh_lb
, rh_ub
))
2062 lower_bound
= lh_lb
;
2063 upper_bound
= lh_ub
;
2065 else if (wi::eq_p (lh_lb
, lh_ub
))
2068 lower_bound
= rh_lb
;
2069 upper_bound
= rh_ub
;
2074 // If Z is a constant which (for op | its bitwise not) has n
2075 // consecutive least significant bits cleared followed by m 1
2076 // consecutive bits set immediately above it and either
2077 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
2079 // The least significant n bits of all the values in the range are
2080 // cleared or set, the m bits above it are preserved and any bits
2081 // above these are required to be the same for all values in the
2085 if (code
== BIT_IOR_EXPR
)
2087 if (wi::eq_p (w
, 0))
2088 n
= w
.get_precision ();
2092 w
= ~(w
| wi::mask (n
, false, w
.get_precision ()));
2093 if (wi::eq_p (w
, 0))
2094 m
= w
.get_precision () - n
;
2096 m
= wi::ctz (w
) - n
;
2098 wide_int new_mask
= wi::mask (m
+ n
, true, w
.get_precision ());
2099 if ((new_mask
& lower_bound
) != (new_mask
& upper_bound
))
2102 wide_int res_lb
, res_ub
;
2103 if (code
== BIT_AND_EXPR
)
2105 res_lb
= wi::bit_and (lower_bound
, mask
);
2106 res_ub
= wi::bit_and (upper_bound
, mask
);
2108 else if (code
== BIT_IOR_EXPR
)
2110 res_lb
= wi::bit_or (lower_bound
, mask
);
2111 res_ub
= wi::bit_or (upper_bound
, mask
);
2115 value_range_with_overflow (r
, type
, res_lb
, res_ub
);
2119 // For range [LB, UB] compute two wide_int bit masks.
2121 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
2122 // for all numbers in the range the bit is 0, otherwise it might be 0
2125 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
2126 // for all numbers in the range the bit is 1, otherwise it might be 0
2130 wi_set_zero_nonzero_bits (tree type
,
2131 const wide_int
&lb
, const wide_int
&ub
,
2132 wide_int
&maybe_nonzero
,
2133 wide_int
&mustbe_nonzero
)
2135 signop sign
= TYPE_SIGN (type
);
2137 if (wi::eq_p (lb
, ub
))
2138 maybe_nonzero
= mustbe_nonzero
= lb
;
2139 else if (wi::ge_p (lb
, 0, sign
) || wi::lt_p (ub
, 0, sign
))
2141 wide_int xor_mask
= lb
^ ub
;
2142 maybe_nonzero
= lb
| ub
;
2143 mustbe_nonzero
= lb
& ub
;
2146 wide_int mask
= wi::mask (wi::floor_log2 (xor_mask
), false,
2147 maybe_nonzero
.get_precision ());
2148 maybe_nonzero
= maybe_nonzero
| mask
;
2149 mustbe_nonzero
= wi::bit_and_not (mustbe_nonzero
, mask
);
2154 maybe_nonzero
= wi::minus_one (lb
.get_precision ());
2155 mustbe_nonzero
= wi::zero (lb
.get_precision ());
2160 operator_bitwise_and::wi_fold (irange
&r
, tree type
,
2161 const wide_int
&lh_lb
,
2162 const wide_int
&lh_ub
,
2163 const wide_int
&rh_lb
,
2164 const wide_int
&rh_ub
) const
2166 if (wi_optimize_and_or (r
, BIT_AND_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
2169 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2170 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2171 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2172 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2173 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2174 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2176 wide_int new_lb
= mustbe_nonzero_lh
& mustbe_nonzero_rh
;
2177 wide_int new_ub
= maybe_nonzero_lh
& maybe_nonzero_rh
;
2178 signop sign
= TYPE_SIGN (type
);
2179 unsigned prec
= TYPE_PRECISION (type
);
2180 // If both input ranges contain only negative values, we can
2181 // truncate the result range maximum to the minimum of the
2182 // input range maxima.
2183 if (wi::lt_p (lh_ub
, 0, sign
) && wi::lt_p (rh_ub
, 0, sign
))
2185 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
2186 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
2188 // If either input range contains only non-negative values
2189 // we can truncate the result range maximum to the respective
2190 // maximum of the input range.
2191 if (wi::ge_p (lh_lb
, 0, sign
))
2192 new_ub
= wi::min (new_ub
, lh_ub
, sign
);
2193 if (wi::ge_p (rh_lb
, 0, sign
))
2194 new_ub
= wi::min (new_ub
, rh_ub
, sign
);
2195 // PR68217: In case of signed & sign-bit-CST should
2196 // result in [-INF, 0] instead of [-INF, INF].
2197 if (wi::gt_p (new_lb
, new_ub
, sign
))
2199 wide_int sign_bit
= wi::set_bit_in_zero (prec
- 1, prec
);
2201 && ((wi::eq_p (lh_lb
, lh_ub
)
2202 && !wi::cmps (lh_lb
, sign_bit
))
2203 || (wi::eq_p (rh_lb
, rh_ub
)
2204 && !wi::cmps (rh_lb
, sign_bit
))))
2206 new_lb
= wi::min_value (prec
, sign
);
2207 new_ub
= wi::zero (prec
);
2210 // If the limits got swapped around, return varying.
2211 if (wi::gt_p (new_lb
, new_ub
,sign
))
2212 r
.set_varying (type
);
2214 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2218 set_nonzero_range_from_mask (irange
&r
, tree type
, const irange
&lhs
)
2220 if (!lhs
.contains_p (build_zero_cst (type
)))
2221 r
= range_nonzero (type
);
2223 r
.set_varying (type
);
2226 // This was shamelessly stolen from register_edge_assert_for_2 and
2227 // adjusted to work with iranges.
2230 operator_bitwise_and::simple_op1_range_solver (irange
&r
, tree type
,
2232 const irange
&op2
) const
2234 if (!op2
.singleton_p ())
2236 set_nonzero_range_from_mask (r
, type
, lhs
);
2239 unsigned int nprec
= TYPE_PRECISION (type
);
2240 wide_int cst2v
= op2
.lower_bound ();
2241 bool cst2n
= wi::neg_p (cst2v
, TYPE_SIGN (type
));
2244 sgnbit
= wi::set_bit_in_zero (nprec
- 1, nprec
);
2246 sgnbit
= wi::zero (nprec
);
2248 // Solve [lhs.lower_bound (), +INF] = x & MASK.
2250 // Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and
2251 // maximum unsigned value is ~0. For signed comparison, if CST2
2252 // doesn't have the most significant bit set, handle it similarly. If
2253 // CST2 has MSB set, the minimum is the same, and maximum is ~0U/2.
2254 wide_int valv
= lhs
.lower_bound ();
2255 wide_int minv
= valv
& cst2v
, maxv
;
2256 bool we_know_nothing
= false;
2259 // If (VAL & CST2) != VAL, X & CST2 can't be equal to VAL.
2260 minv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
2263 // If we can't determine anything on this bound, fall
2264 // through and conservatively solve for the other end point.
2265 we_know_nothing
= true;
2268 maxv
= wi::mask (nprec
- (cst2n
? 1 : 0), false, nprec
);
2269 if (we_know_nothing
)
2270 r
.set_varying (type
);
2272 r
= int_range
<1> (type
, minv
, maxv
);
2274 // Solve [-INF, lhs.upper_bound ()] = x & MASK.
2276 // Minimum unsigned value for <= is 0 and maximum unsigned value is
2277 // VAL | ~CST2 if (VAL & CST2) == VAL. Otherwise, find smallest
2279 // VAL2 > VAL && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
2281 // For signed comparison, if CST2 doesn't have most significant bit
2282 // set, handle it similarly. If CST2 has MSB set, the maximum is
2283 // the same and minimum is INT_MIN.
2284 valv
= lhs
.upper_bound ();
2285 minv
= valv
& cst2v
;
2290 maxv
= masked_increment (valv
, cst2v
, sgnbit
, nprec
);
2293 // If we couldn't determine anything on either bound, return
2295 if (we_know_nothing
)
2303 int_range
<1> upper_bits (type
, minv
, maxv
);
2304 r
.intersect (upper_bits
);
2308 operator_bitwise_and::op1_range (irange
&r
, tree type
,
2310 const irange
&op2
) const
2312 if (types_compatible_p (type
, boolean_type_node
))
2313 return op_logical_and
.op1_range (r
, type
, lhs
, op2
);
2316 for (unsigned i
= 0; i
< lhs
.num_pairs (); ++i
)
2318 int_range_max
chunk (lhs
.type (),
2319 lhs
.lower_bound (i
),
2320 lhs
.upper_bound (i
));
2322 simple_op1_range_solver (res
, type
, chunk
, op2
);
2325 if (r
.undefined_p ())
2326 set_nonzero_range_from_mask (r
, type
, lhs
);
2331 operator_bitwise_and::op2_range (irange
&r
, tree type
,
2333 const irange
&op1
) const
2335 return operator_bitwise_and::op1_range (r
, type
, lhs
, op1
);
2339 class operator_logical_or
: public range_operator
2342 virtual bool fold_range (irange
&r
, tree type
,
2344 const irange
&rh
) const;
2345 virtual bool op1_range (irange
&r
, tree type
,
2347 const irange
&op2
) const;
2348 virtual bool op2_range (irange
&r
, tree type
,
2350 const irange
&op1
) const;
2354 operator_logical_or::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2356 const irange
&rh
) const
2358 if (empty_range_varying (r
, type
, lh
, rh
))
2367 operator_logical_or::op1_range (irange
&r
, tree type
,
2369 const irange
&op2 ATTRIBUTE_UNUSED
) const
2371 switch (get_bool_state (r
, lhs
, type
))
2374 // A false result means both sides of the OR must be false.
2375 r
= range_false (type
);
2378 // Any other result means only one side has to be true, the
2379 // other side can be anything. so we can't be sure of any result
2381 r
= range_true_and_false (type
);
2388 operator_logical_or::op2_range (irange
&r
, tree type
,
2390 const irange
&op1
) const
2392 return operator_logical_or::op1_range (r
, type
, lhs
, op1
);
2396 class operator_bitwise_or
: public range_operator
2399 virtual bool op1_range (irange
&r
, tree type
,
2401 const irange
&op2
) const;
2402 virtual bool op2_range (irange
&r
, tree type
,
2404 const irange
&op1
) const;
2405 virtual void wi_fold (irange
&r
, tree type
,
2406 const wide_int
&lh_lb
,
2407 const wide_int
&lh_ub
,
2408 const wide_int
&rh_lb
,
2409 const wide_int
&rh_ub
) const;
2413 operator_bitwise_or::wi_fold (irange
&r
, tree type
,
2414 const wide_int
&lh_lb
,
2415 const wide_int
&lh_ub
,
2416 const wide_int
&rh_lb
,
2417 const wide_int
&rh_ub
) const
2419 if (wi_optimize_and_or (r
, BIT_IOR_EXPR
, type
, lh_lb
, lh_ub
, rh_lb
, rh_ub
))
2422 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2423 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2424 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2425 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2426 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2427 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2428 wide_int new_lb
= mustbe_nonzero_lh
| mustbe_nonzero_rh
;
2429 wide_int new_ub
= maybe_nonzero_lh
| maybe_nonzero_rh
;
2430 signop sign
= TYPE_SIGN (type
);
2431 // If the input ranges contain only positive values we can
2432 // truncate the minimum of the result range to the maximum
2433 // of the input range minima.
2434 if (wi::ge_p (lh_lb
, 0, sign
)
2435 && wi::ge_p (rh_lb
, 0, sign
))
2437 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
2438 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
2440 // If either input range contains only negative values
2441 // we can truncate the minimum of the result range to the
2442 // respective minimum range.
2443 if (wi::lt_p (lh_ub
, 0, sign
))
2444 new_lb
= wi::max (new_lb
, lh_lb
, sign
);
2445 if (wi::lt_p (rh_ub
, 0, sign
))
2446 new_lb
= wi::max (new_lb
, rh_lb
, sign
);
2447 // If the limits got swapped around, return varying.
2448 if (wi::gt_p (new_lb
, new_ub
,sign
))
2449 r
.set_varying (type
);
2451 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2455 operator_bitwise_or::op1_range (irange
&r
, tree type
,
2457 const irange
&op2
) const
2459 // If this is really a logical wi_fold, call that.
2460 if (types_compatible_p (type
, boolean_type_node
))
2461 return op_logical_or
.op1_range (r
, type
, lhs
, op2
);
2465 tree zero
= build_zero_cst (type
);
2466 r
= int_range
<1> (zero
, zero
);
2469 r
.set_varying (type
);
2474 operator_bitwise_or::op2_range (irange
&r
, tree type
,
2476 const irange
&op1
) const
2478 return operator_bitwise_or::op1_range (r
, type
, lhs
, op1
);
2482 class operator_bitwise_xor
: public range_operator
2485 virtual void wi_fold (irange
&r
, tree type
,
2486 const wide_int
&lh_lb
,
2487 const wide_int
&lh_ub
,
2488 const wide_int
&rh_lb
,
2489 const wide_int
&rh_ub
) const;
2490 virtual bool op1_range (irange
&r
, tree type
,
2492 const irange
&op2
) const;
2493 virtual bool op2_range (irange
&r
, tree type
,
2495 const irange
&op1
) const;
2499 operator_bitwise_xor::wi_fold (irange
&r
, tree type
,
2500 const wide_int
&lh_lb
,
2501 const wide_int
&lh_ub
,
2502 const wide_int
&rh_lb
,
2503 const wide_int
&rh_ub
) const
2505 signop sign
= TYPE_SIGN (type
);
2506 wide_int maybe_nonzero_lh
, mustbe_nonzero_lh
;
2507 wide_int maybe_nonzero_rh
, mustbe_nonzero_rh
;
2508 wi_set_zero_nonzero_bits (type
, lh_lb
, lh_ub
,
2509 maybe_nonzero_lh
, mustbe_nonzero_lh
);
2510 wi_set_zero_nonzero_bits (type
, rh_lb
, rh_ub
,
2511 maybe_nonzero_rh
, mustbe_nonzero_rh
);
2513 wide_int result_zero_bits
= ((mustbe_nonzero_lh
& mustbe_nonzero_rh
)
2514 | ~(maybe_nonzero_lh
| maybe_nonzero_rh
));
2515 wide_int result_one_bits
2516 = (wi::bit_and_not (mustbe_nonzero_lh
, maybe_nonzero_rh
)
2517 | wi::bit_and_not (mustbe_nonzero_rh
, maybe_nonzero_lh
));
2518 wide_int new_ub
= ~result_zero_bits
;
2519 wide_int new_lb
= result_one_bits
;
2521 // If the range has all positive or all negative values, the result
2522 // is better than VARYING.
2523 if (wi::lt_p (new_lb
, 0, sign
) || wi::ge_p (new_ub
, 0, sign
))
2524 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2526 r
.set_varying (type
);
2530 operator_bitwise_xor::op1_range (irange
&r
, tree type
,
2532 const irange
&op2
) const
2534 if (lhs
.undefined_p () || lhs
.varying_p ())
2539 if (types_compatible_p (type
, boolean_type_node
))
2541 switch (get_bool_state (r
, lhs
, type
))
2544 if (op2
.varying_p ())
2545 r
.set_varying (type
);
2546 else if (op2
.zero_p ())
2547 r
= range_true (type
);
2549 r
= range_false (type
);
2559 r
.set_varying (type
);
2564 operator_bitwise_xor::op2_range (irange
&r
, tree type
,
2566 const irange
&op1
) const
2568 return operator_bitwise_xor::op1_range (r
, type
, lhs
, op1
);
2571 class operator_trunc_mod
: public range_operator
2574 virtual void wi_fold (irange
&r
, tree type
,
2575 const wide_int
&lh_lb
,
2576 const wide_int
&lh_ub
,
2577 const wide_int
&rh_lb
,
2578 const wide_int
&rh_ub
) const;
2582 operator_trunc_mod::wi_fold (irange
&r
, tree type
,
2583 const wide_int
&lh_lb
,
2584 const wide_int
&lh_ub
,
2585 const wide_int
&rh_lb
,
2586 const wide_int
&rh_ub
) const
2588 wide_int new_lb
, new_ub
, tmp
;
2589 signop sign
= TYPE_SIGN (type
);
2590 unsigned prec
= TYPE_PRECISION (type
);
2592 // Mod 0 is undefined. Return undefined.
2593 if (wi_zero_p (type
, rh_lb
, rh_ub
))
2599 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
2604 new_ub
= wi::smax (new_ub
, tmp
);
2607 if (sign
== UNSIGNED
)
2608 new_lb
= wi::zero (prec
);
2613 if (wi::gts_p (tmp
, 0))
2614 tmp
= wi::zero (prec
);
2615 new_lb
= wi::smax (new_lb
, tmp
);
2618 if (sign
== SIGNED
&& wi::neg_p (tmp
))
2619 tmp
= wi::zero (prec
);
2620 new_ub
= wi::min (new_ub
, tmp
, sign
);
2622 value_range_with_overflow (r
, type
, new_lb
, new_ub
);
2626 class operator_logical_not
: public range_operator
2629 virtual bool fold_range (irange
&r
, tree type
,
2631 const irange
&rh
) const;
2632 virtual bool op1_range (irange
&r
, tree type
,
2634 const irange
&op2
) const;
2637 // Folding a logical NOT, oddly enough, involves doing nothing on the
2638 // forward pass through. During the initial walk backwards, the
2639 // logical NOT reversed the desired outcome on the way back, so on the
2640 // way forward all we do is pass the range forward.
2645 // to determine the TRUE branch, walking backward
2646 // if (b_3) if ([1,1])
2647 // b_3 = !b_2 [1,1] = ![0,0]
2648 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
2649 // which is the result we are looking for.. so.. pass it through.
2652 operator_logical_not::fold_range (irange
&r
, tree type
,
2654 const irange
&rh ATTRIBUTE_UNUSED
) const
2656 if (empty_range_varying (r
, type
, lh
, rh
))
2659 if (lh
.varying_p () || lh
.undefined_p ())
2666 gcc_checking_assert (lh
.type() == type
);
2671 operator_logical_not::op1_range (irange
&r
,
2672 tree type ATTRIBUTE_UNUSED
,
2674 const irange
&op2 ATTRIBUTE_UNUSED
) const
2677 if (!lhs
.varying_p () && !lhs
.undefined_p ())
2683 class operator_bitwise_not
: public range_operator
2686 virtual bool fold_range (irange
&r
, tree type
,
2688 const irange
&rh
) const;
2689 virtual bool op1_range (irange
&r
, tree type
,
2691 const irange
&op2
) const;
2695 operator_bitwise_not::fold_range (irange
&r
, tree type
,
2697 const irange
&rh
) const
2699 if (empty_range_varying (r
, type
, lh
, rh
))
2702 // ~X is simply -1 - X.
2703 int_range
<1> minusone (type
, wi::minus_one (TYPE_PRECISION (type
)),
2704 wi::minus_one (TYPE_PRECISION (type
)));
2705 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
, minusone
,
2710 operator_bitwise_not::op1_range (irange
&r
, tree type
,
2712 const irange
&op2
) const
2714 // ~X is -1 - X and since bitwise NOT is involutary...do it again.
2715 return fold_range (r
, type
, lhs
, op2
);
2719 class operator_cst
: public range_operator
2722 virtual bool fold_range (irange
&r
, tree type
,
2724 const irange
&op2
) const;
2728 operator_cst::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2730 const irange
&rh ATTRIBUTE_UNUSED
) const
2737 class operator_identity
: public range_operator
2740 virtual bool fold_range (irange
&r
, tree type
,
2742 const irange
&op2
) const;
2743 virtual bool op1_range (irange
&r
, tree type
,
2745 const irange
&op2
) const;
2749 operator_identity::fold_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2751 const irange
&rh ATTRIBUTE_UNUSED
) const
2758 operator_identity::op1_range (irange
&r
, tree type ATTRIBUTE_UNUSED
,
2760 const irange
&op2 ATTRIBUTE_UNUSED
) const
2767 class operator_unknown
: public range_operator
2770 virtual bool fold_range (irange
&r
, tree type
,
2772 const irange
&op2
) const;
2776 operator_unknown::fold_range (irange
&r
, tree type
,
2777 const irange
&lh ATTRIBUTE_UNUSED
,
2778 const irange
&rh ATTRIBUTE_UNUSED
) const
2780 r
.set_varying (type
);
2785 class operator_abs
: public range_operator
2788 virtual void wi_fold (irange
&r
, tree type
,
2789 const wide_int
&lh_lb
,
2790 const wide_int
&lh_ub
,
2791 const wide_int
&rh_lb
,
2792 const wide_int
&rh_ub
) const;
2793 virtual bool op1_range (irange
&r
, tree type
,
2795 const irange
&op2
) const;
2799 operator_abs::wi_fold (irange
&r
, tree type
,
2800 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2801 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
2802 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
2805 signop sign
= TYPE_SIGN (type
);
2806 unsigned prec
= TYPE_PRECISION (type
);
2808 // Pass through LH for the easy cases.
2809 if (sign
== UNSIGNED
|| wi::ge_p (lh_lb
, 0, sign
))
2811 r
= int_range
<1> (type
, lh_lb
, lh_ub
);
2815 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
2817 wide_int min_value
= wi::min_value (prec
, sign
);
2818 wide_int max_value
= wi::max_value (prec
, sign
);
2819 if (!TYPE_OVERFLOW_UNDEFINED (type
) && wi::eq_p (lh_lb
, min_value
))
2821 r
.set_varying (type
);
2825 // ABS_EXPR may flip the range around, if the original range
2826 // included negative values.
2827 if (wi::eq_p (lh_lb
, min_value
))
2830 min
= wi::abs (lh_lb
);
2831 if (wi::eq_p (lh_ub
, min_value
))
2834 max
= wi::abs (lh_ub
);
2836 // If the range contains zero then we know that the minimum value in the
2837 // range will be zero.
2838 if (wi::le_p (lh_lb
, 0, sign
) && wi::ge_p (lh_ub
, 0, sign
))
2840 if (wi::gt_p (min
, max
, sign
))
2842 min
= wi::zero (prec
);
2846 // If the range was reversed, swap MIN and MAX.
2847 if (wi::gt_p (min
, max
, sign
))
2848 std::swap (min
, max
);
2851 // If the new range has its limits swapped around (MIN > MAX), then
2852 // the operation caused one of them to wrap around. The only thing
2853 // we know is that the result is positive.
2854 if (wi::gt_p (min
, max
, sign
))
2856 min
= wi::zero (prec
);
2859 r
= int_range
<1> (type
, min
, max
);
2863 operator_abs::op1_range (irange
&r
, tree type
,
2865 const irange
&op2
) const
2867 if (empty_range_varying (r
, type
, lhs
, op2
))
2869 if (TYPE_UNSIGNED (type
))
2874 // Start with the positives because negatives are an impossible result.
2875 int_range_max positives
= range_positives (type
);
2876 positives
.intersect (lhs
);
2878 // Then add the negative of each pair:
2879 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
2880 for (unsigned i
= 0; i
< positives
.num_pairs (); ++i
)
2881 r
.union_ (int_range
<1> (type
,
2882 -positives
.upper_bound (i
),
2883 -positives
.lower_bound (i
)));
2888 class operator_absu
: public range_operator
2891 virtual void wi_fold (irange
&r
, tree type
,
2892 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2893 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
2897 operator_absu::wi_fold (irange
&r
, tree type
,
2898 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
2899 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
2900 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
2902 wide_int new_lb
, new_ub
;
2904 // Pass through VR0 the easy cases.
2905 if (wi::ges_p (lh_lb
, 0))
2912 new_lb
= wi::abs (lh_lb
);
2913 new_ub
= wi::abs (lh_ub
);
2915 // If the range contains zero then we know that the minimum
2916 // value in the range will be zero.
2917 if (wi::ges_p (lh_ub
, 0))
2919 if (wi::gtu_p (new_lb
, new_ub
))
2921 new_lb
= wi::zero (TYPE_PRECISION (type
));
2924 std::swap (new_lb
, new_ub
);
2927 gcc_checking_assert (TYPE_UNSIGNED (type
));
2928 r
= int_range
<1> (type
, new_lb
, new_ub
);
2932 class operator_negate
: public range_operator
2935 virtual bool fold_range (irange
&r
, tree type
,
2937 const irange
&op2
) const;
2938 virtual bool op1_range (irange
&r
, tree type
,
2940 const irange
&op2
) const;
2944 operator_negate::fold_range (irange
&r
, tree type
,
2946 const irange
&rh
) const
2948 if (empty_range_varying (r
, type
, lh
, rh
))
2950 // -X is simply 0 - X.
2951 return range_op_handler (MINUS_EXPR
, type
)->fold_range (r
, type
,
2957 operator_negate::op1_range (irange
&r
, tree type
,
2959 const irange
&op2
) const
2961 // NEGATE is involutory.
2962 return fold_range (r
, type
, lhs
, op2
);
2966 class operator_addr_expr
: public range_operator
2969 virtual bool fold_range (irange
&r
, tree type
,
2971 const irange
&op2
) const;
2972 virtual bool op1_range (irange
&r
, tree type
,
2974 const irange
&op2
) const;
2978 operator_addr_expr::fold_range (irange
&r
, tree type
,
2980 const irange
&rh
) const
2982 if (empty_range_varying (r
, type
, lh
, rh
))
2985 // Return a non-null pointer of the LHS type (passed in op2).
2987 r
= range_zero (type
);
2988 else if (!lh
.contains_p (build_zero_cst (lh
.type ())))
2989 r
= range_nonzero (type
);
2991 r
.set_varying (type
);
2996 operator_addr_expr::op1_range (irange
&r
, tree type
,
2998 const irange
&op2
) const
3000 return operator_addr_expr::fold_range (r
, type
, lhs
, op2
);
3004 class pointer_plus_operator
: public range_operator
3007 virtual void wi_fold (irange
&r
, tree type
,
3008 const wide_int
&lh_lb
,
3009 const wide_int
&lh_ub
,
3010 const wide_int
&rh_lb
,
3011 const wide_int
&rh_ub
) const;
3015 pointer_plus_operator::wi_fold (irange
&r
, tree type
,
3016 const wide_int
&lh_lb
,
3017 const wide_int
&lh_ub
,
3018 const wide_int
&rh_lb
,
3019 const wide_int
&rh_ub
) const
3021 // For pointer types, we are really only interested in asserting
3022 // whether the expression evaluates to non-NULL.
3024 // With -fno-delete-null-pointer-checks we need to be more
3025 // conservative. As some object might reside at address 0,
3026 // then some offset could be added to it and the same offset
3027 // subtracted again and the result would be NULL.
3029 // static int a[12]; where &a[0] is NULL and
3032 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
3033 // where the first range doesn't include zero and the second one
3034 // doesn't either. As the second operand is sizetype (unsigned),
3035 // consider all ranges where the MSB could be set as possible
3036 // subtractions where the result might be NULL.
3037 if ((!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3038 || !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3039 && !TYPE_OVERFLOW_WRAPS (type
)
3040 && (flag_delete_null_pointer_checks
3041 || !wi::sign_mask (rh_ub
)))
3042 r
= range_nonzero (type
);
3043 else if (lh_lb
== lh_ub
&& lh_lb
== 0
3044 && rh_lb
== rh_ub
&& rh_lb
== 0)
3045 r
= range_zero (type
);
3047 r
.set_varying (type
);
3051 class pointer_min_max_operator
: public range_operator
3054 virtual void wi_fold (irange
& r
, tree type
,
3055 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3056 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3060 pointer_min_max_operator::wi_fold (irange
&r
, tree type
,
3061 const wide_int
&lh_lb
,
3062 const wide_int
&lh_ub
,
3063 const wide_int
&rh_lb
,
3064 const wide_int
&rh_ub
) const
3066 // For MIN/MAX expressions with pointers, we only care about
3067 // nullness. If both are non null, then the result is nonnull.
3068 // If both are null, then the result is null. Otherwise they
3070 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3071 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3072 r
= range_nonzero (type
);
3073 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
3074 r
= range_zero (type
);
3076 r
.set_varying (type
);
3080 class pointer_and_operator
: public range_operator
3083 virtual void wi_fold (irange
&r
, tree type
,
3084 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3085 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3089 pointer_and_operator::wi_fold (irange
&r
, tree type
,
3090 const wide_int
&lh_lb
,
3091 const wide_int
&lh_ub
,
3092 const wide_int
&rh_lb ATTRIBUTE_UNUSED
,
3093 const wide_int
&rh_ub ATTRIBUTE_UNUSED
) const
3095 // For pointer types, we are really only interested in asserting
3096 // whether the expression evaluates to non-NULL.
3097 if (wi_zero_p (type
, lh_lb
, lh_ub
) || wi_zero_p (type
, lh_lb
, lh_ub
))
3098 r
= range_zero (type
);
3100 r
.set_varying (type
);
3104 class pointer_or_operator
: public range_operator
3107 virtual bool op1_range (irange
&r
, tree type
,
3109 const irange
&op2
) const;
3110 virtual bool op2_range (irange
&r
, tree type
,
3112 const irange
&op1
) const;
3113 virtual void wi_fold (irange
&r
, tree type
,
3114 const wide_int
&lh_lb
, const wide_int
&lh_ub
,
3115 const wide_int
&rh_lb
, const wide_int
&rh_ub
) const;
3119 pointer_or_operator::op1_range (irange
&r
, tree type
,
3121 const irange
&op2 ATTRIBUTE_UNUSED
) const
3125 tree zero
= build_zero_cst (type
);
3126 r
= int_range
<1> (zero
, zero
);
3129 r
.set_varying (type
);
3134 pointer_or_operator::op2_range (irange
&r
, tree type
,
3136 const irange
&op1
) const
3138 return pointer_or_operator::op1_range (r
, type
, lhs
, op1
);
3142 pointer_or_operator::wi_fold (irange
&r
, tree type
,
3143 const wide_int
&lh_lb
,
3144 const wide_int
&lh_ub
,
3145 const wide_int
&rh_lb
,
3146 const wide_int
&rh_ub
) const
3148 // For pointer types, we are really only interested in asserting
3149 // whether the expression evaluates to non-NULL.
3150 if (!wi_includes_zero_p (type
, lh_lb
, lh_ub
)
3151 && !wi_includes_zero_p (type
, rh_lb
, rh_ub
))
3152 r
= range_nonzero (type
);
3153 else if (wi_zero_p (type
, lh_lb
, lh_ub
) && wi_zero_p (type
, rh_lb
, rh_ub
))
3154 r
= range_zero (type
);
3156 r
.set_varying (type
);
3159 // This implements the range operator tables as local objects in this file.
3161 class range_op_table
3164 inline range_operator
*operator[] (enum tree_code code
);
3166 void set (enum tree_code code
, range_operator
&op
);
3168 range_operator
*m_range_tree
[MAX_TREE_CODES
];
3171 // Return a pointer to the range_operator instance, if there is one
3172 // associated with tree_code CODE.
3175 range_op_table::operator[] (enum tree_code code
)
3177 gcc_checking_assert (code
> 0 && code
< MAX_TREE_CODES
);
3178 return m_range_tree
[code
];
3181 // Add OP to the handler table for CODE.
3184 range_op_table::set (enum tree_code code
, range_operator
&op
)
3186 gcc_checking_assert (m_range_tree
[code
] == NULL
);
3187 m_range_tree
[code
] = &op
;
3190 // Instantiate a range op table for integral operations.
3192 class integral_table
: public range_op_table
3196 } integral_tree_table
;
3198 integral_table::integral_table ()
3200 set (EQ_EXPR
, op_equal
);
3201 set (NE_EXPR
, op_not_equal
);
3202 set (LT_EXPR
, op_lt
);
3203 set (LE_EXPR
, op_le
);
3204 set (GT_EXPR
, op_gt
);
3205 set (GE_EXPR
, op_ge
);
3206 set (PLUS_EXPR
, op_plus
);
3207 set (MINUS_EXPR
, op_minus
);
3208 set (MIN_EXPR
, op_min
);
3209 set (MAX_EXPR
, op_max
);
3210 set (MULT_EXPR
, op_mult
);
3211 set (TRUNC_DIV_EXPR
, op_trunc_div
);
3212 set (FLOOR_DIV_EXPR
, op_floor_div
);
3213 set (ROUND_DIV_EXPR
, op_round_div
);
3214 set (CEIL_DIV_EXPR
, op_ceil_div
);
3215 set (EXACT_DIV_EXPR
, op_exact_div
);
3216 set (LSHIFT_EXPR
, op_lshift
);
3217 set (RSHIFT_EXPR
, op_rshift
);
3218 set (NOP_EXPR
, op_convert
);
3219 set (CONVERT_EXPR
, op_convert
);
3220 set (TRUTH_AND_EXPR
, op_logical_and
);
3221 set (BIT_AND_EXPR
, op_bitwise_and
);
3222 set (TRUTH_OR_EXPR
, op_logical_or
);
3223 set (BIT_IOR_EXPR
, op_bitwise_or
);
3224 set (BIT_XOR_EXPR
, op_bitwise_xor
);
3225 set (TRUNC_MOD_EXPR
, op_trunc_mod
);
3226 set (TRUTH_NOT_EXPR
, op_logical_not
);
3227 set (BIT_NOT_EXPR
, op_bitwise_not
);
3228 set (INTEGER_CST
, op_integer_cst
);
3229 set (SSA_NAME
, op_identity
);
3230 set (PAREN_EXPR
, op_identity
);
3231 set (OBJ_TYPE_REF
, op_identity
);
3232 set (IMAGPART_EXPR
, op_unknown
);
3233 set (POINTER_DIFF_EXPR
, op_unknown
);
3234 set (ABS_EXPR
, op_abs
);
3235 set (ABSU_EXPR
, op_absu
);
3236 set (NEGATE_EXPR
, op_negate
);
3237 set (ADDR_EXPR
, op_addr
);
3240 // Instantiate a range op table for pointer operations.
3242 class pointer_table
: public range_op_table
3246 } pointer_tree_table
;
3248 pointer_table::pointer_table ()
3250 set (BIT_AND_EXPR
, op_pointer_and
);
3251 set (BIT_IOR_EXPR
, op_pointer_or
);
3252 set (MIN_EXPR
, op_ptr_min_max
);
3253 set (MAX_EXPR
, op_ptr_min_max
);
3254 set (POINTER_PLUS_EXPR
, op_pointer_plus
);
3256 set (EQ_EXPR
, op_equal
);
3257 set (NE_EXPR
, op_not_equal
);
3258 set (LT_EXPR
, op_lt
);
3259 set (LE_EXPR
, op_le
);
3260 set (GT_EXPR
, op_gt
);
3261 set (GE_EXPR
, op_ge
);
3262 set (SSA_NAME
, op_identity
);
3263 set (ADDR_EXPR
, op_addr
);
3264 set (NOP_EXPR
, op_convert
);
3265 set (CONVERT_EXPR
, op_convert
);
3267 set (BIT_NOT_EXPR
, op_bitwise_not
);
3268 set (BIT_XOR_EXPR
, op_bitwise_xor
);
3271 // The tables are hidden and accessed via a simple extern function.
3274 range_op_handler (enum tree_code code
, tree type
)
3276 // First check if there is apointer specialization.
3277 if (POINTER_TYPE_P (type
))
3278 return pointer_tree_table
[code
];
3279 return integral_tree_table
[code
];
3282 // Cast the range in R to TYPE.
3285 range_cast (irange
&r
, tree type
)
3287 int_range_max tmp
= r
;
3288 range_operator
*op
= range_op_handler (CONVERT_EXPR
, type
);
3289 // Call op_convert, if it fails, the result is varying.
3290 if (!op
->fold_range (r
, type
, tmp
, int_range
<1> (type
)))
3291 r
.set_varying (type
);
3295 #include "selftest.h"
3296 #include "stor-layout.h"
3300 #define INT(N) build_int_cst (integer_type_node, (N))
3301 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
3302 #define INT16(N) build_int_cst (short_integer_type_node, (N))
3303 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
3304 #define INT64(N) build_int_cstu (long_long_integer_type_node, (N))
3305 #define UINT64(N) build_int_cstu (long_long_unsigned_type_node, (N))
3306 #define UINT128(N) build_int_cstu (u128_type, (N))
3307 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
3308 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
3311 build_range3 (int a
, int b
, int c
, int d
, int e
, int f
)
3313 int_range
<3> i1 (INT (a
), INT (b
));
3314 int_range
<3> i2 (INT (c
), INT (d
));
3315 int_range
<3> i3 (INT (e
), INT (f
));
3324 typedef int_range
<3> int_range3
;
3325 int_range3 r0
, r1
, r2
;
3326 int_range3 i1
, i2
, i3
;
3328 // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
3329 r0
= int_range3 (INT (10), INT (20));
3330 r1
= int_range3 (INT (5), INT (8));
3332 r1
= int_range3 (INT (1), INT (3));
3334 ASSERT_TRUE (r0
== build_range3 (1, 3, 5, 8, 10, 20));
3336 // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
3337 r1
= int_range3 (INT (-5), INT (0));
3339 ASSERT_TRUE (r0
== build_range3 (-5, 3, 5, 8, 10, 20));
3341 // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
3342 r1
= int_range3 (INT (50), INT (60));
3343 r0
= int_range3 (INT (10), INT (20));
3344 r0
.union_ (int_range3 (INT (30), INT (40)));
3346 ASSERT_TRUE (r0
== build_range3 (10, 20, 30, 40, 50, 60));
3347 // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
3348 r1
= int_range3 (INT (70), INT (80));
3351 r2
= build_range3 (10, 20, 30, 40, 50, 60);
3352 r2
.union_ (int_range3 (INT (70), INT (80)));
3353 ASSERT_TRUE (r0
== r2
);
3355 // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
3356 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3357 r1
= int_range3 (INT (6), INT (35));
3359 r1
= int_range3 (INT (6), INT (40));
3360 r1
.union_ (int_range3 (INT (50), INT (60)));
3361 ASSERT_TRUE (r0
== r1
);
3363 // [10,20][30,40][50,60] U [6,60] => [6,60].
3364 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3365 r1
= int_range3 (INT (6), INT (60));
3367 ASSERT_TRUE (r0
== int_range3 (INT (6), INT (60)));
3369 // [10,20][30,40][50,60] U [6,70] => [6,70].
3370 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3371 r1
= int_range3 (INT (6), INT (70));
3373 ASSERT_TRUE (r0
== int_range3 (INT (6), INT (70)));
3375 // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
3376 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3377 r1
= int_range3 (INT (35), INT (70));
3379 r1
= int_range3 (INT (10), INT (20));
3380 r1
.union_ (int_range3 (INT (30), INT (70)));
3381 ASSERT_TRUE (r0
== r1
);
3383 // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
3384 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3385 r1
= int_range3 (INT (15), INT (35));
3387 r1
= int_range3 (INT (10), INT (40));
3388 r1
.union_ (int_range3 (INT (50), INT (60)));
3389 ASSERT_TRUE (r0
== r1
);
3391 // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
3392 r0
= build_range3 (10, 20, 30, 40, 50, 60);
3393 r1
= int_range3 (INT (35), INT (35));
3395 ASSERT_TRUE (r0
== build_range3 (10, 20, 30, 40, 50, 60));
3399 int_range_max_tests ()
3402 unsigned int nrange
;
3404 // Build a huge multi-range range.
3405 for (nrange
= 0; nrange
< 50; ++nrange
)
3407 int_range
<1> tmp (INT (nrange
*10), INT (nrange
*10 + 5));
3410 ASSERT_TRUE (big
.num_pairs () == nrange
);
3412 // Verify that we can copy it without loosing precision.
3413 int_range_max
copy (big
);
3414 ASSERT_TRUE (copy
.num_pairs () == nrange
);
3416 // Inverting it should produce one more sub-range.
3418 ASSERT_TRUE (big
.num_pairs () == nrange
+ 1);
3420 int_range
<1> tmp (INT (5), INT (37));
3421 big
.intersect (tmp
);
3422 ASSERT_TRUE (big
.num_pairs () == 4);
3424 // Test that [10,10][20,20] does NOT contain 15.
3426 int_range_max
i1 (build_int_cst (integer_type_node
, 10),
3427 build_int_cst (integer_type_node
, 10));
3428 int_range_max
i2 (build_int_cst (integer_type_node
, 20),
3429 build_int_cst (integer_type_node
, 20));
3431 ASSERT_FALSE (i1
.contains_p (build_int_cst (integer_type_node
, 15)));
3437 multi_precision_range_tests ()
3439 // Test truncating copy to int_range<1>.
3440 int_range
<3> big
= build_range3 (10, 20, 30, 40, 50, 60);
3441 int_range
<1> small
= big
;
3442 ASSERT_TRUE (small
== int_range
<1> (INT (10), INT (60)));
3444 // Test truncating copy to int_range<2>.
3445 int_range
<2> medium
= big
;
3446 ASSERT_TRUE (!medium
.undefined_p ());
3448 // Test that a truncating copy of [MIN,20][22,40][80,MAX]
3449 // ends up as a conservative anti-range of ~[21,21].
3450 big
= int_range
<3> (vrp_val_min (integer_type_node
), INT (20));
3451 big
.union_ (int_range
<1> (INT (22), INT (40)));
3452 big
.union_ (int_range
<1> (INT (80), vrp_val_max (integer_type_node
)));
3454 ASSERT_TRUE (small
== int_range
<1> (INT (21), INT (21), VR_ANTI_RANGE
));
3456 // Copying a legacy symbolic to an int_range should normalize the
3457 // symbolic at copy time.
3459 tree ssa
= make_ssa_name (integer_type_node
);
3460 value_range
legacy_range (ssa
, INT (25));
3461 int_range
<2> copy
= legacy_range
;
3462 ASSERT_TRUE (copy
== int_range
<2> (vrp_val_min (integer_type_node
),
3465 // Test that copying ~[abc_23, abc_23] to a multi-range yields varying.
3466 legacy_range
= value_range (ssa
, ssa
, VR_ANTI_RANGE
);
3467 copy
= legacy_range
;
3468 ASSERT_TRUE (copy
.varying_p ());
3477 tree min
= vrp_val_min (integer_type_node
);
3478 tree max
= vrp_val_max (integer_type_node
);
3479 tree tiny
= fold_build2 (PLUS_EXPR
, integer_type_node
, min
,
3480 build_one_cst (integer_type_node
));
3482 int_range_max
i1 (tiny
, max
);
3483 int_range_max
i2 (build_int_cst (integer_type_node
, 255),
3484 build_int_cst (integer_type_node
, 255));
3486 // [MIN+1, MAX] = OP1 & 255: OP1 is VARYING
3487 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
3488 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
3490 // VARYING = OP1 & 255: OP1 is VARYING
3491 i1
= int_range
<1> (integer_type_node
);
3492 op_bitwise_and
.op1_range (res
, integer_type_node
, i1
, i2
);
3493 ASSERT_TRUE (res
== int_range
<1> (integer_type_node
));
3495 // Test that 0x808.... & 0x8.... still contains 0x8....
3496 // for a large set of numbers.
3498 tree big_type
= long_long_unsigned_type_node
;
3499 // big_num = 0x808,0000,0000,0000
3500 tree big_num
= fold_build2 (LSHIFT_EXPR
, big_type
,
3501 build_int_cst (big_type
, 0x808),
3502 build_int_cst (big_type
, 48));
3503 op_bitwise_and
.fold_range (res
, big_type
,
3504 int_range
<1> (big_type
),
3505 int_range
<1> (big_num
, big_num
));
3506 // val = 0x8,0000,0000,0000
3507 tree val
= fold_build2 (LSHIFT_EXPR
, big_type
,
3508 build_int_cst (big_type
, 0x8),
3509 build_int_cst (big_type
, 48));
3510 ASSERT_TRUE (res
.contains_p (val
));
3513 // unsigned: [3, MAX] = OP1 >> 1
3515 int_range_max
lhs (build_int_cst (unsigned_type_node
, 3),
3516 TYPE_MAX_VALUE (unsigned_type_node
));
3517 int_range_max
one (build_one_cst (unsigned_type_node
),
3518 build_one_cst (unsigned_type_node
));
3520 op_rshift
.op1_range (op1
, unsigned_type_node
, lhs
, one
);
3521 ASSERT_FALSE (op1
.contains_p (UINT (3)));
3524 // signed: [3, MAX] = OP1 >> 1
3526 int_range_max
lhs (INT (3), TYPE_MAX_VALUE (integer_type_node
));
3527 int_range_max
one (INT (1), INT (1));
3529 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
3530 ASSERT_FALSE (op1
.contains_p (INT (-2)));
3533 // This is impossible, so OP1 should be [].
3534 // signed: [MIN, MIN] = OP1 >> 1
3536 int_range_max
lhs (TYPE_MIN_VALUE (integer_type_node
),
3537 TYPE_MIN_VALUE (integer_type_node
));
3538 int_range_max
one (INT (1), INT (1));
3540 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, one
);
3541 ASSERT_TRUE (op1
.undefined_p ());
3544 // signed: ~[-1] = OP1 >> 31
3545 if (TYPE_PRECISION (integer_type_node
) > 31)
3547 int_range_max
lhs (INT (-1), INT (-1), VR_ANTI_RANGE
);
3548 int_range_max
shift (INT (31), INT (31));
3550 op_rshift
.op1_range (op1
, integer_type_node
, lhs
, shift
);
3551 int_range_max negatives
= range_negatives (integer_type_node
);
3552 negatives
.intersect (op1
);
3553 ASSERT_TRUE (negatives
.undefined_p ());
3557 // Run all of the selftests within this file.
3562 tree u128_type
= build_nonstandard_integer_type (128, /*unsigned=*/1);
3563 int_range
<1> i1
, i2
, i3
;
3564 int_range
<1> r0
, r1
, rold
;
3566 // Test 1-bit signed integer union.
3567 // [-1,-1] U [0,0] = VARYING.
3568 tree one_bit_type
= build_nonstandard_integer_type (1, 0);
3570 tree one_bit_min
= vrp_val_min (one_bit_type
);
3571 tree one_bit_max
= vrp_val_max (one_bit_type
);
3572 int_range
<2> min (one_bit_min
, one_bit_min
);
3573 int_range
<2> max (one_bit_max
, one_bit_max
);
3575 ASSERT_TRUE (max
.varying_p ());
3578 // Test that NOT(255) is [0..254] in 8-bit land.
3579 int_range
<1> not_255 (UCHAR (255), UCHAR (255), VR_ANTI_RANGE
);
3580 ASSERT_TRUE (not_255
== int_range
<1> (UCHAR (0), UCHAR (254)));
3582 // Test that NOT(0) is [1..255] in 8-bit land.
3583 int_range
<1> not_zero
= range_nonzero (unsigned_char_type_node
);
3584 ASSERT_TRUE (not_zero
== int_range
<1> (UCHAR (1), UCHAR (255)));
3586 // Check that [0,127][0x..ffffff80,0x..ffffff]
3587 // => ~[128, 0x..ffffff7f].
3588 r0
= int_range
<1> (UINT128 (0), UINT128 (127));
3589 tree high
= build_minus_one_cst (u128_type
);
3590 // low = -1 - 127 => 0x..ffffff80.
3591 tree low
= fold_build2 (MINUS_EXPR
, u128_type
, high
, UINT128(127));
3592 r1
= int_range
<1> (low
, high
); // [0x..ffffff80, 0x..ffffffff]
3593 // r0 = [0,127][0x..ffffff80,0x..fffffff].
3595 // r1 = [128, 0x..ffffff7f].
3596 r1
= int_range
<1> (UINT128(128),
3597 fold_build2 (MINUS_EXPR
, u128_type
,
3598 build_minus_one_cst (u128_type
),
3601 ASSERT_TRUE (r0
== r1
);
3603 r0
.set_varying (integer_type_node
);
3604 tree minint
= wide_int_to_tree (integer_type_node
, r0
.lower_bound ());
3605 tree maxint
= wide_int_to_tree (integer_type_node
, r0
.upper_bound ());
3607 r0
.set_varying (short_integer_type_node
);
3608 tree minshort
= wide_int_to_tree (short_integer_type_node
, r0
.lower_bound ());
3609 tree maxshort
= wide_int_to_tree (short_integer_type_node
, r0
.upper_bound ());
3611 r0
.set_varying (unsigned_type_node
);
3612 tree maxuint
= wide_int_to_tree (unsigned_type_node
, r0
.upper_bound ());
3614 // Check that ~[0,5] => [6,MAX] for unsigned int.
3615 r0
= int_range
<1> (UINT (0), UINT (5));
3617 ASSERT_TRUE (r0
== int_range
<1> (UINT(6), maxuint
));
3619 // Check that ~[10,MAX] => [0,9] for unsigned int.
3620 r0
= int_range
<1> (UINT(10), maxuint
);
3622 ASSERT_TRUE (r0
== int_range
<1> (UINT (0), UINT (9)));
3624 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
3625 r0
= int_range
<1> (UINT128 (0), UINT128 (5), VR_ANTI_RANGE
);
3626 r1
= int_range
<1> (UINT128(6), build_minus_one_cst (u128_type
));
3627 ASSERT_TRUE (r0
== r1
);
3629 // Check that [~5] is really [-MIN,4][6,MAX].
3630 r0
= int_range
<1> (INT (5), INT (5), VR_ANTI_RANGE
);
3631 r1
= int_range
<1> (minint
, INT (4));
3632 r1
.union_ (int_range
<1> (INT (6), maxint
));
3633 ASSERT_FALSE (r1
.undefined_p ());
3634 ASSERT_TRUE (r0
== r1
);
3636 r1
= int_range
<1> (INT (5), INT (5));
3637 int_range
<1> r2 (r1
);
3638 ASSERT_TRUE (r1
== r2
);
3640 r1
= int_range
<1> (INT (5), INT (10));
3642 r1
= int_range
<1> (integer_type_node
,
3643 wi::to_wide (INT (5)), wi::to_wide (INT (10)));
3644 ASSERT_TRUE (r1
.contains_p (INT (7)));
3646 r1
= int_range
<1> (SCHAR (0), SCHAR (20));
3647 ASSERT_TRUE (r1
.contains_p (SCHAR(15)));
3648 ASSERT_FALSE (r1
.contains_p (SCHAR(300)));
3650 // If a range is in any way outside of the range for the converted
3651 // to range, default to the range for the new type.
3652 if (TYPE_PRECISION (TREE_TYPE (maxint
))
3653 > TYPE_PRECISION (short_integer_type_node
))
3655 r1
= int_range
<1> (integer_zero_node
, maxint
);
3656 range_cast (r1
, short_integer_type_node
);
3657 ASSERT_TRUE (r1
.lower_bound () == wi::to_wide (minshort
)
3658 && r1
.upper_bound() == wi::to_wide (maxshort
));
3661 // (unsigned char)[-5,-1] => [251,255].
3662 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (-1));
3663 range_cast (r0
, unsigned_char_type_node
);
3664 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (251), UCHAR (255)));
3665 range_cast (r0
, signed_char_type_node
);
3666 ASSERT_TRUE (r0
== rold
);
3668 // (signed char)[15, 150] => [-128,-106][15,127].
3669 r0
= rold
= int_range
<1> (UCHAR (15), UCHAR (150));
3670 range_cast (r0
, signed_char_type_node
);
3671 r1
= int_range
<1> (SCHAR (15), SCHAR (127));
3672 r2
= int_range
<1> (SCHAR (-128), SCHAR (-106));
3674 ASSERT_TRUE (r1
== r0
);
3675 range_cast (r0
, unsigned_char_type_node
);
3676 ASSERT_TRUE (r0
== rold
);
3678 // (unsigned char)[-5, 5] => [0,5][251,255].
3679 r0
= rold
= int_range
<1> (SCHAR (-5), SCHAR (5));
3680 range_cast (r0
, unsigned_char_type_node
);
3681 r1
= int_range
<1> (UCHAR (251), UCHAR (255));
3682 r2
= int_range
<1> (UCHAR (0), UCHAR (5));
3684 ASSERT_TRUE (r0
== r1
);
3685 range_cast (r0
, signed_char_type_node
);
3686 ASSERT_TRUE (r0
== rold
);
3688 // (unsigned char)[-5,5] => [0,5][251,255].
3689 r0
= int_range
<1> (INT (-5), INT (5));
3690 range_cast (r0
, unsigned_char_type_node
);
3691 r1
= int_range
<1> (UCHAR (0), UCHAR (5));
3692 r1
.union_ (int_range
<1> (UCHAR (251), UCHAR (255)));
3693 ASSERT_TRUE (r0
== r1
);
3695 // (unsigned char)[5U,1974U] => [0,255].
3696 r0
= int_range
<1> (UINT (5), UINT (1974));
3697 range_cast (r0
, unsigned_char_type_node
);
3698 ASSERT_TRUE (r0
== int_range
<1> (UCHAR (0), UCHAR (255)));
3699 range_cast (r0
, integer_type_node
);
3700 // Going to a wider range should not sign extend.
3701 ASSERT_TRUE (r0
== int_range
<1> (INT (0), INT (255)));
3703 // (unsigned char)[-350,15] => [0,255].
3704 r0
= int_range
<1> (INT (-350), INT (15));
3705 range_cast (r0
, unsigned_char_type_node
);
3706 ASSERT_TRUE (r0
== (int_range
<1>
3707 (TYPE_MIN_VALUE (unsigned_char_type_node
),
3708 TYPE_MAX_VALUE (unsigned_char_type_node
))));
3710 // Casting [-120,20] from signed char to unsigned short.
3711 // => [0, 20][0xff88, 0xffff].
3712 r0
= int_range
<1> (SCHAR (-120), SCHAR (20));
3713 range_cast (r0
, short_unsigned_type_node
);
3714 r1
= int_range
<1> (UINT16 (0), UINT16 (20));
3715 r2
= int_range
<1> (UINT16 (0xff88), UINT16 (0xffff));
3717 ASSERT_TRUE (r0
== r1
);
3718 // A truncating cast back to signed char will work because [-120, 20]
3719 // is representable in signed char.
3720 range_cast (r0
, signed_char_type_node
);
3721 ASSERT_TRUE (r0
== int_range
<1> (SCHAR (-120), SCHAR (20)));
3723 // unsigned char -> signed short
3724 // (signed short)[(unsigned char)25, (unsigned char)250]
3725 // => [(signed short)25, (signed short)250]
3726 r0
= rold
= int_range
<1> (UCHAR (25), UCHAR (250));
3727 range_cast (r0
, short_integer_type_node
);
3728 r1
= int_range
<1> (INT16 (25), INT16 (250));
3729 ASSERT_TRUE (r0
== r1
);
3730 range_cast (r0
, unsigned_char_type_node
);
3731 ASSERT_TRUE (r0
== rold
);
3733 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
3734 r0
= int_range
<1> (TYPE_MIN_VALUE (long_long_integer_type_node
),
3735 TYPE_MAX_VALUE (long_long_integer_type_node
));
3736 range_cast (r0
, short_unsigned_type_node
);
3737 r1
= int_range
<1> (TYPE_MIN_VALUE (short_unsigned_type_node
),
3738 TYPE_MAX_VALUE (short_unsigned_type_node
));
3739 ASSERT_TRUE (r0
== r1
);
3741 // NOT([10,20]) ==> [-MIN,9][21,MAX].
3742 r0
= r1
= int_range
<1> (INT (10), INT (20));
3743 r2
= int_range
<1> (minint
, INT(9));
3744 r2
.union_ (int_range
<1> (INT(21), maxint
));
3745 ASSERT_FALSE (r2
.undefined_p ());
3747 ASSERT_TRUE (r1
== r2
);
3748 // Test that NOT(NOT(x)) == x.
3750 ASSERT_TRUE (r0
== r2
);
3752 // Test that booleans and their inverse work as expected.
3753 r0
= range_zero (boolean_type_node
);
3754 ASSERT_TRUE (r0
== int_range
<1> (build_zero_cst (boolean_type_node
),
3755 build_zero_cst (boolean_type_node
)));
3757 ASSERT_TRUE (r0
== int_range
<1> (build_one_cst (boolean_type_node
),
3758 build_one_cst (boolean_type_node
)));
3760 // Casting NONZERO to a narrower type will wrap/overflow so
3761 // it's just the entire range for the narrower type.
3763 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32].  This
3764 // is outside of the range of a smaller range, return the full
3766 if (TYPE_PRECISION (integer_type_node
)
3767 > TYPE_PRECISION (short_integer_type_node
))
3769 r0
= range_nonzero (integer_type_node
);
3770 range_cast (r0
, short_integer_type_node
);
3771 r1
= int_range
<1> (TYPE_MIN_VALUE (short_integer_type_node
),
3772 TYPE_MAX_VALUE (short_integer_type_node
));
3773 ASSERT_TRUE (r0
== r1
);
3776 // Casting NONZERO from a narrower signed to a wider signed.
3778 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
3779 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
3780 r0
= range_nonzero (short_integer_type_node
);
3781 range_cast (r0
, integer_type_node
);
3782 r1
= int_range
<1> (INT (-32768), INT (-1));
3783 r2
= int_range
<1> (INT (1), INT (32767));
3785 ASSERT_TRUE (r0
== r1
);
3787 // Make sure NULL and non-NULL of pointer types work, and that
3788 // inverses of them are consistent.
3789 tree voidp
= build_pointer_type (void_type_node
);
3790 r0
= range_zero (voidp
);
3794 ASSERT_TRUE (r0
== r1
);
3796 // [10,20] U [15, 30] => [10, 30].
3797 r0
= int_range
<1> (INT (10), INT (20));
3798 r1
= int_range
<1> (INT (15), INT (30));
3800 ASSERT_TRUE (r0
== int_range
<1> (INT (10), INT (30)));
3802 // [15,40] U [] => [15,40].
3803 r0
= int_range
<1> (INT (15), INT (40));
3804 r1
.set_undefined ();
3806 ASSERT_TRUE (r0
== int_range
<1> (INT (15), INT (40)));
3808 // [10,20] U [10,10] => [10,20].
3809 r0
= int_range
<1> (INT (10), INT (20));
3810 r1
= int_range
<1> (INT (10), INT (10));
3812 ASSERT_TRUE (r0
== int_range
<1> (INT (10), INT (20)));
3814 // [10,20] U [9,9] => [9,20].
3815 r0
= int_range
<1> (INT (10), INT (20));
3816 r1
= int_range
<1> (INT (9), INT (9));
3818 ASSERT_TRUE (r0
== int_range
<1> (INT (9), INT (20)));
3820 // [10,20] ^ [15,30] => [15,20].
3821 r0
= int_range
<1> (INT (10), INT (20));
3822 r1
= int_range
<1> (INT (15), INT (30));
3824 ASSERT_TRUE (r0
== int_range
<1> (INT (15), INT (20)));
3826 // Test the internal sanity of wide_int's wrt HWIs.
3827 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node
),
3828 TYPE_SIGN (boolean_type_node
))
3829 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node
)));
3832 r0
= int_range
<1> (INT (0), INT (0));
3833 ASSERT_TRUE (r0
.zero_p ());
3835 // Test nonzero_p().
3836 r0
= int_range
<1> (INT (0), INT (0));
3838 ASSERT_TRUE (r0
.nonzero_p ());
3840 multi_precision_range_tests ();
3841 int_range_max_tests ();
3845 } // namespace selftest
3847 #endif // CHECKING_P