/* Support routines for Value Range Propagation (VRP).
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "insn-codes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-ssa-propagate.h"
#include "tree-chrec.h"
#include "tree-ssa-threadupdate.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "omp-general.h"
#include "case-cfn-macros.h"
#include "alloc-pool.h"
#include "tree-cfgcleanup.h"
#include "stringpool.h"
#include "vr-values.h"
#include "wide-int-range.h"

/* Set of SSA names found live during the RPO traversal of the function
   for still active basic-blocks.  */
static sbitmap *live;

void
value_range_base::set (enum value_range_kind kind, tree min, tree max)
{
  m_kind = kind;
  m_min = min;
  m_max = max;
  if (flag_checking)
    check ();
}

void
value_range::set_equiv (bitmap equiv)
{
  if (undefined_p () || varying_p ())
    equiv = NULL;
  /* Since updating the equivalence set involves deep copying the
     bitmaps, only do it if absolutely necessary.

     All equivalence bitmaps are allocated from the same obstack.  So
     we can use the obstack associated with EQUIV to allocate vr->equiv.  */
  if (m_equiv == NULL
      && equiv != NULL)
    m_equiv = BITMAP_ALLOC (equiv->obstack);

  if (equiv != m_equiv)
    {
      if (equiv && !bitmap_empty_p (equiv))
        bitmap_copy (m_equiv, equiv);
      else
        bitmap_clear (m_equiv);
    }
}

/* Initialize value_range.  */

void
value_range::set (enum value_range_kind kind, tree min, tree max,
                  bitmap equiv)
{
  value_range_base::set (kind, min, max);
  set_equiv (equiv);
  if (flag_checking)
    check ();
}

value_range_base::value_range_base (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max);
}

value_range::value_range (value_range_kind kind, tree min, tree max,
                          bitmap equiv)
{
  set (kind, min, max, equiv);
}

value_range::value_range (const value_range_base &other)
{
  set (other.kind (), other.min (), other.max (), NULL);
}

/* Like set, but keep the equivalences in place.  */

void
value_range::update (value_range_kind kind, tree min, tree max)
{
  set (kind, min, max,
       (kind != VR_UNDEFINED && kind != VR_VARYING) ? m_equiv : NULL);
}

/* Copy value_range in FROM into THIS while avoiding bitmap sharing.

   Note: The code that avoids the bitmap sharing looks at the existing
   this->m_equiv, so this function cannot be used to initialize an
   object.  Use the constructors for initialization.  */

void
value_range::deep_copy (const value_range *from)
{
  set (from->m_kind, from->min (), from->max (), from->m_equiv);
}

void
value_range::move (value_range *from)
{
  set (from->m_kind, from->min (), from->max ());
  m_equiv = from->m_equiv;
  from->m_equiv = NULL;
}

/* Check the validity of the range.  */

void
value_range_base::check ()
{
  switch (m_kind)
    {
    case VR_RANGE:
    case VR_ANTI_RANGE:
      {
        int cmp;

        gcc_assert (m_min && m_max);

        gcc_assert (!TREE_OVERFLOW_P (m_min) && !TREE_OVERFLOW_P (m_max));

        /* Creating ~[-MIN, +MAX] is stupid because that would be
           the empty set.  */
        if (INTEGRAL_TYPE_P (TREE_TYPE (m_min)) && m_kind == VR_ANTI_RANGE)
          gcc_assert (!vrp_val_is_min (m_min) || !vrp_val_is_max (m_max));

        cmp = compare_values (m_min, m_max);
        gcc_assert (cmp == 0 || cmp == -1 || cmp == -2);
        break;
      }
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!min () && !max ());
      break;
    default:
      gcc_unreachable ();
    }
}

void
value_range::check ()
{
  value_range_base::check ();
  switch (m_kind)
    {
    case VR_UNDEFINED:
    case VR_VARYING:
      gcc_assert (!m_equiv || bitmap_empty_p (m_equiv));
      break;
    default:;
    }
}

/* Equality operator.  We purposely do not overload ==, to avoid
   confusion with the equality bitmap in the derived value_range
   class.  */

bool
value_range_base::equal_p (const value_range_base &other) const
{
  return (m_kind == other.m_kind
          && vrp_operand_equal_p (m_min, other.m_min)
          && vrp_operand_equal_p (m_max, other.m_max));
}

/* Returns TRUE if THIS == OTHER.  Ignores the equivalence bitmap if
   IGNORE_EQUIVS is TRUE.  */

bool
value_range::equal_p (const value_range &other, bool ignore_equivs) const
{
  return (value_range_base::equal_p (other)
          && (ignore_equivs
              || vrp_bitmap_equal_p (m_equiv, other.m_equiv)));
}

/* Return TRUE if this is a symbolic range.  */

bool
value_range_base::symbolic_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && (!is_gimple_min_invariant (m_min)
              || !is_gimple_min_invariant (m_max)));
}

/* NOTE: This is not the inverse of symbolic_p because the range
   could also be varying or undefined.  Ideally they should be inverse
   of each other, with varying only applying to symbolics.  Varying of
   constants would be represented as [-MIN, +MAX].  */

bool
value_range_base::constant_p () const
{
  return (!varying_p ()
          && !undefined_p ()
          && TREE_CODE (m_min) == INTEGER_CST
          && TREE_CODE (m_max) == INTEGER_CST);
}

void
value_range_base::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL);
}

void
value_range::set_undefined ()
{
  set (VR_UNDEFINED, NULL, NULL, NULL);
}

void
value_range_base::set_varying ()
{
  set (VR_VARYING, NULL, NULL);
}

void
value_range::set_varying ()
{
  set (VR_VARYING, NULL, NULL, NULL);
}

/* Return TRUE if it is possible that range contains VAL.  */

bool
value_range_base::may_contain_p (tree val) const
{
  if (varying_p ())
    return true;

  if (m_kind == VR_ANTI_RANGE)
    {
      int res = value_inside_range (val, min (), max ());
      return res == 0 || res == -2;
    }
  return value_inside_range (val, min (), max ()) != 0;
}

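/* For example, given the anti-range ~[0, 10], may_contain_p (5) above
   returns false because 5 lies inside the excluded interval, while
   may_contain_p (20) returns true.  */
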
void
value_range::equiv_clear ()
{
  if (m_equiv)
    bitmap_clear (m_equiv);
}

/* Add VAR and VAR's equivalence set (VAR_VR) to the equivalence
   bitmap.  If no equivalence table has been created, OBSTACK is the
   obstack to use (NULL for the default obstack).

   This is the central point where equivalence processing can be
   turned on/off.  */

void
value_range::equiv_add (const_tree var,
                        const value_range *var_vr,
                        bitmap_obstack *obstack)
{
  if (!m_equiv)
    m_equiv = BITMAP_ALLOC (obstack);
  unsigned ver = SSA_NAME_VERSION (var);
  bitmap_set_bit (m_equiv, ver);
  if (var_vr && var_vr->m_equiv)
    bitmap_ior_into (m_equiv, var_vr->m_equiv);
}

/* If range is a singleton, place it in RESULT and return TRUE.
   Note: A singleton can be any gimple invariant, not just constants.
   So, [&x, &x] counts as a singleton.  */

bool
value_range_base::singleton_p (tree *result) const
{
  if (m_kind == VR_RANGE
      && vrp_operand_equal_p (min (), max ())
      && is_gimple_min_invariant (min ()))
    {
      if (result)
        *result = min ();
      return true;
    }
  return false;
}

tree
value_range_base::type () const
{
  /* Types are only valid for VR_RANGE and VR_ANTI_RANGE, which are
     known to have non-zero min/max.  */
  gcc_assert (min ());
  return TREE_TYPE (min ());
}

/* Dump value range to FILE.  */

void
value_range_base::dump (FILE *file) const
{
  if (undefined_p ())
    fprintf (file, "UNDEFINED");
  else if (m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
    {
      tree ttype = type ();

      print_generic_expr (file, ttype);
      fprintf (file, " ");

      fprintf (file, "%s[", (m_kind == VR_ANTI_RANGE) ? "~" : "");

      if (INTEGRAL_TYPE_P (ttype)
          && !TYPE_UNSIGNED (ttype)
          && vrp_val_is_min (min ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "-INF");
      else
        print_generic_expr (file, min ());

      fprintf (file, ", ");

      if (INTEGRAL_TYPE_P (ttype)
          && vrp_val_is_max (max ())
          && TYPE_PRECISION (ttype) != 1)
        fprintf (file, "+INF");
      else
        print_generic_expr (file, max ());

      fprintf (file, "]");
    }
  else if (varying_p ())
    fprintf (file, "VARYING");
  else
    gcc_unreachable ();
}

void
value_range::dump (FILE *file) const
{
  value_range_base::dump (file);
  if ((m_kind == VR_RANGE || m_kind == VR_ANTI_RANGE)
      && m_equiv)
    {
      bitmap_iterator bi;
      unsigned i, c = 0;

      fprintf (file, "  EQUIVALENCES: { ");

      EXECUTE_IF_SET_IN_BITMAP (m_equiv, 0, i, bi)
        {
          print_generic_expr (file, ssa_name (i));
          fprintf (file, " ");
          c++;
        }

      fprintf (file, "} (%u elements)", c);
    }
}

void
dump_value_range (FILE *file, const value_range *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

void
dump_value_range (FILE *file, const value_range_base *vr)
{
  if (!vr)
    fprintf (file, "[]");
  else
    vr->dump (file);
}

DEBUG_FUNCTION void
debug (const value_range_base *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range_base &vr)
{
  dump_value_range (stderr, &vr);
}

DEBUG_FUNCTION void
debug (const value_range *vr)
{
  dump_value_range (stderr, vr);
}

DEBUG_FUNCTION void
debug (const value_range &vr)
{
  dump_value_range (stderr, &vr);
}

/* Return true if the SSA name NAME is live on the edge E.  */

static bool
live_on_edge (edge e, tree name)
{
  return (live[e->dest->index]
          && bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}

/* Location information for ASSERT_EXPRs.  Each instance of this
   structure describes an ASSERT_EXPR for an SSA name.  Since a single
   SSA name may have more than one assertion associated with it, these
   locations are kept in a linked list attached to the corresponding
   SSA name.  */
struct assert_locus
{
  /* Basic block where the assertion would be inserted.  */
  basic_block bb;

  /* Some assertions need to be inserted on an edge (e.g., assertions
     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
  edge e;

  /* Pointer to the statement that generated this assertion.  */
  gimple_stmt_iterator si;

  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
  enum tree_code comp_code;

  /* Value being compared against.  */
  tree val;

  /* Expression to compare.  */
  tree expr;

  /* Next node in the linked list.  */
  assert_locus *next;
};

/* If bit I is present, it means that SSA name N_i has a list of
   assertions that should be inserted in the IL.  */
static bitmap need_assert_for;

/* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
   holds a list of ASSERT_LOCUS_T nodes that describe where
   ASSERT_EXPRs for SSA name N_I should be inserted.  */
static assert_locus **asserts_for;

/* Return the maximum value for TYPE.  */

tree
vrp_val_max (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MAX_VALUE (type);
}

/* Return the minimum value for TYPE.  */

tree
vrp_val_min (const_tree type)
{
  if (!INTEGRAL_TYPE_P (type))
    return NULL_TREE;

  return TYPE_MIN_VALUE (type);
}

/* Return whether VAL is equal to the maximum value of its type.
   We can't do a simple equality comparison with TYPE_MAX_VALUE because
   C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
   is not == to the integer constant with the same value in the type.  */

bool
vrp_val_is_max (const_tree val)
{
  tree type_max = vrp_val_max (TREE_TYPE (val));
  return (val == type_max
          || (type_max != NULL_TREE
              && operand_equal_p (val, type_max, 0)));
}

/* Return whether VAL is equal to the minimum value of its type.  */

bool
vrp_val_is_min (const_tree val)
{
  tree type_min = vrp_val_min (TREE_TYPE (val));
  return (val == type_min
          || (type_min != NULL_TREE
              && operand_equal_p (val, type_min, 0)));
}

/* VR_TYPE describes a range with minimum value *MIN and maximum
   value *MAX.  Restrict the range to the set of values that have
   no bits set outside NONZERO_BITS.  Update *MIN and *MAX and
   return the new range type.

   SGN gives the sign of the values described by the range.  */

enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
                                   wide_int *min, wide_int *max,
                                   const wide_int &nonzero_bits,
                                   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
         A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
         to create an inclusive upper bound for A and an inclusive lower
         bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
         and A_MAX is the highest value that satisfies NONZERO_BITS.
         Likewise if the calculation of B_MIN wrapped, B is effectively
         empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (a_max, *min, sgn);
      bool b_empty = wi::le_p (b_min, *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
        return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
         other one.  */
      if (a_empty || b_empty)
        {
          *min = b_min;
          *max = a_max;
          gcc_checking_assert (wi::le_p (*min, *max, sgn));
          return VR_RANGE;
        }

      /* Update the VR_ANTI_RANGE bounds.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
         satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
        {
          unsigned int precision = min->get_precision ();
          *min = wi::min_value (precision, sgn);
          *max = wi::max_value (precision, sgn);
          vr_type = VR_RANGE;
        }
    }
  if (vr_type == VR_RANGE)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (*min, *max, sgn))
        return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}

/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
   This means adjusting VRTYPE, MIN and MAX representing the case of a
   wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
   as anti-range ~[MAX+1, MIN-1].  Likewise for wrapping anti-ranges.
   In corner cases where MAX+1 or MIN-1 wraps this will fall back
   to varying.
   This routine exists to ease canonicalization in the case where we
   extract ranges from var + CST op limit.  */

void
value_range_base::set_and_canonicalize (enum value_range_kind kind,
                                        tree min, tree max)
{
  /* Use the canonical setters for VR_UNDEFINED and VR_VARYING.  */
  if (kind == VR_UNDEFINED)
    {
      set_undefined ();
      return;
    }
  else if (kind == VR_VARYING)
    {
      set_varying ();
      return;
    }

  /* Nothing to canonicalize for symbolic ranges.  */
  if (TREE_CODE (min) != INTEGER_CST
      || TREE_CODE (max) != INTEGER_CST)
    {
      set (kind, min, max);
      return;
    }

  /* Wrong order for min and max, to swap them and the VR type we need
     to adjust them.  */
  if (tree_int_cst_lt (max, min))
    {
      tree one, tmp;

      /* For one bit precision if max < min, then the swapped
         range covers all values, so for VR_RANGE it is varying and
         for VR_ANTI_RANGE empty range, so drop to varying as well.  */
      if (TYPE_PRECISION (TREE_TYPE (min)) == 1)
        {
          set_varying ();
          return;
        }

      one = build_int_cst (TREE_TYPE (min), 1);
      tmp = int_const_binop (PLUS_EXPR, max, one);
      max = int_const_binop (MINUS_EXPR, min, one);
      min = tmp;

      /* There's one corner case, if we had [C+1, C] before we now have
         that again.  But this represents an empty value range, so drop
         to varying in this case.  */
      if (tree_int_cst_lt (max, min))
        {
          set_varying ();
          return;
        }

      kind = kind == VR_RANGE ? VR_ANTI_RANGE : VR_RANGE;
    }

  /* Anti-ranges that can be represented as ranges should be so.  */
  if (kind == VR_ANTI_RANGE)
    {
      /* For -fstrict-enums we may receive out-of-range ranges so consider
         values < -INF and values > INF as -INF/INF as well.  */
      tree type = TREE_TYPE (min);
      bool is_min = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (min, TYPE_MIN_VALUE (type)) <= 0);
      bool is_max = (INTEGRAL_TYPE_P (type)
                     && tree_int_cst_compare (max, TYPE_MAX_VALUE (type)) >= 0);

      if (is_min && is_max)
        {
          /* We cannot deal with empty ranges, drop to varying.
             ???  This could be VR_UNDEFINED instead.  */
          set_varying ();
          return;
        }
      else if (TYPE_PRECISION (TREE_TYPE (min)) == 1
               && (is_min || is_max))
        {
          /* Non-empty boolean ranges can always be represented
             as a singleton range.  */
          if (is_min)
            min = max = vrp_val_max (TREE_TYPE (min));
          else
            min = max = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
      else if (is_min
               /* As a special exception preserve non-null ranges.  */
               && !(TYPE_UNSIGNED (TREE_TYPE (min))
                    && integer_zerop (max)))
        {
          tree one = build_int_cst (TREE_TYPE (max), 1);
          min = int_const_binop (PLUS_EXPR, max, one);
          max = vrp_val_max (TREE_TYPE (max));
          kind = VR_RANGE;
        }
      else if (is_max)
        {
          tree one = build_int_cst (TREE_TYPE (min), 1);
          max = int_const_binop (MINUS_EXPR, min, one);
          min = vrp_val_min (TREE_TYPE (min));
          kind = VR_RANGE;
        }
    }

  /* Do not drop [-INF(OVF), +INF(OVF)] to varying.  (OVF) has to be sticky
     to make sure VRP iteration terminates, otherwise we can get into
     oscillations.  */

  set (kind, min, max);
}

void
value_range::set_and_canonicalize (enum value_range_kind kind,
                                   tree min, tree max, bitmap equiv)
{
  value_range_base::set_and_canonicalize (kind, min, max);
  if (this->kind () == VR_RANGE || this->kind () == VR_ANTI_RANGE)
    set_equiv (equiv);
  else
    equiv_clear ();
}

void
value_range_base::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val);
}

void
value_range::set (tree val)
{
  gcc_assert (TREE_CODE (val) == SSA_NAME || is_gimple_min_invariant (val));
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  set (VR_RANGE, val, val, NULL);
}

/* Set value range VR to a non-NULL range of type TYPE.  */

void
value_range_base::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero);
}

void
value_range::set_nonnull (tree type)
{
  tree zero = build_int_cst (type, 0);
  set (VR_ANTI_RANGE, zero, zero, NULL);
}

/* Set value range VR to a NULL range of type TYPE.  */

void
value_range_base::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

void
value_range::set_null (tree type)
{
  set (build_int_cst (type, 0));
}

/* Return true, if VAL1 and VAL2 are equal values for VRP purposes.  */

bool
vrp_operand_equal_p (const_tree val1, const_tree val2)
{
  if (val1 == val2)
    return true;
  if (!val1 || !val2 || !operand_equal_p (val1, val2, 0))
    return false;
  return true;
}

/* Return true, if the bitmaps B1 and B2 are equal.  */

bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
  return (b1 == b2
          || ((!b1 || bitmap_empty_p (b1))
              && (!b2 || bitmap_empty_p (b2)))
          || (b1 && b2
              && bitmap_equal_p (b1, b2)));
}

/* Return true if VR is [0, 0].  */

bool
range_is_null (const value_range_base *vr)
{
  return vr->zero_p ();
}

/* Return true if VR is ~[0, 0].  */

bool
range_is_nonnull (const value_range_base *vr)
{
  return (vr->kind () == VR_ANTI_RANGE
          && vr->min () == vr->max ()
          && integer_zerop (vr->min ()));
}

/* Return true if max and min of VR are INTEGER_CST.  It's not necessarily
   a singleton.  */

bool
range_int_cst_p (const value_range_base *vr)
{
  return (vr->kind () == VR_RANGE
          && TREE_CODE (vr->min ()) == INTEGER_CST
          && TREE_CODE (vr->max ()) == INTEGER_CST);
}

/* Return true if VR is an INTEGER_CST singleton.  */

bool
range_int_cst_singleton_p (const value_range_base *vr)
{
  return (range_int_cst_p (vr)
          && tree_int_cst_equal (vr->min (), vr->max ()));
}

/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
   otherwise.  We only handle additive operations and set NEG to true if the
   symbol is negated and INV to the invariant part, if any.  */

tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
  bool neg_;
  tree inv_;

  *inv = NULL_TREE;
  *neg = false;

  if (TREE_CODE (t) == PLUS_EXPR
      || TREE_CODE (t) == POINTER_PLUS_EXPR
      || TREE_CODE (t) == MINUS_EXPR)
    {
      if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
        {
          neg_ = (TREE_CODE (t) == MINUS_EXPR);
          inv_ = TREE_OPERAND (t, 0);
          t = TREE_OPERAND (t, 1);
        }
      else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
        {
          neg_ = false;
          inv_ = TREE_OPERAND (t, 1);
          t = TREE_OPERAND (t, 0);
        }
      else
        return NULL_TREE;
    }
  else
    {
      neg_ = false;
      inv_ = NULL_TREE;
    }

  if (TREE_CODE (t) == NEGATE_EXPR)
    {
      t = TREE_OPERAND (t, 0);
      neg_ = !neg_;
    }

  if (TREE_CODE (t) != SSA_NAME)
    return NULL_TREE;

  if (inv_ && TREE_OVERFLOW_P (inv_))
    inv_ = drop_tree_overflow (inv_);

  *neg = neg_;
  *inv = inv_;
  return t;
}

/* The reverse operation: build a symbolic expression with TYPE
   from symbol SYM, negated according to NEG, and invariant INV.  */

static tree
build_symbolic_expr (tree type, tree sym, bool neg, tree inv)
{
  const bool pointer_p = POINTER_TYPE_P (type);
  tree t = sym;

  if (neg)
    t = build1 (NEGATE_EXPR, type, t);

  if (integer_zerop (inv))
    return t;

  return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}

/* Return
   1 if VAL < VAL2
   0 if !(VAL < VAL2)
   -2 if those are incomparable.  */

int
operand_less_p (tree val, tree val2)
{
  /* LT is folded faster than GE and others.  Inline the common case.  */
  if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
    return tree_int_cst_lt (val, val2);
  else
    {
      tree tcmp;

      fold_defer_overflow_warnings ();

      tcmp = fold_binary_to_constant (LT_EXPR, boolean_type_node, val, val2);

      fold_undefer_and_ignore_overflow_warnings ();

      if (!tcmp
          || TREE_CODE (tcmp) != INTEGER_CST)
        return -2;

      if (!integer_zerop (tcmp))
        return 1;
    }

  return 0;
}

/* Compare two values VAL1 and VAL2.  Return

        -2 if VAL1 and VAL2 cannot be compared at compile-time,
        -1 if VAL1 < VAL2,
         0 if VAL1 == VAL2,
        +1 if VAL1 > VAL2, and
        +2 if VAL1 != VAL2

   This is similar to tree_int_cst_compare but supports pointer values
   and values that cannot be compared at compile time.

   If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
   true if the return value is only valid if we assume that signed
   overflow is undefined.  */

int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
              == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  val2 = fold_convert (TREE_TYPE (val1), val2);
  STRIP_USELESS_TYPE_CONVERSION (val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  bool neg1, neg2;
  tree inv1, inv2;
  tree sym1 = get_single_symbol (val1, &neg1, &inv1);
  tree sym2 = get_single_symbol (val2, &neg2, &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
        return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
        return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!inv1 || !TREE_NO_WARNING (val1))
          && (!inv2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      if (!inv1)
        inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
        inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (wi::to_wide (inv1), wi::to_wide (inv2),
                      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
        return -2;

      if (strict_overflow_p != NULL
          /* Symbolic range building sets TREE_NO_WARNING to declare
             that overflow doesn't happen.  */
          && (!sym1 || !TREE_NO_WARNING (val1))
          && (!sym2 || !TREE_NO_WARNING (val2)))
        *strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
         underflows, this means that we can trivially compare the NAME with
         it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (cst) - wi::to_wide (inv);
      if (wi::cmp (0, wi::to_wide (inv), sgn)
          != wi::cmp (diff, wi::to_wide (cst), sgn))
        {
          const int res = wi::cmp (wi::to_wide (cst), wi::to_wide (inv), sgn);
          return cst1 ? res : -res;
        }

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
        return -2;

      if (TREE_CODE (val1) == INTEGER_CST
          && TREE_CODE (val2) == INTEGER_CST)
        return tree_int_cst_compare (val1, val2);

      if (poly_int_tree_p (val1) && poly_int_tree_p (val2))
        {
          if (known_eq (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 0;
          if (known_lt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return -1;
          if (known_gt (wi::to_poly_widest (val1),
                        wi::to_poly_widest (val2)))
            return 1;
        }

      return -2;
    }
  else
    {
      tree t;

      /* First see if VAL1 and VAL2 are not the same.  */
      if (val1 == val2 || operand_equal_p (val1, val2, 0))
        return 0;

      /* If VAL1 is a lower address than VAL2, return -1.  */
      if (operand_less_p (val1, val2) == 1)
        return -1;

      /* If VAL1 is a higher address than VAL2, return +1.  */
      if (operand_less_p (val2, val1) == 1)
        return 1;

      /* If VAL1 is different than VAL2, return +2.
         For integer constants we either have already returned -1 or 1
         or they are equivalent.  We still might succeed in proving
         something about non-trivial operands.  */
      if (TREE_CODE (val1) != INTEGER_CST
          || TREE_CODE (val2) != INTEGER_CST)
        {
          t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
          if (t && integer_onep (t))
            return 2;
        }

      return -2;
    }
}

/* Compare values like compare_values_warnv.  */

int
compare_values (tree val1, tree val2)
{
  bool sop;
  return compare_values_warnv (val1, val2, &sop);
}

/* Return 1 if VAL is inside value range MIN <= VAL <= MAX,
          0 if VAL is not inside [MIN, MAX],
         -2 if we cannot tell either way.

   Benchmark compile/20001226-1.c compilation time after changing this
   function.  */

int
value_inside_range (tree val, tree min, tree max)
{
  int cmp1, cmp2;

  cmp1 = operand_less_p (val, min);
  if (cmp1 == -2)
    return -2;
  if (cmp1 == 1)
    return 0;

  cmp2 = operand_less_p (max, val);
  if (cmp2 == -2)
    return -2;

  return !cmp2;
}

/* Return TRUE if *VR includes the value X.  */

bool
range_includes_p (const value_range_base *vr, HOST_WIDE_INT x)
{
  if (vr->varying_p () || vr->undefined_p ())
    return true;
  return vr->may_contain_p (build_int_cst (vr->type (), x));
}

/* If *VR has a value range that is a single constant value return that,
   otherwise return NULL_TREE.

   ???  This actually returns TRUE for [&x, &x], so perhaps "constant"
   is not the best name.  */

tree
value_range_constant_singleton (const value_range_base *vr)
{
  tree result = NULL;
  if (vr->singleton_p (&result))
    return result;
  return NULL;
}

/* Value range wrapper for wide_int_range_set_zero_nonzero_bits.

   Compute MAY_BE_NONZERO and MUST_BE_NONZERO bit masks for range in VR.

   Return TRUE if VR was a constant range and we were able to compute
   the bit masks.  */

static bool
vrp_set_zero_nonzero_bits (const tree expr_type,
                           const value_range_base *vr,
                           wide_int *may_be_nonzero,
                           wide_int *must_be_nonzero)
{
  if (!range_int_cst_p (vr))
    {
      *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
      *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
      return false;
    }
  wide_int_range_set_zero_nonzero_bits (TYPE_SIGN (expr_type),
                                        wi::to_wide (vr->min ()),
                                        wi::to_wide (vr->max ()),
                                        *may_be_nonzero, *must_be_nonzero);
  return true;
}

/* Create two value-ranges in *VR0 and *VR1 from the anti-range *AR
   so that *VR0 U *VR1 == *AR.  Returns true if that is possible,
   false otherwise.  If *AR can be represented with a single range
   *VR1 will be VR_UNDEFINED.  */

static bool
ranges_from_anti_range (const value_range_base *ar,
                        value_range_base *vr0, value_range_base *vr1)
{
  tree type = ar->type ();

  vr0->set_undefined ();
  vr1->set_undefined ();

  /* As a future improvement, we could handle ~[0, A] as: [-INF, -1] U
     [A+1, +INF].  Not sure if this helps in practice, though.  */

  if (ar->kind () != VR_ANTI_RANGE
      || TREE_CODE (ar->min ()) != INTEGER_CST
      || TREE_CODE (ar->max ()) != INTEGER_CST
      || !vrp_val_min (type)
      || !vrp_val_max (type))
    return false;

  if (tree_int_cst_lt (vrp_val_min (type), ar->min ()))
    vr0->set (VR_RANGE,
              vrp_val_min (type),
              wide_int_to_tree (type, wi::to_wide (ar->min ()) - 1));
  if (tree_int_cst_lt (ar->max (), vrp_val_max (type)))
    vr1->set (VR_RANGE,
              wide_int_to_tree (type, wi::to_wide (ar->max ()) + 1),
              vrp_val_max (type));
  if (vr0->undefined_p ())
    {
      *vr0 = *vr1;
      vr1->set_undefined ();
    }

  return !vr0->undefined_p ();
}

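/* Example for ranges_from_anti_range: on unsigned char, ~[3, 5] splits
   into *VR0 = [0, 2] and *VR1 = [6, 255], whereas ~[0, 5] is
   representable as the single range *VR0 = [6, 255] with *VR1 left
   VR_UNDEFINED.  */
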
/* Extract the components of a value range into a pair of wide ints in
   [WMIN, WMAX].

   If the value range is anything but a VR_*RANGE of constants, the
   resulting wide ints are set to [-MIN, +MAX] for the type.  */

static void
extract_range_into_wide_ints (const value_range_base *vr,
                              signop sign, unsigned prec,
                              wide_int &wmin, wide_int &wmax)
{
  gcc_assert (vr->kind () != VR_ANTI_RANGE || vr->symbolic_p ());
  if (range_int_cst_p (vr))
    {
      wmin = wi::to_wide (vr->min ());
      wmax = wi::to_wide (vr->max ());
    }
  else
    {
      wmin = wi::min_value (prec, sign);
      wmax = wi::max_value (prec, sign);
    }
}

/* Value range wrapper for wide_int_range_multiplicative_op:

     *VR = *VR0 .CODE. *VR1.  */

static void
extract_range_from_multiplicative_op (value_range_base *vr,
                                      enum tree_code code,
                                      const value_range_base *vr0,
                                      const value_range_base *vr1)
{
  gcc_assert (code == MULT_EXPR
              || code == TRUNC_DIV_EXPR
              || code == FLOOR_DIV_EXPR
              || code == CEIL_DIV_EXPR
              || code == EXACT_DIV_EXPR
              || code == ROUND_DIV_EXPR
              || code == RSHIFT_EXPR
              || code == LSHIFT_EXPR);
  gcc_assert (vr0->kind () == VR_RANGE
              && vr0->kind () == vr1->kind ());

  tree type = vr0->type ();
  wide_int res_lb, res_ub;
  wide_int vr0_lb = wi::to_wide (vr0->min ());
  wide_int vr0_ub = wi::to_wide (vr0->max ());
  wide_int vr1_lb = wi::to_wide (vr1->min ());
  wide_int vr1_ub = wi::to_wide (vr1->max ());
  bool overflow_undefined = TYPE_OVERFLOW_UNDEFINED (type);
  unsigned prec = TYPE_PRECISION (type);

  if (wide_int_range_multiplicative_op (res_lb, res_ub,
                                        code, TYPE_SIGN (type), prec,
                                        vr0_lb, vr0_ub, vr1_lb, vr1_ub,
                                        overflow_undefined))
    vr->set_and_canonicalize (VR_RANGE,
                              wide_int_to_tree (type, res_lb),
                              wide_int_to_tree (type, res_ub));
  else
    vr->set_varying ();
}

/* If BOUND will include a symbolic bound, adjust it accordingly,
   otherwise leave it as is.

   CODE is the original operation that combined the bounds (PLUS_EXPR
   or MINUS_EXPR).

   TYPE is the type of the original operation.

   SYM_OPn is the symbolic for OPn if it has a symbolic.

   NEG_OPn is TRUE if the OPn was negated.  */

static void
adjust_symbolic_bound (tree &bound, enum tree_code code, tree type,
                       tree sym_op0, tree sym_op1,
                       bool neg_op0, bool neg_op1)
{
  bool minus_p = (code == MINUS_EXPR);

  /* If the result bound is constant, we're done; otherwise, build the
     symbolic lower bound.  */
  if (sym_op0 == sym_op1)
    ;
  else if (sym_op0)
    bound = build_symbolic_expr (type, sym_op0,
                                 neg_op0, bound);
  else if (sym_op1)
    {
      /* We may not negate if that might introduce
         undefined overflow.  */
      if (!minus_p
          || neg_op1
          || TYPE_OVERFLOW_WRAPS (type))
        bound = build_symbolic_expr (type, sym_op1,
                                     neg_op1 ^ minus_p, bound);
      else
        bound = NULL_TREE;
    }
}

/* Combine OP0 and OP1, which are two parts of a bound, into one wide
   int bound according to CODE.  CODE is the operation combining the
   bound (either a PLUS_EXPR or a MINUS_EXPR).

   TYPE is the type of the combine operation.

   WI is the wide int to store the result.

   OVF is -1 if an underflow occurred, +1 if an overflow occurred or 0
   if no over/underflow occurred.  */

static void
combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
               tree type, tree op0, tree op1)
{
  bool minus_p = (code == MINUS_EXPR);
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* Combine the bounds, if any.  */
  if (op0 && op1)
    {
      if (minus_p)
        wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
      else
        wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
    }
  else if (op0)
    wi = wi::to_wide (op0);
  else if (op1)
    {
      if (minus_p)
        wi = wi::neg (wi::to_wide (op1), &ovf);
      else
        wi = wi::to_wide (op1);
    }
  else
    wi = wi::shwi (0, prec);
}

/* Given a range in [WMIN, WMAX], adjust it for possible overflow and
   put the result in VR.

   TYPE is the type of the range.

   MIN_OVF and MAX_OVF indicate what type of overflow, if any,
   occurred while originally calculating WMIN or WMAX.  -1 indicates
   underflow.  +1 indicates overflow.  0 indicates neither.  */

static void
set_value_range_with_overflow (value_range_kind &kind, tree &min, tree &max,
                               tree type,
                               const wide_int &wmin, const wide_int &wmax,
                               wi::overflow_type min_ovf,
                               wi::overflow_type max_ovf)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  /* For one bit precision if max < min, then the swapped
     range covers all values.  */
  if (prec == 1 && wi::lt_p (wmax, wmin, sgn))
    {
      kind = VR_VARYING;
      return;
    }

  if (TYPE_OVERFLOW_WRAPS (type))
    {
      /* If overflow wraps, truncate the values and adjust the
         range kind and bounds appropriately.  */
      wide_int tmin = wide_int::from (wmin, prec, sgn);
      wide_int tmax = wide_int::from (wmax, prec, sgn);
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          /* If the limits are swapped, we wrapped around and cover
             the entire range.  We have a similar check at the end of
             extract_range_from_binary_expr.  */
          if (wi::gt_p (tmin, tmax, sgn))
            kind = VR_VARYING;
          else
            {
              kind = VR_RANGE;
              /* No overflow or both overflow or underflow.  The
                 range kind stays VR_RANGE.  */
              min = wide_int_to_tree (type, tmin);
              max = wide_int_to_tree (type, tmax);
            }
        }
      else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
               || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        {
          /* Min underflow or max overflow.  The range kind
             changes to VR_ANTI_RANGE.  */
          bool covers = false;
          wide_int tem = tmin;
          tmin = tmax + 1;
          if (wi::cmp (tmin, tmax, sgn) < 0)
            covers = true;
          tmax = tem - 1;
          if (wi::cmp (tmax, tem, sgn) > 0)
            covers = true;
          /* If the anti-range would cover nothing, drop to varying.
             Likewise if the anti-range bounds are outside of the
             types values.  */
          if (covers || wi::cmp (tmin, tmax, sgn) > 0)
            {
              kind = VR_VARYING;
              return;
            }
          kind = VR_ANTI_RANGE;
          min = wide_int_to_tree (type, tmin);
          max = wide_int_to_tree (type, tmax);
        }
      else
        {
          /* Other underflow and/or overflow, drop to VR_VARYING.  */
          kind = VR_VARYING;
          return;
        }
    }
  else
    {
      /* If overflow does not wrap, saturate to the types min/max
         value.  */
      wide_int type_min = wi::min_value (prec, sgn);
      wide_int type_max = wi::max_value (prec, sgn);
      kind = VR_RANGE;
      if (min_ovf == wi::OVF_UNDERFLOW)
        min = wide_int_to_tree (type, type_min);
      else if (min_ovf == wi::OVF_OVERFLOW)
        min = wide_int_to_tree (type, type_max);
      else
        min = wide_int_to_tree (type, wmin);

      if (max_ovf == wi::OVF_UNDERFLOW)
        max = wide_int_to_tree (type, type_min);
      else if (max_ovf == wi::OVF_OVERFLOW)
        max = wide_int_to_tree (type, type_max);
      else
        max = wide_int_to_tree (type, wmax);
    }
}

/* Extract range information from a binary operation CODE based on
   the ranges of each of its operands *VR0 and *VR1 with resulting
   type EXPR_TYPE.  The resulting range is stored in *VR.  */

void
extract_range_from_binary_expr (value_range_base *vr,
                                enum tree_code code, tree expr_type,
                                const value_range_base *vr0_,
                                const value_range_base *vr1_)
{
  signop sign = TYPE_SIGN (expr_type);
  unsigned int prec = TYPE_PRECISION (expr_type);
  value_range_base vr0 = *vr0_, vr1 = *vr1_;
  value_range_base vrtem0, vrtem1;
  enum value_range_kind type;
  tree min = NULL_TREE, max = NULL_TREE;
  int cmp;

  if (!INTEGRAL_TYPE_P (expr_type)
      && !POINTER_TYPE_P (expr_type))
    {
      vr->set_varying ();
      return;
    }

  /* Not all binary expressions can be applied to ranges in a
     meaningful way.  Handle only arithmetic operations.  */
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR
      && code != MULT_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != RSHIFT_EXPR
      && code != LSHIFT_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR)
    {
      vr->set_varying ();
      return;
    }

  /* If both ranges are UNDEFINED, so is the result.  */
  if (vr0.undefined_p () && vr1.undefined_p ())
    {
      vr->set_undefined ();
      return;
    }
  /* If one of the ranges is UNDEFINED drop it to VARYING for the following
     code.  At some point we may want to special-case operations that
     have UNDEFINED result for all or some value-ranges of the not UNDEFINED
     operand.  */
  else if (vr0.undefined_p ())
    vr0.set_varying ();
  else if (vr1.undefined_p ())
    vr1.set_varying ();

  /* We get imprecise results from ranges_from_anti_range when
     code is EXACT_DIV_EXPR.  We could mask out bits in the resulting
     range, but then we also need to hack up vrp_union.  It's just
     easier to special case when vr0 is ~[0,0] for EXACT_DIV_EXPR.  */
  if (code == EXACT_DIV_EXPR && range_is_nonnull (&vr0))
    {
      vr->set_nonnull (expr_type);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express ~[] op X as ([]' op X) U ([]'' op X).  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr (vr, code, expr_type, &vrtem0, vr1_);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_binary_expr (&vrres, code, expr_type,
                                          &vrtem1, vr1_);
          vr->union_ (&vrres);
        }
      return;
    }
  /* Likewise for X op ~[].  */
  if (vr1.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr1, &vrtem0, &vrtem1))
    {
      extract_range_from_binary_expr (vr, code, expr_type, vr0_, &vrtem0);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_binary_expr (&vrres, code, expr_type,
                                          vr0_, &vrtem1);
          vr->union_ (&vrres);
        }
      return;
    }

  /* The type of the resulting value range defaults to VR0.TYPE.  */
  type = vr0.kind ();

  /* Refuse to operate on VARYING ranges, ranges of different kinds
     and symbolic ranges.  As an exception, we allow BIT_{AND,IOR}
     because we may be able to derive a useful range even if one of
     the operands is VR_VARYING or symbolic range.  Similarly for
     divisions, MIN/MAX and PLUS/MINUS.

     TODO, we may be able to derive anti-ranges in some cases.  */
  if (code != BIT_AND_EXPR
      && code != BIT_IOR_EXPR
      && code != TRUNC_DIV_EXPR
      && code != FLOOR_DIV_EXPR
      && code != CEIL_DIV_EXPR
      && code != EXACT_DIV_EXPR
      && code != ROUND_DIV_EXPR
      && code != TRUNC_MOD_EXPR
      && code != MIN_EXPR
      && code != MAX_EXPR
      && code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != RSHIFT_EXPR
      && code != POINTER_PLUS_EXPR
      && (vr0.varying_p ()
          || vr1.varying_p ()
          || vr0.kind () != vr1.kind ()
          || vr0.symbolic_p ()
          || vr1.symbolic_p ()))
    {
      vr->set_varying ();
      return;
    }

  /* Now evaluate the expression to determine the new range.  */
  if (POINTER_TYPE_P (expr_type))
    {
      if (code == MIN_EXPR || code == MAX_EXPR)
        {
          /* For MIN/MAX expressions with pointers, we only care about
             nullness, if both are non null, then the result is nonnull.
             If both are null, then the result is null.  Otherwise they
             are varying.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            vr->set_nonnull (expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            vr->set_null (expr_type);
          else
            vr->set_varying ();
        }
      else if (code == POINTER_PLUS_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.
             With -fno-delete-null-pointer-checks we need to be more
             conservative.  As some object might reside at address 0,
             then some offset could be added to it and the same offset
             subtracted again and the result would be NULL.
             E.g.
             static int a[12]; where &a[0] is NULL and
             ptr = &a[6];
             ptr -= 6;
             ptr will be NULL here, even when there is POINTER_PLUS_EXPR
             where the first range doesn't include zero and the second one
             doesn't either.  As the second operand is sizetype (unsigned),
             consider all ranges where the MSB could be set as possible
             subtractions where the result might be NULL.  */
          if ((!range_includes_zero_p (&vr0)
               || !range_includes_zero_p (&vr1))
              && !TYPE_OVERFLOW_WRAPS (expr_type)
              && (flag_delete_null_pointer_checks
                  || (range_int_cst_p (&vr1)
                      && !tree_int_cst_sign_bit (vr1.max ()))))
            vr->set_nonnull (expr_type);
          else if (range_is_null (&vr0) && range_is_null (&vr1))
            vr->set_null (expr_type);
          else
            vr->set_varying ();
        }
      else if (code == BIT_AND_EXPR)
        {
          /* For pointer types, we are really only interested in asserting
             whether the expression evaluates to non-NULL.  */
          if (!range_includes_zero_p (&vr0) && !range_includes_zero_p (&vr1))
            vr->set_nonnull (expr_type);
          else if (range_is_null (&vr0) || range_is_null (&vr1))
            vr->set_null (expr_type);
          else
            vr->set_varying ();
        }
      else
        vr->set_varying ();

      return;
    }

  /* For integer ranges, apply the operation to each end of the
     range and see what we end up with.  */
  if (code == PLUS_EXPR || code == MINUS_EXPR)
    {
      /* This will normalize things such that calculating
         [0,0] - VR_VARYING is not dropped to varying, but is
         calculated as [MIN+1, MAX].  */
      if (vr0.varying_p ())
        vr0.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));
      if (vr1.varying_p ())
        vr1.set (VR_RANGE, vrp_val_min (expr_type), vrp_val_max (expr_type));

      const bool minus_p = (code == MINUS_EXPR);
      tree min_op0 = vr0.min ();
      tree min_op1 = minus_p ? vr1.max () : vr1.min ();
      tree max_op0 = vr0.max ();
      tree max_op1 = minus_p ? vr1.min () : vr1.max ();
      tree sym_min_op0 = NULL_TREE;
      tree sym_min_op1 = NULL_TREE;
      tree sym_max_op0 = NULL_TREE;
      tree sym_max_op1 = NULL_TREE;
      bool neg_min_op0, neg_min_op1, neg_max_op0, neg_max_op1;

      neg_min_op0 = neg_min_op1 = neg_max_op0 = neg_max_op1 = false;

      /* If we have a PLUS or MINUS with two VR_RANGEs, either constant or
         single-symbolic ranges, try to compute the precise resulting range,
         but only if we know that this resulting range will also be constant
         or single-symbolic.  */
      if (vr0.kind () == VR_RANGE && vr1.kind () == VR_RANGE
          && (TREE_CODE (min_op0) == INTEGER_CST
              || (sym_min_op0
                  = get_single_symbol (min_op0, &neg_min_op0, &min_op0)))
          && (TREE_CODE (min_op1) == INTEGER_CST
              || (sym_min_op1
                  = get_single_symbol (min_op1, &neg_min_op1, &min_op1)))
          && (!(sym_min_op0 && sym_min_op1)
              || (sym_min_op0 == sym_min_op1
                  && neg_min_op0 == (minus_p ? neg_min_op1 : !neg_min_op1)))
          && (TREE_CODE (max_op0) == INTEGER_CST
              || (sym_max_op0
                  = get_single_symbol (max_op0, &neg_max_op0, &max_op0)))
          && (TREE_CODE (max_op1) == INTEGER_CST
              || (sym_max_op1
                  = get_single_symbol (max_op1, &neg_max_op1, &max_op1)))
          && (!(sym_max_op0 && sym_max_op1)
              || (sym_max_op0 == sym_max_op1
                  && neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
        {
          wide_int wmin, wmax;
          wi::overflow_type min_ovf = wi::OVF_NONE;
          wi::overflow_type max_ovf = wi::OVF_NONE;

          /* Build the bounds.  */
          combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
          combine_bound (code, wmax, max_ovf, expr_type, max_op0, max_op1);

          /* If we have overflow for the constant part and the resulting
             range will be symbolic, drop to VR_VARYING.  */
          if (((bool)min_ovf && sym_min_op0 != sym_min_op1)
              || ((bool)max_ovf && sym_max_op0 != sym_max_op1))
            {
              vr->set_varying ();
              return;
            }

          /* Adjust the range for possible overflow.  */
          min = NULL_TREE;
          max = NULL_TREE;
          set_value_range_with_overflow (type, min, max, expr_type,
                                         wmin, wmax, min_ovf, max_ovf);
          if (type == VR_VARYING)
            {
              vr->set_varying ();
              return;
            }

          /* Build the symbolic bounds if needed.  */
          adjust_symbolic_bound (min, code, expr_type,
                                 sym_min_op0, sym_min_op1,
                                 neg_min_op0, neg_min_op1);
          adjust_symbolic_bound (max, code, expr_type,
                                 sym_max_op0, sym_max_op1,
                                 neg_max_op0, neg_max_op1);
        }
      else
        {
          /* For other cases, for example if we have a PLUS_EXPR with two
             VR_ANTI_RANGEs, drop to VR_VARYING.  It would take more effort
             to compute a precise range for such a case.
             ???  General even mixed range kind operations can be expressed
             by for example transforming ~[3, 5] + [1, 2] to range-only
             operations and a union primitive:
               [-INF, 2] + [1, 2]  U  [5, +INF] + [1, 2]
                   [-INF+1, 4]     U    [6, +INF(OVF)]
             though usually the union is not exactly representable with
             a single range or anti-range as the above is
             [-INF+1, +INF(OVF)] intersected with ~[5, 5]
             but one could use a scheme similar to equivalences for this.  */
          vr->set_varying ();
          return;
        }
    }
  else if (code == MIN_EXPR
           || code == MAX_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      wide_int vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (wide_int_range_min_max (wmin, wmax, code, sign, prec,
                                  vr0_min, vr0_max, vr1_min, vr1_max))
        vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
                 wide_int_to_tree (expr_type, wmax));
      else
        vr->set_varying ();
      return;
    }
  else if (code == MULT_EXPR)
    {
      if (!range_int_cst_p (&vr0)
          || !range_int_cst_p (&vr1))
        {
          vr->set_varying ();
          return;
        }
      extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
      return;
    }
  else if (code == RSHIFT_EXPR
           || code == LSHIFT_EXPR)
    {
      if (range_int_cst_p (&vr1)
          && !wide_int_range_shift_undefined_p
               (TYPE_SIGN (TREE_TYPE (vr1.min ())),
                prec,
                wi::to_wide (vr1.min ()),
                wi::to_wide (vr1.max ())))
        {
          if (code == RSHIFT_EXPR)
            {
              /* Even if vr0 is VARYING or otherwise not usable, we can derive
                 useful ranges just from the shift count.  E.g.
                 x >> 63 for signed 64-bit x is always [-1, 0].  */
              if (vr0.kind () != VR_RANGE || vr0.symbolic_p ())
                vr0.set (VR_RANGE, vrp_val_min (expr_type),
                         vrp_val_max (expr_type));
              extract_range_from_multiplicative_op (vr, code, &vr0, &vr1);
              return;
            }
          else if (code == LSHIFT_EXPR
                   && range_int_cst_p (&vr0))
            {
              wide_int res_lb, res_ub;
              if (wide_int_range_lshift (res_lb, res_ub, sign, prec,
                                         wi::to_wide (vr0.min ()),
                                         wi::to_wide (vr0.max ()),
                                         wi::to_wide (vr1.min ()),
                                         wi::to_wide (vr1.max ()),
                                         TYPE_OVERFLOW_UNDEFINED (expr_type)))
                {
                  min = wide_int_to_tree (expr_type, res_lb);
                  max = wide_int_to_tree (expr_type, res_ub);
                  vr->set_and_canonicalize (VR_RANGE, min, max);
                  return;
                }
            }
        }
      vr->set_varying ();
      return;
    }
  else if (code == TRUNC_DIV_EXPR
           || code == FLOOR_DIV_EXPR
           || code == CEIL_DIV_EXPR
           || code == EXACT_DIV_EXPR
           || code == ROUND_DIV_EXPR)
    {
      wide_int dividend_min, dividend_max, divisor_min, divisor_max;
      wide_int wmin, wmax, extra_min, extra_max;
      bool extra_range_p;

      /* Special case explicit division by zero as undefined.  */
      if (range_is_null (&vr1))
        {
          vr->set_undefined ();
          return;
        }

      /* First, normalize ranges into constants we can handle.  Note
         that VR_ANTI_RANGE's of constants were already normalized
         before arriving here.

         NOTE: As a future improvement, we may be able to do better
         with mixed symbolic (anti-)ranges like [0, A].  See note in
         ranges_from_anti_range.  */
      extract_range_into_wide_ints (&vr0, sign, prec,
                                    dividend_min, dividend_max);
      extract_range_into_wide_ints (&vr1, sign, prec,
                                    divisor_min, divisor_max);
      if (!wide_int_range_div (wmin, wmax, code, sign, prec,
                               dividend_min, dividend_max,
                               divisor_min, divisor_max,
                               TYPE_OVERFLOW_UNDEFINED (expr_type),
                               extra_range_p, extra_min, extra_max))
        {
          vr->set_varying ();
          return;
        }
      vr->set (VR_RANGE, wide_int_to_tree (expr_type, wmin),
               wide_int_to_tree (expr_type, wmax));
      if (extra_range_p)
        {
          value_range_base
            extra_range (VR_RANGE, wide_int_to_tree (expr_type, extra_min),
                         wide_int_to_tree (expr_type, extra_max));
          vr->union_ (&extra_range);
        }
      return;
    }
  else if (code == TRUNC_MOD_EXPR)
    {
      if (range_is_null (&vr1))
        {
          vr->set_undefined ();
          return;
        }
      wide_int wmin, wmax, tmp;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      wide_int_range_trunc_mod (wmin, wmax, sign, prec,
                                vr0_min, vr0_max, vr1_min, vr1_max);
      min = wide_int_to_tree (expr_type, wmin);
      max = wide_int_to_tree (expr_type, wmax);
      vr->set (VR_RANGE, min, max);
      return;
    }
  else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
    {
      wide_int may_be_nonzero0, may_be_nonzero1;
      wide_int must_be_nonzero0, must_be_nonzero1;
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max, vr1_min, vr1_max;
      vrp_set_zero_nonzero_bits (expr_type, &vr0,
                                 &may_be_nonzero0, &must_be_nonzero0);
      vrp_set_zero_nonzero_bits (expr_type, &vr1,
                                 &may_be_nonzero1, &must_be_nonzero1);
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      extract_range_into_wide_ints (&vr1, sign, prec, vr1_min, vr1_max);
      if (code == BIT_AND_EXPR)
        {
          if (wide_int_range_bit_and (wmin, wmax, sign, prec,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying ();
          return;
        }
      else if (code == BIT_IOR_EXPR)
        {
          if (wide_int_range_bit_ior (wmin, wmax, sign,
                                      vr0_min, vr0_max,
                                      vr1_min, vr1_max,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying ();
          return;
        }
      else if (code == BIT_XOR_EXPR)
        {
          if (wide_int_range_bit_xor (wmin, wmax, sign, prec,
                                      must_be_nonzero0,
                                      may_be_nonzero0,
                                      must_be_nonzero1,
                                      may_be_nonzero1))
            {
              min = wide_int_to_tree (expr_type, wmin);
              max = wide_int_to_tree (expr_type, wmax);
              vr->set (VR_RANGE, min, max);
            }
          else
            vr->set_varying ();
          return;
        }
    }
  else
    gcc_unreachable ();

  /* If either MIN or MAX overflowed, then set the resulting range to
     VARYING.  */
  if (min == NULL_TREE
      || TREE_OVERFLOW_P (min)
      || max == NULL_TREE
      || TREE_OVERFLOW_P (max))
    {
      vr->set_varying ();
      return;
    }

  /* We punt for [-INF, +INF].
     We learn nothing when we have INF on both sides.
     Note that we do accept [-INF, -INF] and [+INF, +INF].  */
  if (vrp_val_is_min (min) && vrp_val_is_max (max))
    {
      vr->set_varying ();
      return;
    }

  cmp = compare_values (min, max);
  if (cmp == -2 || cmp == 1)
    {
      /* If the new range has its limits swapped around (MIN > MAX),
         then the operation caused one of them to wrap around, mark
         the new range VARYING.  */
      vr->set_varying ();
    }
  else
    vr->set (type, min, max);
}

/* Extract range information from a unary operation CODE based on
   the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
   The resulting range is stored in *VR.  */

void
extract_range_from_unary_expr (value_range_base *vr,
                               enum tree_code code, tree type,
                               const value_range_base *vr0_, tree op0_type)
{
  signop sign = TYPE_SIGN (type);
  unsigned int prec = TYPE_PRECISION (type);
  value_range_base vr0 = *vr0_;
  value_range_base vrtem0, vrtem1;

  /* VRP only operates on integral and pointer types.  */
  if (!(INTEGRAL_TYPE_P (op0_type)
        || POINTER_TYPE_P (op0_type))
      || !(INTEGRAL_TYPE_P (type)
           || POINTER_TYPE_P (type)))
    {
      vr->set_varying ();
      return;
    }

  /* If VR0 is UNDEFINED, so is the result.  */
  if (vr0.undefined_p ())
    {
      vr->set_undefined ();
      return;
    }

  /* Handle operations that we express in terms of others.  */
  if (code == PAREN_EXPR)
    {
      /* PAREN_EXPR and OBJ_TYPE_REF are simple copies.  */
      *vr = vr0;
      return;
    }
  else if (code == NEGATE_EXPR)
    {
      /* -X is simply 0 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_base zero;
      zero.set (build_int_cst (type, 0));
      extract_range_from_binary_expr (vr, MINUS_EXPR, type, &zero, &vr0);
      return;
    }
  else if (code == BIT_NOT_EXPR)
    {
      /* ~X is simply -1 - X, so re-use existing code that also handles
         anti-ranges fine.  */
      value_range_base minusone;
      minusone.set (build_int_cst (type, -1));
      extract_range_from_binary_expr (vr, MINUS_EXPR, type, &minusone, &vr0);
      return;
    }

  /* Now canonicalize anti-ranges to ranges when they are not symbolic
     and express op ~[] as (op []') U (op []'').  */
  if (vr0.kind () == VR_ANTI_RANGE
      && ranges_from_anti_range (&vr0, &vrtem0, &vrtem1))
    {
      extract_range_from_unary_expr (vr, code, type, &vrtem0, op0_type);
      if (!vrtem1.undefined_p ())
        {
          value_range_base vrres;
          extract_range_from_unary_expr (&vrres, code, type,
                                         &vrtem1, op0_type);
          vr->union_ (&vrres);
        }
      return;
    }

  if (CONVERT_EXPR_CODE_P (code))
    {
      tree inner_type = op0_type;
      tree outer_type = type;

      /* If the expression involves a pointer, we are only interested in
         determining if it evaluates to NULL [0, 0] or non-NULL (~[0, 0]).

         This may lose precision when converting (char *)~[0,2] to
         int, because we'll forget that the pointer can also not be 1
         or 2.  In practice we don't care, as this is some idiot
         storing a magic constant to a pointer.  */
      if (POINTER_TYPE_P (type) || POINTER_TYPE_P (op0_type))
        {
          if (!range_includes_zero_p (&vr0))
            vr->set_nonnull (type);
          else if (range_is_null (&vr0))
            vr->set_null (type);
          else
            vr->set_varying ();
          return;
        }

      /* The POINTER_TYPE_P code above will have dealt with all
         pointer anti-ranges.  Any remaining anti-ranges at this point
         will be integer conversions from SSA names that will be
         normalized into VARYING.  For instance: ~[x_55, x_55].  */
      gcc_assert (vr0.kind () != VR_ANTI_RANGE
                  || TREE_CODE (vr0.min ()) != INTEGER_CST);

      /* NOTES: Previously we were returning VARYING for all symbolics, but
         we can do better by treating them as [-MIN, +MAX].  For
         example, converting [SYM, SYM] from INT to LONG UNSIGNED,
         we can return: ~[0x8000000, 0xffffffff7fffffff].

         We were also failing to convert ~[0,0] from char* to unsigned,
         instead choosing to return VR_VARYING.  Now we return ~[0,0].  */
      wide_int vr0_min, vr0_max, wmin, wmax;
      signop inner_sign = TYPE_SIGN (inner_type);
      signop outer_sign = TYPE_SIGN (outer_type);
      unsigned inner_prec = TYPE_PRECISION (inner_type);
      unsigned outer_prec = TYPE_PRECISION (outer_type);
      extract_range_into_wide_ints (&vr0, inner_sign, inner_prec,
                                    vr0_min, vr0_max);
      if (wide_int_range_convert (wmin, wmax,
                                  inner_sign, inner_prec,
                                  outer_sign, outer_prec,
                                  vr0_min, vr0_max))
        {
          tree min = wide_int_to_tree (outer_type, wmin);
          tree max = wide_int_to_tree (outer_type, wmax);
          vr->set_and_canonicalize (VR_RANGE, min, max);
        }
      else
        vr->set_varying ();
      return;
    }
  else if (code == ABS_EXPR)
    {
      wide_int wmin, wmax;
      wide_int vr0_min, vr0_max;
      extract_range_into_wide_ints (&vr0, sign, prec, vr0_min, vr0_max);
      if (wide_int_range_abs (wmin, wmax, sign, prec, vr0_min, vr0_max,
                              TYPE_OVERFLOW_UNDEFINED (type)))
        vr->set (VR_RANGE, wide_int_to_tree (type, wmin),
                 wide_int_to_tree (type, wmax));
      else
        vr->set_varying ();
      return;
    }

  /* For unhandled operations fall back to varying.  */
  vr->set_varying ();
  return;
}

/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
   create a new SSA name N and return the assertion assignment
   'N = ASSERT_EXPR <V, V OP W>'.  */

static gimple *
build_assert_expr_for (tree cond, tree v)
{
  tree a;
  gassign *assertion;

  gcc_assert (TREE_CODE (v) == SSA_NAME
              && COMPARISON_CLASS_P (cond));

  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
  assertion = gimple_build_assign (NULL_TREE, a);

  /* The new ASSERT_EXPR creates a new SSA name that replaces the
     operand of the ASSERT_EXPR.  Create it so the new name and the old one
     are registered in the replacement table so that we can fix the SSA web
     after adding all the ASSERT_EXPRs.  */
  tree new_def = create_new_def_for (v, assertion, NULL);

  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
     given we have to be able to fully propagate those out to re-create
     valid SSA when removing the asserts.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;

  return assertion;
}

/* Return true if STMT is a predicate expression involving floating
   point values.  */

static bool
fp_predicate (gimple *stmt)
{
  GIMPLE_CHECK (stmt, GIMPLE_COND);
  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
}

/* If the range of values taken by OP can be inferred after STMT executes,
   return the comparison code (COMP_CODE_P) and value (VAL_P) that
   describes the inferred range.  Return true if a range could be
   inferred.  */

static bool
infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
{
  *val_p = NULL_TREE;
  *comp_code_p = ERROR_MARK;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
    return false;

  /* If STMT is the last statement of a basic block with no normal
     successors, there is no point inferring anything about any of its
     operands.  We would not be able to find a proper insertion point
     for the assertion, anyway.  */
  if (stmt_ends_bb_p (stmt))
    {
      edge_iterator ei;
      edge e;

      FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
	if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
	  break;
      if (e == NULL)
	return false;
    }

  if (infer_nonnull_range (stmt, op))
    {
      *val_p = build_int_cst (TREE_TYPE (op), 0);
      *comp_code_p = NE_EXPR;
      return true;
    }

  return false;
}
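
/* For example (hypothetical SSA names): after the load

     tmp_5 = *p_4;

   executes, p_4 must have been non-NULL, so infer_value_range returns
   true with *COMP_CODE_P = NE_EXPR and *VAL_P = 0, i.e. the inferred
   range p_4 != 0 for everything the statement dominates.  */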
void dump_asserts_for (FILE *, tree);
void debug_asserts_for (tree);
void dump_all_asserts (FILE *);
void debug_all_asserts (void);

/* Dump all the registered assertions for NAME to FILE.  */

void
dump_asserts_for (FILE *file, tree name)
{
  assert_locus *loc;

  fprintf (file, "Assertions to be inserted for ");
  print_generic_expr (file, name);
  fprintf (file, "\n");

  loc = asserts_for[SSA_NAME_VERSION (name)];
  while (loc)
    {
      fprintf (file, "\t");
      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
      fprintf (file, "\n\tBB #%d", loc->bb->index);
      if (loc->e)
	{
	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
		   loc->e->dest->index);
	  dump_edge_info (file, loc->e, dump_flags, 0);
	}
      fprintf (file, "\n\tPREDICATE: ");
      print_generic_expr (file, loc->expr);
      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
      print_generic_expr (file, loc->val);
      fprintf (file, "\n\n");
      loc = loc->next;
    }

  fprintf (file, "\n");
}

/* Dump all the registered assertions for NAME to stderr.  */

DEBUG_FUNCTION void
debug_asserts_for (tree name)
{
  dump_asserts_for (stderr, name);
}

/* Dump all the registered assertions for all the names to FILE.  */

void
dump_all_asserts (FILE *file)
{
  unsigned i;
  bitmap_iterator bi;

  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    dump_asserts_for (file, ssa_name (i));
  fprintf (file, "\n");
}

/* Dump all the registered assertions for all the names to stderr.  */

DEBUG_FUNCTION void
debug_all_asserts (void)
{
  dump_all_asserts (stderr);
}
/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS.  */

static void
add_assert_info (vec<assert_info> &asserts,
		 tree name, tree expr, enum tree_code comp_code, tree val)
{
  assert_info info;
  info.comp_code = comp_code;
  info.name = name;
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);
  info.val = val;
  info.expr = expr;
  asserts.safe_push (info);
  if (dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "Adding assert for %T from %T %s %T\n",
		 name, expr, op_symbol_code (comp_code), val);
}
/* If NAME doesn't have an ASSERT_EXPR registered for asserting
   'EXPR COMP_CODE VAL' at a location that dominates block BB or
   E->DEST, then register this location as a possible insertion point
   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.

   BB, E and SI provide the exact insertion point for the new
   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
   must not be NULL.  */

static void
register_new_assert_for (tree name, tree expr,
			 enum tree_code comp_code,
			 tree val,
			 basic_block bb,
			 edge e,
			 gimple_stmt_iterator si)
{
  assert_locus *n, *loc, *last_loc;
  basic_block dest_bb;

  gcc_checking_assert (bb == NULL || e == NULL);

  if (e == NULL)
    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);

  /* Never build an assert comparing against an integer constant with
     TREE_OVERFLOW set.  This confuses our undefined overflow warning
     machinery.  */
  if (TREE_OVERFLOW_P (val))
    val = drop_tree_overflow (val);

  /* The new assertion A will be inserted at BB or E.  We need to
     determine if the new location is dominated by a previously
     registered location for A.  If we are doing an edge insertion,
     assume that A will be inserted at E->DEST.  Note that this is not
     necessarily true.

     If E is a critical edge, it will be split.  But even if E is
     split, the new block will dominate the same set of blocks that
     E->DEST dominates.

     The reverse, however, is not true, blocks dominated by E->DEST
     will not be dominated by the new block created to split E.  So,
     if the insertion location is on a critical edge, we will not use
     the new location to move another assertion previously registered
     at a block dominated by E->DEST.  */
  dest_bb = (bb) ? bb : e->dest;

  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
     VAL at a block dominating DEST_BB, then we don't need to insert a new
     one.  Similarly, if the same assertion already exists at a block
     dominated by DEST_BB and the new location is not on a critical
     edge, then update the existing location for the assertion (i.e.,
     move the assertion up in the dominance tree).

     Note, this is implemented as a simple linked list because there
     should not be more than a handful of assertions registered per
     name.  If this becomes a performance problem, a table hashed by
     COMP_CODE and VAL could be implemented.  */
  loc = asserts_for[SSA_NAME_VERSION (name)];
  last_loc = loc;
  while (loc)
    {
      if (loc->comp_code == comp_code
	  && (loc->val == val
	      || operand_equal_p (loc->val, val, 0))
	  && (loc->expr == expr
	      || operand_equal_p (loc->expr, expr, 0)))
	{
	  /* If E is not a critical edge and DEST_BB
	     dominates the existing location for the assertion, move
	     the assertion up in the dominance tree by updating its
	     location information.  */
	  if ((e == NULL || !EDGE_CRITICAL_P (e))
	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
	    {
	      loc->bb = dest_bb;
	      loc->e = e;
	      loc->si = si;
	      return;
	    }
	}

      /* Update the last node of the list and move to the next one.  */
      last_loc = loc;
      loc = loc->next;
    }

  /* If we didn't find an assertion already registered for
     NAME COMP_CODE VAL, add a new one at the end of the list of
     assertions associated with NAME.  */
  n = XNEW (struct assert_locus);
  n->bb = dest_bb;
  n->e = e;
  n->si = si;
  n->comp_code = comp_code;
  n->val = val;
  n->expr = expr;
  n->next = NULL;

  if (last_loc)
    last_loc->next = n;
  else
    asserts_for[SSA_NAME_VERSION (name)] = n;

  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
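
/* Example of the hoisting rule above (block numbers are invented): if
   x_1 != 0 is already recorded for a location in BB4 and the new
   location BB2 dominates BB4 (and is not on a critical edge), the
   existing entry is simply relocated to BB2 instead of growing the
   list with a second, redundant assertion.  */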
/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
   Extract a suitable test code and value and store them into *CODE_P and
   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.

   If no extraction was possible, return FALSE, otherwise return TRUE.

   If INVERT is true, then we invert the result stored into *CODE_P.  */

static bool
extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
					 tree cond_op0, tree cond_op1,
					 bool invert, enum tree_code *code_p,
					 tree *val_p)
{
  enum tree_code comp_code;
  tree val;

  /* Otherwise, we have a comparison of the form NAME COMP VAL
     or VAL COMP NAME.  */
  if (name == cond_op1)
    {
      /* If the predicate is of the form VAL COMP NAME, flip
	 COMP around because we need to register NAME as the
	 first operand in the predicate.  */
      comp_code = swap_tree_comparison (cond_code);
      val = cond_op0;
    }
  else if (name == cond_op0)
    {
      /* The comparison is of the form NAME COMP VAL, so the
	 comparison code remains unchanged.  */
      comp_code = cond_code;
      val = cond_op1;
    }
  else
    gcc_unreachable ();

  /* Invert the comparison code as necessary.  */
  if (invert)
    comp_code = invert_tree_comparison (comp_code, 0);

  /* VRP only handles integral and pointer types.  */
  if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
      && ! POINTER_TYPE_P (TREE_TYPE (val)))
    return false;

  /* Do not register always-false predicates.
     FIXME:  this works around a limitation in fold() when dealing with
     enumerations.  Given 'enum { N1, N2 } x;', fold will not
     fold 'if (x > N2)' to 'if (0)'.  */
  if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (val)))
    {
      tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
      tree max = TYPE_MAX_VALUE (TREE_TYPE (val));

      if (comp_code == GT_EXPR
	  && (!max
	      || compare_values (val, max) == 0))
	return false;

      if (comp_code == LT_EXPR
	  && (!min
	      || compare_values (val, min) == 0))
	return false;
    }
  *code_p = comp_code;
  *val_p = val;
  return true;
}
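
/* E.g. (with an invented SSA name): the predicate '10 < x_7'
   normalizes to x_7 > 10, and with INVERT set (the else edge) to
   x_7 <= 10.  */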
/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
   (otherwise return VAL).  VAL and MASK must be zero-extended for
   precision PREC.  If SGNBIT is non-zero, first xor VAL with SGNBIT
   (to transform signed values into unsigned) and at the end xor
   SGNBIT back.  */

static wide_int
masked_increment (const wide_int &val_in, const wide_int &mask,
		  const wide_int &sgnbit, unsigned int prec)
{
  wide_int bit = wi::one (prec), res;
  unsigned int i;

  wide_int val = val_in ^ sgnbit;
  for (i = 0; i < prec; i++, bit += bit)
    {
      res = mask;
      if ((res & bit) == 0)
	continue;
      res = bit - 1;
      res = wi::bit_and_not (val + bit, res);
      res &= mask;
      if (wi::gtu_p (res, val))
	return res ^ sgnbit;
    }
  return val ^ sgnbit;
}
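
/* A worked example (values chosen purely for illustration): with
   PREC = 8, MASK = 0xf0, SGNBIT = 0 and VAL = 0x25, the first bit set
   in MASK that the loop reaches is 0x10; then
   ((0x25 + 0x10) & ~0x0f) & 0xf0 = 0x30, which is greater than VAL,
   so 0x30 is returned: the smallest RES > 0x25 with
   (RES & 0xf0) == RES.  */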
/* Helper for overflow_comparison_p

   OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   REVERSED indicates if the comparison was originally:

   OP1 CODE' OP0.

   This affects how we build the updated constant.  */

static bool
overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
			 bool follow_assert_exprs, bool reversed, tree *new_cst)
{
  /* See if this is a relational operation between two SSA_NAMES with
     unsigned, overflow wrapping values.  If so, check it more deeply.  */
  if ((code == LT_EXPR || code == LE_EXPR
       || code == GE_EXPR || code == GT_EXPR)
      && TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == SSA_NAME
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
    {
      gimple *op1_def = SSA_NAME_DEF_STMT (op1);

      /* If requested, follow any ASSERT_EXPRs backwards for OP1.  */
      if (follow_assert_exprs)
	{
	  while (gimple_assign_single_p (op1_def)
		 && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
	    {
	      op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
	      if (TREE_CODE (op1) != SSA_NAME)
		break;
	      op1_def = SSA_NAME_DEF_STMT (op1);
	    }
	}

      /* Now look at the defining statement of OP1 to see if it adds
	 or subtracts a nonzero constant from another operand.  */
      if (op1_def
	  && is_gimple_assign (op1_def)
	  && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
	  && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
	  && !integer_zerop (gimple_assign_rhs2 (op1_def)))
	{
	  tree target = gimple_assign_rhs1 (op1_def);

	  /* If requested, follow ASSERT_EXPRs backwards for op0 looking
	     for one where TARGET appears on the RHS.  */
	  if (follow_assert_exprs)
	    {
	      /* Now see if that "other operand" is op0, following the chain
		 of ASSERT_EXPRs if necessary.  */
	      gimple *op0_def = SSA_NAME_DEF_STMT (op0);
	      while (op0 != target
		     && gimple_assign_single_p (op0_def)
		     && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
		{
		  op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
		  if (TREE_CODE (op0) != SSA_NAME)
		    break;
		  op0_def = SSA_NAME_DEF_STMT (op0);
		}
	    }

	  /* If we did not find our target SSA_NAME, then this is not
	     an overflow test.  */
	  if (op0 != target)
	    return false;

	  tree type = TREE_TYPE (op0);
	  wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
	  tree inc = gimple_assign_rhs2 (op1_def);
	  if (reversed)
	    *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
	  else
	    *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
	  return true;
	}
    }
  return false;
}

/* OP0 CODE OP1 is a comparison.  Examine the comparison and potentially
   OP1's defining statement to see if it ultimately has the form
   OP0 CODE (OP0 PLUS INTEGER_CST)

   If so, return TRUE indicating this is an overflow test and store into
   *NEW_CST an updated constant that can be used in a narrowed range test.

   These statements are left as-is in the IL to facilitate discovery of
   {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline.  But
   the alternate range representation is often useful within VRP.  */

bool
overflow_comparison_p (tree_code code, tree name, tree val,
		       bool use_equiv_p, tree *new_cst)
{
  if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
    return true;
  return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
				  use_equiv_p, true, new_cst);
}
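
/* Typical shape of such a test (hypothetical SSA names, unsigned char
   operands):

     x_3 = x_2 + 250;
     if (x_2 > x_3)   // true iff the addition wrapped

   The comparison holds iff x_2 > 255 - 250, so *NEW_CST is set to 5
   and the overflow test can be narrowed to x_2 > 5.  */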
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   BSI.  Invert the condition COND if INVERT is true.  */

static void
register_edge_assert_for_2 (tree name, edge e,
			    enum tree_code cond_code,
			    tree cond_op0, tree cond_op1, bool invert,
			    vec<assert_info> &asserts)
{
  tree val;
  enum tree_code comp_code;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0,
						cond_op1,
						invert, &comp_code, &val))
    return;

  /* Queue the assert.  */
  tree x;
  if (overflow_comparison_p (comp_code, name, val, false, &x))
    {
      enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
				 ? GT_EXPR : LE_EXPR);
      add_assert_info (asserts, name, name, new_code, x);
    }
  add_assert_info (asserts, name, name, comp_code, val);

  /* In the case of NAME <= CST and NAME being defined as
     NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
     and NAME2 <= CST - CST2.  We can do the same for NAME > CST.
     This catches range and anti-range tests.  */
  if ((comp_code == LE_EXPR
       || comp_code == GT_EXPR)
      && TREE_CODE (val) == INTEGER_CST
      && TYPE_UNSIGNED (TREE_TYPE (val)))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;

      /* Extract CST2 from the (optional) addition.  */
      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && TREE_CODE (cst2) == INTEGER_CST)
	    def_stmt = SSA_NAME_DEF_STMT (name2);
	}

      /* Extract NAME2 from the (optional) sign-changing cast.  */
      if (gimple_assign_cast_p (def_stmt))
	{
	  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
	      && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
	      && (TYPE_PRECISION (gimple_expr_type (def_stmt))
		  == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
	    name3 = gimple_assign_rhs1 (def_stmt);
	}

      /* If name3 is used later, create an ASSERT_EXPR for it.  */
      if (name3 != NULL_TREE
	  && TREE_CODE (name3) == SSA_NAME
	  && (cst2 == NULL_TREE
	      || TREE_CODE (cst2) == INTEGER_CST)
	  && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
	  add_assert_info (asserts, name3, tmp, comp_code, val);
	}

      /* If name2 is used later, create an ASSERT_EXPR for it.  */
      if (name2 != NULL_TREE
	  && TREE_CODE (name2) == SSA_NAME
	  && TREE_CODE (cst2) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
	{
	  tree tmp;

	  /* Build an expression for the range test.  */
	  tmp = name2;
	  if (TREE_TYPE (name) != TREE_TYPE (name2))
	    tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
	  if (cst2 != NULL_TREE)
	    tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
	  add_assert_info (asserts, name2, tmp, comp_code, val);
	}
    }
  /* In the case of post-in/decrement tests like if (i++) ... and uses
     of the in/decremented value on the edge the extra name we want to
     assert for is not on the def chain of the name compared.  Instead
     it is in the set of use stmts.
     Similar cases happen for conversions that were simplified through
     fold_{sign_changed,widened}_comparison.  */
  if ((comp_code == NE_EXPR
       || comp_code == EQ_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      imm_use_iterator ui;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
	{
	  if (!is_gimple_assign (use_stmt))
	    continue;

	  /* Cut off to use-stmts that are dominating the predecessor.  */
	  if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
	    continue;

	  tree name2 = gimple_assign_lhs (use_stmt);
	  if (TREE_CODE (name2) != SSA_NAME)
	    continue;

	  enum tree_code code = gimple_assign_rhs_code (use_stmt);
	  tree cst;
	  if (code == PLUS_EXPR
	      || code == MINUS_EXPR)
	    {
	      cst = gimple_assign_rhs2 (use_stmt);
	      if (TREE_CODE (cst) != INTEGER_CST)
		continue;
	      cst = int_const_binop (code, val, cst);
	    }
	  else if (CONVERT_EXPR_CODE_P (code))
	    {
	      /* For truncating conversions we cannot record
		 an inequality.  */
	      if (comp_code == NE_EXPR
		  && (TYPE_PRECISION (TREE_TYPE (name2))
		      < TYPE_PRECISION (TREE_TYPE (name))))
		continue;
	      cst = fold_convert (TREE_TYPE (name2), val);
	    }
	  else
	    continue;

	  if (TREE_OVERFLOW_P (cst))
	    cst = drop_tree_overflow (cst);
	  add_assert_info (asserts, name2, name2, comp_code, cst);
	}
    }
  if (TREE_CODE_CLASS (comp_code) == tcc_comparison
      && TREE_CODE (val) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
      tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
      tree val2 = NULL_TREE;
      unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
      wide_int mask = wi::zero (prec);
      unsigned int nprec = prec;
      enum tree_code rhs_code = ERROR_MARK;

      if (is_gimple_assign (def_stmt))
	rhs_code = gimple_assign_rhs_code (def_stmt);

      /* In the case of NAME != CST1 where NAME = A +- CST2 we can
         assert that A != CST1 -+ CST2.  */
      if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
	  && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    {
	      enum tree_code reverse_op = (rhs_code == PLUS_EXPR
					   ? MINUS_EXPR : PLUS_EXPR);
	      op1 = int_const_binop (reverse_op, val, op1);
	      if (TREE_OVERFLOW (op1))
		op1 = drop_tree_overflow (op1);
	      add_assert_info (asserts, op0, op0, comp_code, op1);
	    }
	}

      /* Add asserts for NAME cmp CST and NAME being defined
	 as NAME = (int) NAME2.  */
      if (!TYPE_UNSIGNED (TREE_TYPE (val))
	  && (comp_code == LE_EXPR || comp_code == LT_EXPR
	      || comp_code == GT_EXPR || comp_code == GE_EXPR)
	  && gimple_assign_cast_p (def_stmt))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (CONVERT_EXPR_CODE_P (rhs_code)
	      && TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TYPE_UNSIGNED (TREE_TYPE (name2))
	      && prec == TYPE_PRECISION (TREE_TYPE (name2))
	      && (comp_code == LE_EXPR || comp_code == GT_EXPR
		  || !tree_int_cst_equal (val,
					  TYPE_MIN_VALUE (TREE_TYPE (val)))))
	    {
	      tree tmp, cst;
	      enum tree_code new_comp_code = comp_code;

	      cst = fold_convert (TREE_TYPE (name2),
				  TYPE_MIN_VALUE (TREE_TYPE (val)));
	      /* Build an expression for the range test.  */
	      tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
	      cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
				 fold_convert (TREE_TYPE (name2), val));
	      if (comp_code == LT_EXPR || comp_code == GE_EXPR)
		{
		  new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
		  cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
				     build_int_cst (TREE_TYPE (name2), 1));
		}
	      add_assert_info (asserts, name2, tmp, new_comp_code, cst);
	    }
	}
      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 >> CST2.

	 Extract CST2 from the right shift.  */
      if (rhs_code == RSHIFT_EXPR)
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  cst2 = gimple_assign_rhs2 (def_stmt);
	  if (TREE_CODE (name2) == SSA_NAME
	      && tree_fits_uhwi_p (cst2)
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
	      && type_has_mode_precision_p (TREE_TYPE (val)))
	    {
	      mask = wi::mask (tree_to_uhwi (cst2), false, prec);
	      val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
	    }
	}
      if (val2 != NULL_TREE
	  && TREE_CODE (val2) == INTEGER_CST
	  && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
					    TREE_TYPE (val),
					    val2, cst2), val))
	{
	  enum tree_code new_comp_code = comp_code;
	  tree tmp, new_val;

	  tmp = name2;
	  if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
	    {
	      if (!TYPE_UNSIGNED (TREE_TYPE (val)))
		{
		  tree type = build_nonstandard_integer_type (prec, 1);
		  tmp = build1 (NOP_EXPR, type, name2);
		  val2 = fold_convert (type, val2);
		}
	      tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
	      new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
	      new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
	    }
	  else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
	    {
	      wide_int minval
		= wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      new_val = val2;
	      if (minval == wi::to_wide (new_val))
		new_val = NULL_TREE;
	    }
	  else
	    {
	      wide_int maxval
		= wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
	      mask |= wi::to_wide (val2);
	      if (wi::eq_p (mask, maxval))
		new_val = NULL_TREE;
	      else
		new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
	    }

	  if (new_val)
	    add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
	}
      /* If we have a conversion that doesn't change the value of the source
         simply register the same assert for it.  */
      if (CONVERT_EXPR_CODE_P (rhs_code))
	{
	  wide_int rmin, rmax;
	  tree rhs1 = gimple_assign_rhs1 (def_stmt);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	      && TREE_CODE (rhs1) == SSA_NAME
	      /* Make sure the relation preserves the upper/lower boundary of
	         the range conservatively.  */
	      && (comp_code == NE_EXPR
		  || comp_code == EQ_EXPR
		  || (TYPE_SIGN (TREE_TYPE (name))
		      == TYPE_SIGN (TREE_TYPE (rhs1)))
		  || ((comp_code == LE_EXPR
		       || comp_code == LT_EXPR)
		      && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
		  || ((comp_code == GE_EXPR
		       || comp_code == GT_EXPR)
		      && TYPE_UNSIGNED (TREE_TYPE (rhs1))))
	      /* And the conversion does not alter the value we compare
	         against and all values in rhs1 can be represented in
		 the converted to type.  */
	      && int_fits_type_p (val, TREE_TYPE (rhs1))
	      && ((TYPE_PRECISION (TREE_TYPE (name))
		   > TYPE_PRECISION (TREE_TYPE (rhs1)))
		  || (get_range_info (rhs1, &rmin, &rmax) == VR_RANGE
		      && wi::fits_to_tree_p (rmin, TREE_TYPE (name))
		      && wi::fits_to_tree_p (rmax, TREE_TYPE (name)))))
	    add_assert_info (asserts, rhs1, rhs1,
			     comp_code, fold_convert (TREE_TYPE (rhs1), val));
	}
      /* Add asserts for NAME cmp CST and NAME being defined as
	 NAME = NAME2 & CST2.

	 Extract CST2 from the and.

	 Also handle
	 NAME = (unsigned) NAME2;
	 casts where NAME's type is unsigned and has smaller precision
	 than NAME2's type as if it was NAME = NAME2 & MASK.  */
      names[0] = NULL_TREE;
      names[1] = NULL_TREE;
      cst2 = NULL_TREE;
      if (rhs_code == BIT_AND_EXPR
	  || (CONVERT_EXPR_CODE_P (rhs_code)
	      && INTEGRAL_TYPE_P (TREE_TYPE (val))
	      && TYPE_UNSIGNED (TREE_TYPE (val))
	      && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
		 > prec))
	{
	  name2 = gimple_assign_rhs1 (def_stmt);
	  if (rhs_code == BIT_AND_EXPR)
	    cst2 = gimple_assign_rhs2 (def_stmt);
	  else
	    {
	      cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
	      nprec = TYPE_PRECISION (TREE_TYPE (name2));
	    }
	  if (TREE_CODE (name2) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (name2))
	      && TREE_CODE (cst2) == INTEGER_CST
	      && !integer_zerop (cst2)
	      && (nprec > 1
		  || TYPE_UNSIGNED (TREE_TYPE (val))))
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
	      if (gimple_assign_cast_p (def_stmt2))
		{
		  names[1] = gimple_assign_rhs1 (def_stmt2);
		  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
		      || TREE_CODE (names[1]) != SSA_NAME
		      || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
		      || (TYPE_PRECISION (TREE_TYPE (name2))
			  != TYPE_PRECISION (TREE_TYPE (names[1]))))
		    names[1] = NULL_TREE;
		}
	      names[0] = name2;
	    }
	}
      if (names[0] || names[1])
	{
	  wide_int minv, maxv, valv, cst2v;
	  wide_int tem, sgnbit;
	  bool valid_p = false, valn, cst2n;
	  enum tree_code ccode = comp_code;

	  valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
	  cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
	  valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
	  cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
	  /* If CST2 doesn't have most significant bit set,
	     but VAL is negative, we have comparison like
	     if ((x & 0x123) > -4) (always true).  Just give up.  */
	  if (!cst2n && valn)
	    ccode = ERROR_MARK;
	  if (cst2n)
	    sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	  else
	    sgnbit = wi::zero (nprec);
	  minv = valv & cst2v;
	  switch (ccode)
	    {
	    case EQ_EXPR:
	      /* Minimum unsigned value for equality is VAL & CST2
		 (should be equal to VAL, otherwise we probably should
		 have folded the comparison into false) and
		 maximum unsigned value is VAL | ~CST2.  */
	      maxv = valv | ~cst2v;
	      valid_p = true;
	      break;

	    case NE_EXPR:
	      tem = valv | ~cst2v;
	      /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U.  */
	      if (valv == 0)
		{
		  cst2n = false;
		  sgnbit = wi::zero (nprec);
		  goto gt_expr;
		}
	      /* If (VAL | ~CST2) is all ones, handle it as
		 (X & CST2) < VAL.  */
	      if (tem == -1)
		{
		  cst2n = false;
		  valn = false;
		  sgnbit = wi::zero (nprec);
		  goto lt_expr;
		}
	      if (!cst2n && wi::neg_p (cst2v))
		sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
	      if (sgnbit != 0)
		{
		  if (valv == sgnbit)
		    {
		      cst2n = true;
		      valn = true;
		      goto gt_expr;
		    }
		  if (tem == wi::mask (nprec - 1, false, nprec))
		    {
		      cst2n = true;
		      goto lt_expr;
		    }
		  if (!cst2n)
		    sgnbit = wi::zero (nprec);
		}
	      break;

	    case GE_EXPR:
	      /* Minimum unsigned value for >= if (VAL & CST2) == VAL
		 is VAL and maximum unsigned value is ~0.  For signed
		 comparison, if CST2 doesn't have most significant bit
		 set, handle it similarly.  If CST2 has MSB set,
		 the minimum is the same, and maximum is ~0U/2.  */
	      if (minv != valv)
		{
		  /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
		     VAL.  */
		  minv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (minv == valv)
		    break;
		}
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case GT_EXPR:
	    gt_expr:
	      /* Find out smallest MINV where MINV > VAL
		 && (MINV & CST2) == MINV, if any.  If VAL is signed and
		 CST2 has MSB set, compute it biased by 1 << (nprec - 1).  */
	      minv = masked_increment (valv, cst2v, sgnbit, nprec);
	      if (minv == valv)
		break;
	      maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
	      valid_p = true;
	      break;

	    case LE_EXPR:
	      /* Minimum unsigned value for <= is 0 and maximum
		 unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		maxv = valv;
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		  maxv -= 1;
		}
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    case LT_EXPR:
	    lt_expr:
	      /* Minimum unsigned value for < is 0 and maximum
		 unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
		 Otherwise, find smallest VAL2 where VAL2 > VAL
		 && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
		 as maximum.
		 For signed comparison, if CST2 doesn't have most
		 significant bit set, handle it similarly.  If CST2 has
		 MSB set, the maximum is the same and minimum is INT_MIN.  */
	      if (minv == valv)
		{
		  if (valv == sgnbit)
		    break;
		  maxv = valv;
		}
	      else
		{
		  maxv = masked_increment (valv, cst2v, sgnbit, nprec);
		  if (maxv == valv)
		    break;
		}
	      maxv -= 1;
	      maxv |= ~cst2v;
	      minv = sgnbit;
	      valid_p = true;
	      break;

	    default:
	      break;
	    }
	  if (valid_p
	      && (maxv - minv) != -1)
	    {
	      tree tmp, new_val, type;
	      int i;

	      for (i = 0; i < 2; i++)
		if (names[i])
		  {
		    wide_int maxv2 = maxv;
		    tmp = names[i];
		    type = TREE_TYPE (names[i]);
		    if (!TYPE_UNSIGNED (type))
		      {
			type = build_nonstandard_integer_type (nprec, 1);
			tmp = build1 (NOP_EXPR, type, names[i]);
		      }
		    if (minv != 0)
		      {
			tmp = build2 (PLUS_EXPR, type, tmp,
				      wide_int_to_tree (type, -minv));
			maxv2 = maxv - minv;
		      }
		    new_val = wide_int_to_tree (type, maxv2);
		    add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
		  }
	    }
	}
    }
}
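
/* As a concrete instance of the first derivation above (SSA names and
   constants are invented for the example): with unsigned char operands,
   on the true edge of

     y_2 = x_1 + 5;
     if (y_2 <= 9)

   the code queues, besides y_2 <= 9 itself, the assertion
   'x_1 + 5 <= 9' for x_1; because the addition may wrap, this encodes
   a range or anti-range test for x_1 rather than a simple bound.  */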
/* OP is an operand of a truth value expression which is known to have
   a particular value.  Register any asserts for OP and for any
   operands in OP's defining statement.

   If CODE is EQ_EXPR, then we want to register OP is zero (false),
   if CODE is NE_EXPR, then we want to register OP is nonzero (true).   */

static void
register_edge_assert_for_1 (tree op, enum tree_code code,
			    edge e, vec<assert_info> &asserts)
{
  gimple *op_def;
  tree val;
  enum tree_code rhs_code;

  /* We only care about SSA_NAMEs.  */
  if (TREE_CODE (op) != SSA_NAME)
    return;

  /* We know that OP will have a zero or nonzero value.  */
  val = build_int_cst (TREE_TYPE (op), 0);
  add_assert_info (asserts, op, op, code, val);

  /* Now look at how OP is set.  If it's set from a comparison,
     a truth operation or some bit operations, then we may be able
     to register information about the operands of that assignment.  */
  op_def = SSA_NAME_DEF_STMT (op);
  if (gimple_code (op_def) != GIMPLE_ASSIGN)
    return;

  rhs_code = gimple_assign_rhs_code (op_def);

  if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
    {
      bool invert = (code == EQ_EXPR ? true : false);
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);

      if (TREE_CODE (op0) == SSA_NAME)
	register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
      if (TREE_CODE (op1) == SSA_NAME)
	register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
    }
  else if ((code == NE_EXPR
	    && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
	   || (code == EQ_EXPR
	       && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
    {
      /* Recurse on each operand.  */
      tree op0 = gimple_assign_rhs1 (op_def);
      tree op1 = gimple_assign_rhs2 (op_def);
      if (TREE_CODE (op0) == SSA_NAME
	  && has_single_use (op0))
	register_edge_assert_for_1 (op0, code, e, asserts);
      if (TREE_CODE (op1) == SSA_NAME
	  && has_single_use (op1))
	register_edge_assert_for_1 (op1, code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
	   && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
    {
      /* Recurse, flipping CODE.  */
      code = invert_tree_comparison (code, false);
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
    {
      /* Recurse through the copy.  */
      register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
    }
  else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
    {
      /* Recurse through the type conversion, unless it is a narrowing
	 conversion or conversion from non-integral type.  */
      tree rhs = gimple_assign_rhs1 (op_def);
      if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
	  && (TYPE_PRECISION (TREE_TYPE (rhs))
	      <= TYPE_PRECISION (TREE_TYPE (op))))
	register_edge_assert_for_1 (rhs, code, e, asserts);
    }
}
/* Check if comparison
     NAME COND_OP INTEGER_CST
   has a form of
     (X & 11...100..0) COND_OP XX...X00...0
   Such comparison can yield assertions like
     X >= XX...X00...0
     X <= XX...X11...1
   in case of COND_OP being EQ_EXPR or
     X < XX...X00...0
     X > XX...X11...1
   in case of NE_EXPR.  */

static bool
is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
		      tree *new_name, tree *low, enum tree_code *low_code,
		      tree *high, enum tree_code *high_code)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
    return false;

  tree t = gimple_assign_rhs1 (def_stmt);
  tree maskt = gimple_assign_rhs2 (def_stmt);
  if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
    return false;

  wi::tree_to_wide_ref mask = wi::to_wide (maskt);
  wide_int inv_mask = ~mask;
  /* Must have been removed by now so don't bother optimizing.  */
  if (mask == 0 || inv_mask == 0)
    return false;

  /* Assume VALT is INTEGER_CST.  */
  wi::tree_to_wide_ref val = wi::to_wide (valt);

  if ((inv_mask & (inv_mask + 1)) != 0
      || (val & mask) != val)
    return false;

  bool is_range = cond_code == EQ_EXPR;

  tree type = TREE_TYPE (t);
  wide_int min = wi::min_value (type),
    max = wi::max_value (type);

  if (is_range)
    {
      *low_code = val == min ? ERROR_MARK : GE_EXPR;
      *high_code = val == max ? ERROR_MARK : LE_EXPR;
    }
  else
    {
      /* We can still generate assertion if one of alternatives
	 is known to always be false.  */
      if (val == min)
	{
	  *low_code = (enum tree_code) 0;
	  *high_code = GT_EXPR;
	}
      else if ((val | inv_mask) == max)
	{
	  *low_code = LT_EXPR;
	  *high_code = (enum tree_code) 0;
	}
      else
	return false;
    }

  *new_name = t;
  *low = wide_int_to_tree (type, val);
  *high = wide_int_to_tree (type, val | inv_mask);

  return true;
}
/* Try to register an edge assertion for SSA name NAME on edge E for
   the condition COND contributing to the conditional jump pointed to by
   SI.  */

void
register_edge_assert_for (tree name, edge e,
			  enum tree_code cond_code, tree cond_op0,
			  tree cond_op1, vec<assert_info> &asserts)
{
  tree val;
  enum tree_code comp_code;
  bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;

  /* Do not attempt to infer anything in names that flow through
     abnormal edges.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
						cond_op0, cond_op1,
						is_else_edge,
						&comp_code, &val))
    return;

  /* Register ASSERT_EXPRs for name.  */
  register_edge_assert_for_2 (name, e, cond_code, cond_op0,
			      cond_op1, is_else_edge, asserts);


  /* If COND is effectively an equality test of an SSA_NAME against
     the value zero or one, then we may be able to assert values
     for SSA_NAMEs which flow into COND.  */

  /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
     statement of NAME we can assert both operands of the BIT_AND_EXPR
     have nonzero value.  */
  if (((comp_code == EQ_EXPR && integer_onep (val))
       || (comp_code == NE_EXPR && integer_zerop (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      if (is_gimple_assign (def_stmt)
	  && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
	  register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
	}
    }

  /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
     statement of NAME we can assert both operands of the BIT_IOR_EXPR
     necessarily zero value.  */
  if (((comp_code == EQ_EXPR && integer_zerop (val))
       || (comp_code == NE_EXPR && integer_onep (val))))
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (name);

      /* For BIT_IOR_EXPR only if NAME == 0 both operands have
	 necessarily zero value, or if type-precision is one.  */
      if (is_gimple_assign (def_stmt)
	  && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
	      && (TYPE_PRECISION (TREE_TYPE (name)) == 1
		  || comp_code == EQ_EXPR)))
	{
	  tree op0 = gimple_assign_rhs1 (def_stmt);
	  tree op1 = gimple_assign_rhs2 (def_stmt);
	  register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
	  register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
	}
    }

  /* Sometimes we can infer ranges from (NAME & MASK) == VALUE.  */
  if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
      && TREE_CODE (val) == INTEGER_CST)
    {
      enum tree_code low_code, high_code;
      tree low, high;
      if (is_masked_range_test (name, val, comp_code, &name, &low,
				&low_code, &high, &high_code))
	{
	  if (low_code != ERROR_MARK)
	    register_edge_assert_for_2 (name, e, low_code, name,
					low, /*invert*/false, asserts);
	  if (high_code != ERROR_MARK)
	    register_edge_assert_for_2 (name, e, high_code, name,
					high, /*invert*/false, asserts);
	}
    }
}
/* Finish found ASSERTS for E and register them at GSI.  */

static void
finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
				 vec<assert_info> &asserts)
{
  for (unsigned i = 0; i < asserts.length (); ++i)
    /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
       reachable from E.  */
    if (live_on_edge (e, asserts[i].name))
      register_new_assert_for (asserts[i].name, asserts[i].expr,
			       asserts[i].comp_code, asserts[i].val,
			       NULL, e, gsi);
}
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a COND_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static void
find_conditional_asserts (basic_block bb, gcond *last)
{
  gimple_stmt_iterator bsi;
  tree op;
  edge_iterator ei;
  edge e;
  ssa_op_iter iter;

  bsi = gsi_for_stmt (last);

  /* Look for uses of the operands in each of the sub-graphs
     rooted at BB.  We need to check each of the outgoing edges
     separately, so that we know what kind of ASSERT_EXPR to
     insert.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->dest == bb)
	continue;

      /* Register the necessary assertions for each operand in the
	 conditional predicate.  */
      auto_vec<assert_info, 8> asserts;
      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
	register_edge_assert_for (op, e,
				  gimple_cond_code (last),
				  gimple_cond_lhs (last),
				  gimple_cond_rhs (last), asserts);
      finish_register_edge_assert_for (e, bsi, asserts);
    }
}
/* Compare two case labels sorting first by the destination bb index
   and then by the case value.  */

static int
compare_case_labels (const void *p1, const void *p2)
{
  const struct case_info *ci1 = (const struct case_info *) p1;
  const struct case_info *ci2 = (const struct case_info *) p2;
  int idx1 = ci1->bb->index;
  int idx2 = ci2->bb->index;

  if (idx1 < idx2)
    return -1;
  else if (idx1 == idx2)
    {
      /* Make sure the default label is first in a group.  */
      if (!CASE_LOW (ci1->expr))
	return -1;
      else if (!CASE_LOW (ci2->expr))
	return 1;
      else
	return tree_int_cst_compare (CASE_LOW (ci1->expr),
				     CASE_LOW (ci2->expr));
    }
  else
    return 1;
}
/* Determine whether the outgoing edges of BB should receive an
   ASSERT_EXPR for each of the operands of BB's LAST statement.
   The last statement of BB must be a SWITCH_EXPR.

   If any of the sub-graphs rooted at BB have an interesting use of
   the predicate operands, an assert location node is added to the
   list of assertions for the corresponding operands.  */

static void
find_switch_asserts (basic_block bb, gswitch *last)
{
  gimple_stmt_iterator bsi;
  tree op;
  edge e;
  struct case_info *ci;
  size_t n = gimple_switch_num_labels (last);
#if GCC_VERSION >= 4000
  unsigned int idx;
#else
  /* Work around GCC 3.4 bug (PR 37086).  */
  volatile unsigned int idx;
#endif

  bsi = gsi_for_stmt (last);
  op = gimple_switch_index (last);
  if (TREE_CODE (op) != SSA_NAME)
    return;

  /* Build a vector of case labels sorted by destination label.  */
  ci = XNEWVEC (struct case_info, n);
  for (idx = 0; idx < n; ++idx)
    {
      ci[idx].expr = gimple_switch_label (last, idx);
      ci[idx].bb = label_to_block (cfun, CASE_LABEL (ci[idx].expr));
    }
  edge default_edge = find_edge (bb, ci[0].bb);
  qsort (ci, n, sizeof (struct case_info), compare_case_labels);

  for (idx = 0; idx < n; ++idx)
    {
      tree min, max;
      tree cl = ci[idx].expr;
      basic_block cbb = ci[idx].bb;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* If there are multiple case labels with the same destination
	 we need to combine them to a single value range for the edge.  */
      if (idx + 1 < n && cbb == ci[idx + 1].bb)
	{
	  /* Skip labels until the last of the group.  */
	  do {
	    ++idx;
	  } while (idx < n && cbb == ci[idx].bb);
	  --idx;

	  /* Pick up the maximum of the case label range.  */
	  if (CASE_HIGH (ci[idx].expr))
	    max = CASE_HIGH (ci[idx].expr);
	  else
	    max = CASE_LOW (ci[idx].expr);
	}

      /* Can't extract a useful assertion out of a range that includes the
	 default label.  */
      if (min == NULL_TREE)
	continue;

      /* Find the edge to register the assert expr on.  */
      e = find_edge (bb, cbb);

      /* Register the necessary assertions for the operand in the
	 SWITCH_EXPR.  */
      auto_vec<assert_info, 8> asserts;
      register_edge_assert_for (op, e,
				max ? GE_EXPR : EQ_EXPR,
				op, fold_convert (TREE_TYPE (op), min),
				asserts);
      if (max)
	register_edge_assert_for (op, e, LE_EXPR, op,
				  fold_convert (TREE_TYPE (op), max),
				  asserts);
      finish_register_edge_assert_for (e, bsi, asserts);
    }

  XDELETEVEC (ci);

  if (!live_on_edge (default_edge, op))
    return;

  /* Now register along the default label assertions that correspond to the
     anti-range of each label.  */
  int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
  if (insertion_limit == 0)
    return;

  /* We can't do this if the default case shares a label with another case.  */
  tree default_cl = gimple_switch_default_label (last);
  for (idx = 1; idx < n; idx++)
    {
      tree min, max;
      tree cl = gimple_switch_label (last, idx);
      if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
	continue;

      min = CASE_LOW (cl);
      max = CASE_HIGH (cl);

      /* Combine contiguous case ranges to reduce the number of assertions
	 to insert.  */
      for (idx = idx + 1; idx < n; idx++)
	{
	  tree next_min, next_max;
	  tree next_cl = gimple_switch_label (last, idx);
	  if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
	    break;

	  next_min = CASE_LOW (next_cl);
	  next_max = CASE_HIGH (next_cl);

	  wide_int difference = (wi::to_wide (next_min)
				 - wi::to_wide (max ? max : min));
	  if (wi::eq_p (difference, 1))
	    max = next_max ? next_max : next_min;
	  else
	    break;
	}
      idx--;

      if (max == NULL_TREE)
	{
	  /* Register the assertion OP != MIN.  */
	  auto_vec<assert_info, 8> asserts;
	  min = fold_convert (TREE_TYPE (op), min);
	  register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
				    asserts);
	  finish_register_edge_assert_for (default_edge, bsi, asserts);
	}
      else
	{
	  /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
	     which will give OP the anti-range ~[MIN,MAX].  */
	  tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
	  min = fold_convert (TREE_TYPE (uop), min);
	  max = fold_convert (TREE_TYPE (uop), max);

	  tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
	  tree rhs = int_const_binop (MINUS_EXPR, max, min);
	  register_new_assert_for (op, lhs, GT_EXPR, rhs,
				   NULL, default_edge, bsi);
	}

      if (--insertion_limit == 0)
	break;
    }
}
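
/* For instance (hypothetical labels): in 'switch (i_2) { case 3 ... 5:
   ... case 6 ... 7: ... }' where neither case shares a label with the
   default, the two contiguous ranges are merged and the default edge
   receives the single assertion (unsigned) i_2 - 3 > 4, i.e. the
   anti-range ~[3, 7] for i_2.  */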
/* Traverse all the statements in block BB looking for statements that
   may generate useful assertions for the SSA names in their operand.
   If a statement produces a useful assertion A for name N_i, then the
   list of assertions already generated for N_i is scanned to
   determine if A is actually needed.

   If N_i already had the assertion A at a location dominating the
   current location, then nothing needs to be done.  Otherwise, the
   new location for A is recorded instead.

   1- For every statement S in BB, all the variables used by S are
      added to bitmap FOUND_IN_SUBGRAPH.

   2- If statement S uses an operand N in a way that exposes a known
      value range for N, then if N was not already generated by an
      ASSERT_EXPR, create a new assert location for N.  For instance,
      if N is a pointer and the statement dereferences it, we can
      assume that N is not NULL.

   3- COND_EXPRs are a special case of #2.  We can derive range
      information from the predicate but need to insert different
      ASSERT_EXPRs for each of the sub-graphs rooted at the
      conditional block.  If the last statement of BB is a conditional
      expression of the form 'X op Y', then

      a) Remove X and Y from the set FOUND_IN_SUBGRAPH.

      b) If the conditional is the only entry point to the sub-graph
	 corresponding to the THEN_CLAUSE, recurse into it.  On
	 return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
	 an ASSERT_EXPR is added for the corresponding variable.

      c) Repeat step (b) on the ELSE_CLAUSE.

      d) Mark X and Y in FOUND_IN_SUBGRAPH.

      For instance,

	    if (a == 9)
	      b = a;
	    else
	      b = c + 1;

      In this case, an assertion on the THEN clause is useful to
      determine that 'a' is always 9 on that edge.  However, an assertion
      on the ELSE clause would be unnecessary.

   4- If BB does not end in a conditional expression, then we recurse
      into BB's dominator children.

   At the end of the recursive traversal, every SSA name will have a
   list of locations where ASSERT_EXPRs should be added.  When a new
   location for name N is found, it is registered by calling
   register_new_assert_for.  That function keeps track of all the
   registered assertions to prevent adding unnecessary assertions.
   For instance, if a pointer P_4 is dereferenced more than once in a
   dominator tree, only the location dominating all the dereferences of
   P_4 will receive an ASSERT_EXPR.  */

static void
find_assert_locations_1 (basic_block bb, sbitmap live)
{
  gimple *last;

  last = last_stmt (bb);

  /* If BB's last statement is a conditional statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_COND
      && !fp_predicate (last)
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    find_conditional_asserts (bb, as_a <gcond *> (last));

  /* If BB's last statement is a switch statement involving integer
     operands, determine if we need to add ASSERT_EXPRs.  */
  if (last
      && gimple_code (last) == GIMPLE_SWITCH
      && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
    find_switch_asserts (bb, as_a <gswitch *> (last));

  /* Traverse all the statements in BB marking used names and looking
     for statements that may infer assertions for their used operands.  */
  for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
       gsi_prev (&si))
    {
      gimple *stmt;
      tree op;
      ssa_op_iter i;

      stmt = gsi_stmt (si);

      if (is_gimple_debug (stmt))
	continue;

      /* See if we can derive an assertion for any of STMT's operands.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	{
	  tree value;
	  enum tree_code comp_code;

	  /* If op is not live beyond this stmt, do not bother to insert
	     asserts for it.  */
	  if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
	    continue;

	  /* If OP is used in such a way that we can infer a value
	     range for it, and we don't find a previous assertion for
	     it, create a new assertion location node for OP.  */
	  if (infer_value_range (stmt, op, &comp_code, &value))
	    {
	      /* If we are able to infer a nonzero value range for OP,
		 then walk backwards through the use-def chain to see if OP
		 was set via a typecast.

		 If so, then we can also infer a nonzero value range
		 for the operand of the NOP_EXPR.  */
	      if (comp_code == NE_EXPR && integer_zerop (value))
		{
		  tree t = op;
		  gimple *def_stmt = SSA_NAME_DEF_STMT (t);

		  while (is_gimple_assign (def_stmt)
			 && CONVERT_EXPR_CODE_P
			     (gimple_assign_rhs_code (def_stmt))
			 && TREE_CODE
			     (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
			 && POINTER_TYPE_P
			     (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
		    {
		      t = gimple_assign_rhs1 (def_stmt);
		      def_stmt = SSA_NAME_DEF_STMT (t);

		      /* Note we want to register the assert for the
			 operand of the NOP_EXPR after SI, not after the
			 conversion.  */
		      if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
			register_new_assert_for (t, t, comp_code, value,
						 bb, NULL, si);
		    }
		}

	      register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
	    }
	}

      /* Update live.  */
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
	bitmap_set_bit (live, SSA_NAME_VERSION (op));
      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
	bitmap_clear_bit (live, SSA_NAME_VERSION (op));
    }

  /* Traverse all PHI nodes in BB, updating live.  */
  for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
       gsi_next (&si))
    {
      use_operand_p arg_p;
      ssa_op_iter i;
      gphi *phi = si.phi ();
      tree res = gimple_phi_result (phi);

      if (virtual_operand_p (res))
	continue;

      FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
	{
	  tree arg = USE_FROM_PTR (arg_p);
	  if (TREE_CODE (arg) == SSA_NAME)
	    bitmap_set_bit (live, SSA_NAME_VERSION (arg));
	}

      bitmap_clear_bit (live, SSA_NAME_VERSION (res));
    }
}
/* Do an RPO walk over the function computing SSA name liveness
   on-the-fly and deciding on assert expressions to insert.  */

static void
find_assert_locations (void)
{
  int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
  int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
  int rpo_cnt, i;

  live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
  for (i = 0; i < rpo_cnt; ++i)
    bb_rpo[rpo[i]] = i;

  /* Pre-seed loop latch liveness from loop header PHI nodes.  Due to
     the order we compute liveness and insert asserts we otherwise
     fail to insert asserts into the loop latch.  */
  struct loop *loop;
  FOR_EACH_LOOP (loop, 0)
    {
      i = loop->latch->index;
      unsigned int j = single_succ_edge (loop->latch)->dest_idx;
      for (gphi_iterator gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;
	  tree arg = gimple_phi_arg_def (phi, j);
	  if (TREE_CODE (arg) == SSA_NAME)
	    {
	      if (live[i] == NULL)
		{
		  live[i] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[i]);
		}
	      bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
	    }
	}
    }

  for (i = rpo_cnt - 1; i >= 0; --i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
      edge e;
      edge_iterator ei;

      if (!live[rpo[i]])
	{
	  live[rpo[i]] = sbitmap_alloc (num_ssa_names);
	  bitmap_clear (live[rpo[i]]);
	}

      /* Process BB and update the live information with uses in
         this block.  */
      find_assert_locations_1 (bb, live[rpo[i]]);

      /* Merge liveness into the predecessor blocks and free it.  */
      if (!bitmap_empty_p (live[rpo[i]]))
	{
	  int pred_rpo = i;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      int pred = e->src->index;
	      if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
		continue;

	      if (!live[pred])
		{
		  live[pred] = sbitmap_alloc (num_ssa_names);
		  bitmap_clear (live[pred]);
		}
	      bitmap_ior (live[pred], live[pred], live[rpo[i]]);

	      if (bb_rpo[pred] < pred_rpo)
		pred_rpo = bb_rpo[pred];
	    }

	  /* Record the RPO number of the last visited block that needs
	     live information from this block.  */
	  last_rpo[rpo[i]] = pred_rpo;
	}
      else
	{
	  sbitmap_free (live[rpo[i]]);
	  live[rpo[i]] = NULL;
	}

      /* We can free all successors live bitmaps if all their
	 predecessors have been visited already.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (last_rpo[e->dest->index] == i
	    && live[e->dest->index])
	  {
	    sbitmap_free (live[e->dest->index]);
	    live[e->dest->index] = NULL;
	  }
    }

  XDELETEVEC (rpo);
  XDELETEVEC (bb_rpo);
  XDELETEVEC (last_rpo);
  for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
    if (live[i])
      sbitmap_free (live[i]);
  XDELETEVEC (live);
}
/* Create an ASSERT_EXPR for NAME and insert it in the location
   indicated by LOC.  Return true if we made any edge insertions.  */

static bool
process_assert_insertions_for (tree name, assert_locus *loc)
{
  /* Build the comparison expression NAME_i COMP_CODE VAL.  */
  gimple *stmt;
  tree cond;
  gimple *assert_stmt;
  edge_iterator ei;
  edge e;

  /* If we have X <=> X do not insert an assert expr for that.  */
  if (loc->expr == loc->val)
    return false;

  cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
  assert_stmt = build_assert_expr_for (cond, name);
  if (loc->e)
    {
      /* We have been asked to insert the assertion on an edge.  This
	 is used only by COND_EXPR and SWITCH_EXPR assertions.  */
      gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
			   || (gimple_code (gsi_stmt (loc->si))
			       == GIMPLE_SWITCH));

      gsi_insert_on_edge (loc->e, assert_stmt);
      return true;
    }

  /* If the stmt iterator points at the end then this is an insertion
     at the beginning of a block.  */
  if (gsi_end_p (loc->si))
    {
      gimple_stmt_iterator si = gsi_after_labels (loc->bb);
      gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
      return false;
    }

  /* Otherwise, we can insert right after LOC->SI iff the
     statement must not be the last statement in the block.  */
  stmt = gsi_stmt (loc->si);
  if (!stmt_ends_bb_p (stmt))
    {
      gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
      return false;
    }

  /* If STMT must be the last statement in BB, we can only insert new
     assertions on the non-abnormal edge out of BB.  Note that since
     STMT is not control flow, there may only be one non-abnormal/eh edge
     out of BB.  */
  FOR_EACH_EDGE (e, ei, loc->bb->succs)
    if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
      {
	gsi_insert_on_edge (e, assert_stmt);
	return true;
      }

  gcc_unreachable ();
}
/* Qsort helper for sorting assert locations.  If stable is true, don't
   use iterative_hash_expr because it can be unstable for -fcompare-debug,
   on the other side some pointers might be NULL.  */

template <bool stable>
static int
compare_assert_loc (const void *pa, const void *pb)
{
  assert_locus * const a = *(assert_locus * const *)pa;
  assert_locus * const b = *(assert_locus * const *)pb;

  /* If stable, some asserts might be optimized away already, sort
     them last.  */
  if (stable)
    {
      if (a == NULL)
	return b != NULL;
      else if (b == NULL)
	return -1;
    }

  if (a->e == NULL && b->e != NULL)
    return 1;
  else if (a->e != NULL && b->e == NULL)
    return -1;

  /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
     no need to test both a->e and b->e.  */

  /* Sort after destination index.  */
  if (a->e == NULL)
    ;
  else if (a->e->dest->index > b->e->dest->index)
    return 1;
  else if (a->e->dest->index < b->e->dest->index)
    return -1;

  /* Sort after comp_code.  */
  if (a->comp_code > b->comp_code)
    return 1;
  else if (a->comp_code < b->comp_code)
    return -1;

  hashval_t ha, hb;

  /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
     uses DECL_UID of the VAR_DECL, so sorting might differ between
     -g and -g0.  When doing the removal of redundant assert exprs
     and commonization to successors, this does not matter, but for
     the final sort needs to be stable.  */
  if (stable)
    {
      ha = 0;
      hb = 0;
    }
  else
    {
      ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
      hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
    }

  /* Break the tie using hashing and source/bb index.  */
  if (ha == hb)
    return (a->e != NULL
	    ? a->e->src->index - b->e->src->index
	    : a->bb->index - b->bb->index);
  return ha > hb ? 1 : -1;
}
/* Process all the insertions registered for every name N_i registered
   in NEED_ASSERT_FOR.  The list of assertions to be inserted are
   found in ASSERTS_FOR[i].  */

static void
process_assert_insertions (void)
{
  unsigned i;
  bitmap_iterator bi;
  bool update_edges_p = false;
  int num_asserts = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_all_asserts (dump_file);

  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
    {
      assert_locus *loc = asserts_for[i];
      gcc_assert (loc);

      auto_vec<assert_locus *, 16> asserts;
      for (; loc; loc = loc->next)
	asserts.safe_push (loc);
      asserts.qsort (compare_assert_loc<false>);

      /* Push down common asserts to successors and remove redundant ones.  */
      unsigned ecnt = 0;
      assert_locus *common = NULL;
      unsigned commonj = 0;
      for (unsigned j = 0; j < asserts.length (); ++j)
	{
	  loc = asserts[j];
	  if (! loc->e)
	    common = NULL;
	  else if (! common
		   || loc->e->dest != common->e->dest
		   || loc->comp_code != common->comp_code
		   || ! operand_equal_p (loc->val, common->val, 0)
		   || ! operand_equal_p (loc->expr, common->expr, 0))
	    {
	      commonj = j;
	      common = loc;
	      ecnt = 1;
	    }
	  else if (loc->e == asserts[j-1]->e)
	    {
	      /* Remove duplicate asserts.  */
	      if (commonj == j - 1)
		{
		  commonj = j;
		  common = loc;
		}
	      free (asserts[j-1]);
	      asserts[j-1] = NULL;
	    }
	  else
	    {
	      ecnt++;
	      if (EDGE_COUNT (common->e->dest->preds) == ecnt)
		{
		  /* We have the same assertion on all incoming edges of a BB.
		     Insert it at the beginning of that block.  */
		  loc->bb = loc->e->dest;
		  loc->e = NULL;
		  loc->si = gsi_none ();
		  common = NULL;
		  /* Clear asserts commoned.  */
		  for (; commonj != j; ++commonj)
		    if (asserts[commonj])
		      {
			free (asserts[commonj]);
			asserts[commonj] = NULL;
		      }
		}
	    }
	}

      /* The asserts vector sorting above might be unstable for
	 -fcompare-debug, sort again to ensure a stable sort.  */
      asserts.qsort (compare_assert_loc<true>);
      for (unsigned j = 0; j < asserts.length (); ++j)
	{
	  loc = asserts[j];
	  if (! loc)
	    break;
	  update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
	  num_asserts++;
	  free (loc);
	}
    }

  if (update_edges_p)
    gsi_commit_edge_inserts ();

  statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
			    num_asserts);
}
/* Traverse the flowgraph looking for conditional jumps to insert range
   expressions.  These range expressions are meant to provide information
   to optimizations that need to reason in terms of value ranges.  They
   will not be expanded into RTL.  For instance, given:

   x = ...
   y = ...
   if (x < y)
     y = x - 2;
   else
     x = y + 3;

   this pass will transform the code into:

   x = ...
   y = ...
   if (x < y)
    {
      x = ASSERT_EXPR <x, x < y>
      y = x - 2
    }
   else
    {
      y = ASSERT_EXPR <y, x >= y>
      x = y + 3
    }

   The idea is that once copy and constant propagation have run, other
   optimizations will be able to determine what ranges of values can 'x'
   take in different paths of the code, simply by checking the reaching
   definition of 'x'.  */

static void
insert_range_assertions (void)
{
  need_assert_for = BITMAP_ALLOC (NULL);
  asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);

  calculate_dominance_info (CDI_DOMINATORS);

  find_assert_locations ();
  if (!bitmap_empty_p (need_assert_for))
    {
      process_assert_insertions ();
      update_ssa (TODO_update_ssa_no_phi);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
      dump_function_to_file (current_function_decl, dump_file, dump_flags);
    }

  free (asserts_for);
  BITMAP_FREE (need_assert_for);
}
class vrp_prop : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;

  void vrp_initialize (void);
  void vrp_finalize (bool);
  void check_all_array_refs (void);
  void check_array_ref (location_t, tree, bool);
  void check_mem_ref (location_t, tree, bool);
  void search_for_addr_array (tree, location_t);

  class vr_values vr_values;
  /* Temporary delegator to minimize code churn.  */
  value_range *get_value_range (const_tree op)
    { return vr_values.get_value_range (op); }
  void set_defs_to_varying (gimple *stmt)
    { return vr_values.set_defs_to_varying (stmt); }
  void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
				tree *output_p, value_range *vr)
    { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
  bool update_value_range (const_tree op, value_range *vr)
    { return vr_values.update_value_range (op, vr); }
  void extract_range_basic (value_range *vr, gimple *stmt)
    { vr_values.extract_range_basic (vr, stmt); }
  void extract_range_from_phi_node (gphi *phi, value_range *vr)
    { vr_values.extract_range_from_phi_node (phi, vr); }
};
/* Checks one ARRAY_REF in REF, located at LOCATION.  Ignores flexible
   arrays and "struct" hacks.  If VRP can determine that the array
   subscript is a constant, check if it is outside the valid range.
   If the array subscript is a RANGE, warn if it is non-overlapping
   with the valid range.
   IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside an ADDR_EXPR.  */

void
vrp_prop::check_array_ref (location_t location, tree ref,
			   bool ignore_off_by_one)
{
  const value_range *vr = NULL;
  tree low_sub, up_sub;
  tree low_bound, up_bound, up_bound_p1;

  if (TREE_NO_WARNING (ref))
    return;

  low_sub = up_sub = TREE_OPERAND (ref, 1);
  up_bound = array_ref_up_bound (ref);

  if (!up_bound
      || TREE_CODE (up_bound) != INTEGER_CST
      || (warn_array_bounds < 2
	  && array_at_struct_end_p (ref)))
    {
      /* Accesses to trailing arrays via pointers may access storage
	 beyond the types array bounds.  For such arrays, or for flexible
	 array members, as well as for other arrays of an unknown size,
	 replace the upper bound with a more permissive one that assumes
	 the size of the largest object is PTRDIFF_MAX.  */
      tree eltsize = array_ref_element_size (ref);

      if (TREE_CODE (eltsize) != INTEGER_CST
	  || integer_zerop (eltsize))
	{
	  up_bound = NULL_TREE;
	  up_bound_p1 = NULL_TREE;
	}
      else
	{
	  tree maxbound = TYPE_MAX_VALUE (ptrdiff_type_node);
	  tree arg = TREE_OPERAND (ref, 0);
	  poly_int64 off;

	  if (get_addr_base_and_unit_offset (arg, &off) && known_gt (off, 0))
	    maxbound = wide_int_to_tree (sizetype,
					 wi::sub (wi::to_wide (maxbound),
						  off));
	  else
	    maxbound = fold_convert (sizetype, maxbound);

	  up_bound_p1 = int_const_binop (TRUNC_DIV_EXPR, maxbound, eltsize);

	  up_bound = int_const_binop (MINUS_EXPR, up_bound_p1,
				      build_int_cst (ptrdiff_type_node, 1));
	}
    }
  else
    up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
				   build_int_cst (TREE_TYPE (up_bound), 1));

  low_bound = array_ref_low_bound (ref);

  tree artype = TREE_TYPE (TREE_OPERAND (ref, 0));

  bool warned = false;

  /* Empty array.  */
  if (up_bound && tree_int_cst_equal (low_bound, up_bound_p1))
    warned = warning_at (location, OPT_Warray_bounds,
			 "array subscript %E is above array bounds of %qT",
			 low_bound, artype);

  if (TREE_CODE (low_sub) == SSA_NAME)
    {
      vr = get_value_range (low_sub);
      if (!vr->undefined_p () && !vr->varying_p ())
	{
	  low_sub = vr->kind () == VR_RANGE ? vr->max () : vr->min ();
	  up_sub = vr->kind () == VR_RANGE ? vr->min () : vr->max ();
	}
    }

  if (vr && vr->kind () == VR_ANTI_RANGE)
    {
      if (up_bound
	  && TREE_CODE (up_sub) == INTEGER_CST
	  && (ignore_off_by_one
	      ? tree_int_cst_lt (up_bound, up_sub)
	      : tree_int_cst_le (up_bound, up_sub))
	  && TREE_CODE (low_sub) == INTEGER_CST
	  && tree_int_cst_le (low_sub, low_bound))
	warned = warning_at (location, OPT_Warray_bounds,
			     "array subscript [%E, %E] is outside "
			     "array bounds of %qT",
			     low_sub, up_sub, artype);
    }
  else if (up_bound
	   && TREE_CODE (up_sub) == INTEGER_CST
	   && (ignore_off_by_one
	       ? !tree_int_cst_le (up_sub, up_bound_p1)
	       : !tree_int_cst_le (up_sub, up_bound)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warned = warning_at (location, OPT_Warray_bounds,
			   "array subscript %E is above array bounds of %qT",
			   up_sub, artype);
    }
  else if (TREE_CODE (low_sub) == INTEGER_CST
	   && tree_int_cst_lt (low_sub, low_bound))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
	  fprintf (dump_file, "\n");
	}
      warned = warning_at (location, OPT_Warray_bounds,
			   "array subscript %E is below array bounds of %qT",
			   low_sub, artype);
    }

  if (warned)
    {
      ref = TREE_OPERAND (ref, 0);

      if (DECL_P (ref))
	inform (DECL_SOURCE_LOCATION (ref), "while referencing %qD", ref);

      TREE_NO_WARNING (ref) = 1;
    }
}
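
/* An illustrative example of the diagnostic above (hypothetical code,
   invented for this comment):

     int a[10];
     int f (void) { return a[10]; }

   Here the constant subscript 10 exceeds the upper bound 9, so
   check_array_ref emits
     warning: array subscript 10 is above array bounds of 'int[10]'
   followed by a note pointing at the declaration of 'a'.  */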
/* Checks one MEM_REF in REF, located at LOCATION, for out-of-bounds
   references to string constants.  If VRP can determine that the array
   subscript is a constant, check if it is outside the valid range.
   If the array subscript is a RANGE, warn if it is non-overlapping
   with the valid range.
   IGNORE_OFF_BY_ONE is true if the MEM_REF is inside an ADDR_EXPR
   (used to allow one-past-the-end indices for code that takes
   the address of the just-past-the-end element of an array).  */

void
vrp_prop::check_mem_ref (location_t location, tree ref,
			 bool ignore_off_by_one)
{
  if (TREE_NO_WARNING (ref))
    return;

  tree arg = TREE_OPERAND (ref, 0);
  /* The constant and variable offset of the reference.  */
  tree cstoff = TREE_OPERAND (ref, 1);
  tree varoff = NULL_TREE;

  const offset_int maxobjsize = tree_to_shwi (max_object_size ());

  /* The array or string constant bounds in bytes.  Initially set
     to [-MAXOBJSIZE - 1, MAXOBJSIZE] until a tighter bound is
     determined.  */
  offset_int arrbounds[2] = { -maxobjsize - 1, maxobjsize };

  /* The minimum and maximum intermediate offset.  For a reference
     to be valid, not only must the final offset/subscript be in
     bounds but all intermediate offsets should be as well.
     GCC may be able to deal gracefully with such out-of-bounds
     offsets so the checking is only enabled at -Warray-bounds=2
     where it may help detect bugs in uses of the intermediate
     offsets that could otherwise not be detectable.  */
  offset_int ioff = wi::to_offset (fold_convert (ptrdiff_type_node, cstoff));
  offset_int extrema[2] = { 0, wi::abs (ioff) };

  /* The range of the byte offset into the reference.  */
  offset_int offrange[2] = { 0, 0 };

  const value_range *vr = NULL;

  /* Determine the offsets and increment OFFRANGE for the bounds of each.
     The loop computes the range of the final offset for expressions such
     as (A + i0 + ... + iN)[CSTOFF] where i0 through iN are SSA_NAMEs in
     some range.  */
  while (TREE_CODE (arg) == SSA_NAME)
    {
      gimple *def = SSA_NAME_DEF_STMT (arg);
      if (!is_gimple_assign (def))
	break;

      tree_code code = gimple_assign_rhs_code (def);
      if (code == POINTER_PLUS_EXPR)
	{
	  arg = gimple_assign_rhs1 (def);
	  varoff = gimple_assign_rhs2 (def);
	}
      else if (code == ASSERT_EXPR)
	{
	  arg = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
	  continue;
	}
      else
	return;

      /* VAROFF should always be a SSA_NAME here (and not even
	 INTEGER_CST) but there's no point in taking chances.  */
      if (TREE_CODE (varoff) != SSA_NAME)
	break;

      vr = get_value_range (varoff);
      if (!vr || vr->undefined_p () || vr->varying_p ())
	break;

      if (!vr->constant_p ())
	break;

      if (vr->kind () == VR_RANGE)
	{
	  if (tree_int_cst_lt (vr->min (), vr->max ()))
	    {
	      offset_int min
		= wi::to_offset (fold_convert (ptrdiff_type_node, vr->min ()));
	      offset_int max
		= wi::to_offset (fold_convert (ptrdiff_type_node, vr->max ()));
	      offrange[0] += min;
	      offrange[1] += max;
	    }
	  else
	    {
	      /* When the bounds are not ordered, conservatively add
		 [-MAXOBJSIZE - 1, MAXOBJSIZE] to OFFRANGE.  */
	      offrange[0] += arrbounds[0];
	      offrange[1] += arrbounds[1];
	    }
	}
      else
	{
	  /* For an anti-range, analogously to the above, conservatively
	     add [-MAXOBJSIZE - 1, MAXOBJSIZE] to OFFRANGE.  */
	  offrange[0] += arrbounds[0];
	  offrange[1] += arrbounds[1];
	}

      /* Keep track of the minimum and maximum offset.  */
      if (offrange[1] < 0 && offrange[1] < extrema[0])
	extrema[0] = offrange[1];
      if (offrange[0] > 0 && offrange[0] > extrema[1])
	extrema[1] = offrange[0];

      if (offrange[0] < arrbounds[0])
	offrange[0] = arrbounds[0];

      if (offrange[1] > arrbounds[1])
	offrange[1] = arrbounds[1];
    }

  if (TREE_CODE (arg) == ADDR_EXPR)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TREE_CODE (arg) != STRING_CST
	  && TREE_CODE (arg) != VAR_DECL)
	return;
    }
  else
    return;

  /* The type of the object being referred to.  It can be an array,
     string literal, or a non-array type when the MEM_REF represents
     a reference/subscript via a pointer to an object that is not
     an element of an array.  References to members of structs and
     unions are excluded because MEM_REF doesn't make it possible
     to identify the member where the reference originated.
     Incomplete types are excluded as well because their size is
     not known.  */
  tree reftype = TREE_TYPE (arg);
  if (POINTER_TYPE_P (reftype)
      || !COMPLETE_TYPE_P (reftype)
      || TREE_CODE (TYPE_SIZE_UNIT (reftype)) != INTEGER_CST
      || RECORD_OR_UNION_TYPE_P (reftype))
    return;

  offset_int eltsize;
  if (TREE_CODE (reftype) == ARRAY_TYPE)
    {
      eltsize = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (reftype)));

      if (tree dom = TYPE_DOMAIN (reftype))
	{
	  tree bnds[] = { TYPE_MIN_VALUE (dom), TYPE_MAX_VALUE (dom) };
	  if (array_at_struct_end_p (arg)
	      || !bnds[0] || !bnds[1])
	    arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));
	  else
	    {
	      arrbounds[0] = wi::to_offset (bnds[0]) * eltsize;
	      arrbounds[1] = (wi::to_offset (bnds[1]) + 1) * eltsize;
	    }
	}
      else
	arrbounds[1] = wi::lrshift (maxobjsize, wi::floor_log2 (eltsize));

      if (TREE_CODE (ref) == MEM_REF)
	{
	  /* For MEM_REF determine a tighter bound of the non-array
	     element type.  */
	  tree eltype = TREE_TYPE (reftype);
	  while (TREE_CODE (eltype) == ARRAY_TYPE)
	    eltype = TREE_TYPE (eltype);
	  eltsize = wi::to_offset (TYPE_SIZE_UNIT (eltype));
	}
    }
  else
    {
      eltsize = 1;
      arrbounds[1] = wi::to_offset (TYPE_SIZE_UNIT (reftype));
    }

  offrange[0] += ioff;
  offrange[1] += ioff;

  /* Compute the more permissive upper bound when IGNORE_OFF_BY_ONE
     is set (when taking the address of the one-past-last element
     of an array) but always use the stricter bound in diagnostics.  */
  offset_int ubound = arrbounds[1];
  if (ignore_off_by_one)
    ubound += 1;

  if (offrange[0] >= ubound || offrange[1] < arrbounds[0])
    {
      /* Treat a reference to a non-array object as one to an array
	 of a single element.  */
      if (TREE_CODE (reftype) != ARRAY_TYPE)
	reftype = build_array_type_nelts (reftype, 1);

      if (TREE_CODE (ref) == MEM_REF)
	{
	  /* Extract the element type out of MEM_REF and use its size
	     to compute the index to print in the diagnostic; arrays
	     in MEM_REF don't mean anything.  */
	  tree type = TREE_TYPE (ref);
	  while (TREE_CODE (type) == ARRAY_TYPE)
	    type = TREE_TYPE (type);
	  tree size = TYPE_SIZE_UNIT (type);
	  offrange[0] = offrange[0] / wi::to_offset (size);
	  offrange[1] = offrange[1] / wi::to_offset (size);
	}
      else
	{
	  /* For anything other than MEM_REF, compute the index to
	     print in the diagnostic as the offset over element size.  */
	  offrange[0] = offrange[0] / eltsize;
	  offrange[1] = offrange[1] / eltsize;
	}

      bool warned;
      if (offrange[0] == offrange[1])
	warned = warning_at (location, OPT_Warray_bounds,
			     "array subscript %wi is outside array bounds "
			     "of %qT",
			     offrange[0].to_shwi (), reftype);
      else
	warned = warning_at (location, OPT_Warray_bounds,
			     "array subscript [%wi, %wi] is outside "
			     "array bounds of %qT",
			     offrange[0].to_shwi (),
			     offrange[1].to_shwi (), reftype);

      if (warned && DECL_P (arg))
	inform (DECL_SOURCE_LOCATION (arg), "while referencing %qD", arg);

      TREE_NO_WARNING (ref) = 1;
      return;
    }

  if (warn_array_bounds < 2)
    return;

  /* At level 2 check also intermediate offsets.  Note the assignment
     in the condition below: it selects the offending extreme for the
     diagnostic.  */
  int i = 0;
  if (extrema[i] < -arrbounds[1] || extrema[i = 1] > ubound)
    {
      HOST_WIDE_INT tmpidx = extrema[i].to_shwi () / eltsize.to_shwi ();

      if (warning_at (location, OPT_Warray_bounds,
		      "intermediate array offset %wi is outside array bounds "
		      "of %qT", tmpidx, reftype))
	TREE_NO_WARNING (ref) = 1;
    }
}
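
/* An illustrative case for the intermediate-offset check enabled at
   -Warray-bounds=2 (hypothetical code, invented for this comment):

     char a[10];
     char *p = a + 100;      // intermediate offset 100 is out of bounds
     char c = *(p - 95);     // final offset 5 is in bounds

   The final offset is valid, but the EXTREMA tracking above records the
   out-of-bounds intermediate offset and diagnoses it.  */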
/* Search the expression T, located at LOCATION, for the address of an
   ARRAY_REF or MEM_REF, and call check_array_ref/check_mem_ref on each
   one found.  */

void
vrp_prop::search_for_addr_array (tree t, location_t location)
{
  /* Check each ARRAY_REF and MEM_REF in the reference chain.  */
  do
    {
      if (TREE_CODE (t) == ARRAY_REF)
	check_array_ref (location, t, true /*ignore_off_by_one*/);
      else if (TREE_CODE (t) == MEM_REF)
	check_mem_ref (location, t, true /*ignore_off_by_one*/);

      t = TREE_OPERAND (t, 0);
    }
  while (handled_component_p (t) || TREE_CODE (t) == MEM_REF);

  if (TREE_CODE (t) != MEM_REF
      || TREE_CODE (TREE_OPERAND (t, 0)) != ADDR_EXPR
      || TREE_NO_WARNING (t))
    return;

  tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
  tree low_bound, up_bound, el_sz;
  if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
      || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
      || !TYPE_DOMAIN (TREE_TYPE (tem)))
    return;

  low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
  up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
  el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
  if (!low_bound
      || TREE_CODE (low_bound) != INTEGER_CST
      || !up_bound
      || TREE_CODE (up_bound) != INTEGER_CST
      || !el_sz
      || TREE_CODE (el_sz) != INTEGER_CST)
    return;

  offset_int idx;
  if (!mem_ref_offset (t).is_constant (&idx))
    return;

  bool warned = false;
  idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
  if (idx < 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	  fprintf (dump_file, "\n");
	}
      warned = warning_at (location, OPT_Warray_bounds,
			   "array subscript %wi is below "
			   "array bounds of %qT",
			   idx.to_shwi (), TREE_TYPE (tem));
    }
  else if (idx > (wi::to_offset (up_bound)
		  - wi::to_offset (low_bound) + 1))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Array bound warning for ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
	  fprintf (dump_file, "\n");
	}
      warned = warning_at (location, OPT_Warray_bounds,
			   "array subscript %wu is above "
			   "array bounds of %qT",
			   idx.to_uhwi (), TREE_TYPE (tem));
    }

  if (warned)
    {
      if (DECL_P (t))
	inform (DECL_SOURCE_LOCATION (t), "while referencing %qD", t);

      TREE_NO_WARNING (t) = 1;
    }
}
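
/* For illustration (hypothetical code): given

     int a[10];

   taking '&a[10]' is accepted because IGNORE_OFF_BY_ONE is true here
   (the one-past-the-end address is valid), while '&a[11]' results in
     warning: array subscript 11 is above array bounds of 'int[10]'.  */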
/* walk_tree() callback that checks if *TP is
   an ARRAY_REF inside an ADDR_EXPR (in which an array
   subscript one outside the valid range is allowed).  Call
   check_array_ref for each ARRAY_REF found.  The location is
   passed in DATA.  */

static tree
check_array_bounds (tree *tp, int *walk_subtree, void *data)
{
  tree t = *tp;
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  location_t location;

  if (EXPR_HAS_LOCATION (t))
    location = EXPR_LOCATION (t);
  else
    location = gimple_location (wi->stmt);

  *walk_subtree = TRUE;

  vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
  if (TREE_CODE (t) == ARRAY_REF)
    vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
  else if (TREE_CODE (t) == MEM_REF)
    vrp_prop->check_mem_ref (location, t, false /*ignore_off_by_one*/);
  else if (TREE_CODE (t) == ADDR_EXPR)
    {
      vrp_prop->search_for_addr_array (t, location);
      *walk_subtree = FALSE;
    }

  return NULL_TREE;
}
/* A dom_walker subclass for use by vrp_prop::check_all_array_refs,
   to walk over all statements of all reachable BBs and call
   check_array_bounds on them.  */

class check_array_bounds_dom_walker : public dom_walker
{
 public:
  check_array_bounds_dom_walker (vrp_prop *prop)
    : dom_walker (CDI_DOMINATORS,
		  /* Discover non-executable edges, preserving EDGE_EXECUTABLE
		     flags, so that we can merge in information on
		     non-executable edges from vrp_folder .  */
		  REACHABLE_BLOCKS_PRESERVING_FLAGS),
      m_prop (prop) {}
  ~check_array_bounds_dom_walker () {}

  edge before_dom_children (basic_block) FINAL OVERRIDE;

 private:
  vrp_prop *m_prop;
};

/* Implementation of dom_walker::before_dom_children.

   Walk over all statements of BB and call check_array_bounds on them,
   and determine if there's a unique successor edge.  */

edge
check_array_bounds_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator si;
  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      struct walk_stmt_info wi;
      if (!gimple_has_location (stmt)
	  || is_gimple_debug (stmt))
	continue;

      memset (&wi, 0, sizeof (wi));

      wi.info = m_prop;

      walk_gimple_op (stmt, check_array_bounds, &wi);
    }

  /* Determine if there's a unique successor edge, and if so, return
     that back to dom_walker, ensuring that we don't visit blocks that
     became unreachable during the VRP propagation
     (PR tree-optimization/83312).  */
  return find_taken_edge (bb, NULL_TREE);
}

/* Walk over all statements of all reachable BBs and call check_array_bounds
   on them.  */

void
vrp_prop::check_all_array_refs ()
{
  check_array_bounds_dom_walker w (this);
  w.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
/* Return true if all imm uses of VAR are either in STMT, or
   feed (optionally through a chain of single imm uses) GIMPLE_COND
   in basic block COND_BB.  */

static bool
all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    if (USE_STMT (use_p) != stmt)
      {
	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
	if (is_gimple_debug (use_stmt))
	  continue;
	while (is_gimple_assign (use_stmt)
	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	       && single_imm_use (gimple_assign_lhs (use_stmt),
				  &use2_p, &use_stmt2))
	  use_stmt = use_stmt2;
	if (gimple_code (use_stmt) != GIMPLE_COND
	    || gimple_bb (use_stmt) != cond_bb)
	  return false;
      }
  return true;
}
/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:
   x_5 = ASSERT_EXPR <x_3, ...>;
   If x_3 has no other immediate uses (checked by caller),
   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
   from the non-zero bitmask.  */

void
maybe_set_nonzero_bits (edge e, tree var)
{
  basic_block cond_bb = e->src;
  gimple *stmt = last_stmt (cond_bb);
  tree cst;

  if (stmt == NULL
      || gimple_code (stmt) != GIMPLE_COND
      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
				     ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (stmt)))
    return;

  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (stmt) != var)
    {
      gimple *stmt2;

      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (stmt2)
	  || gimple_assign_rhs1 (stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  cst = gimple_assign_rhs2 (stmt);
  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
					  wi::to_wide (cst)));
}
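
/* A worked example for the comment above: if get_nonzero_bits (x_3)
   was 0xff and the mask is 31 (0x1f), then on the edge where
   (x_3 & 31) == 0 holds, the call above narrows the non-zero bits of
   x_3 to 0xff & ~0x1f == 0xe0.  */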
/* Convert range assertion expressions into the implied copies and
   copy propagate away the copies.  Doing the trivial copy propagation
   here avoids the need to run the full copy propagation pass after
   VRP.

   FIXME, this will eventually lead to copy propagation removing the
   names that had useful range information attached to them.  For
   instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
   then N_i will have the range [3, +INF].

   However, by converting the assertion into the implied copy
   operation N_i = N_j, we will then copy-propagate N_j into the uses
   of N_i and lose the range information.  We may want to hold on to
   ASSERT_EXPRs a little while longer as the ranges could be used in
   things like jump threading.

   The problem with keeping ASSERT_EXPRs around is that passes after
   VRP need to handle them appropriately.

   Another approach would be to make the range information a first
   class property of the SSA_NAME so that it can be queried from
   any pass.  This is made somewhat more complex by the need for
   multiple ranges to be associated with one SSA_NAME.  */

static void
remove_range_assertions (void)
{
  basic_block bb;
  gimple_stmt_iterator si;
  /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
     a basic block preceded by GIMPLE_COND branching to it and
     __builtin_trap, -1 if not yet checked, 0 otherwise.  */
  int is_unreachable;

  /* Note that the BSI iterator bump happens at the bottom of the
     loop and no bump is necessary if we're removing the statement
     referenced by the current BSI.  */
  FOR_EACH_BB_FN (bb, cfun)
    for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
      {
	gimple *stmt = gsi_stmt (si);

	if (is_gimple_assign (stmt)
	    && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
	  {
	    tree lhs = gimple_assign_lhs (stmt);
	    tree rhs = gimple_assign_rhs1 (stmt);
	    tree var;

	    var = ASSERT_EXPR_VAR (rhs);

	    if (TREE_CODE (var) == SSA_NAME
		&& !POINTER_TYPE_P (TREE_TYPE (lhs))
		&& SSA_NAME_RANGE_INFO (lhs))
	      {
		if (is_unreachable == -1)
		  {
		    is_unreachable = 0;
		    if (single_pred_p (bb)
			&& assert_unreachable_fallthru_edge_p
				    (single_pred_edge (bb)))
		      is_unreachable = 1;
		  }
		/* Handle
		   if (x_7 >= 10 && x_7 < 20)
		     __builtin_unreachable ();
		   x_8 = ASSERT_EXPR <x_7, ...>;
		   if the only uses of x_7 are in the ASSERT_EXPR and
		   in the condition.  In that case, we can copy the
		   range info from x_8 computed in this pass also
		   for x_7.  */
		if (is_unreachable
		    && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
							  single_pred (bb)))
		  {
		    set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
				    SSA_NAME_RANGE_INFO (lhs)->get_min (),
				    SSA_NAME_RANGE_INFO (lhs)->get_max ());
		    maybe_set_nonzero_bits (single_pred_edge (bb), var);
		  }
	      }

	    /* Propagate the RHS into every use of the LHS.  For SSA names
	       also propagate abnormals as it merely restores the original
	       IL in this case (a replace_uses_by would assert).  */
	    if (TREE_CODE (var) == SSA_NAME)
	      {
		imm_use_iterator iter;
		use_operand_p use_p;
		gimple *use_stmt;

		FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
		  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		    SET_USE (use_p, var);
	      }
	    else
	      replace_uses_by (lhs, var);

	    /* And finally, remove the copy, it is not needed.  */
	    gsi_remove (&si, true);
	    release_defs (stmt);
	  }
	else
	  {
	    if (!is_gimple_debug (gsi_stmt (si)))
	      is_unreachable = 0;
	    gsi_next (&si);
	  }
      }
}
/* Return true if STMT is interesting for VRP.  */

bool
stmt_interesting_for_vrp (gimple *stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      tree res = gimple_phi_result (stmt);
      return (!virtual_operand_p (res)
	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
		  || POINTER_TYPE_P (TREE_TYPE (res))));
    }
  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
    {
      tree lhs = gimple_get_lhs (stmt);

      /* In general, assignments with virtual operands are not useful
	 for deriving ranges, with the obvious exception of calls to
	 builtin functions.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && (is_gimple_call (stmt)
	      || !gimple_vuse (stmt)))
	return true;
      else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_ADD_OVERFLOW:
	  case IFN_SUB_OVERFLOW:
	  case IFN_MUL_OVERFLOW:
	  case IFN_ATOMIC_COMPARE_EXCHANGE:
	    /* These internal calls return _Complex integer type,
	       but are interesting to VRP nevertheless.  */
	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
	      return true;
	    break;
	  default:
	    break;
	  }
    }
  else if (gimple_code (stmt) == GIMPLE_COND
	   || gimple_code (stmt) == GIMPLE_SWITCH)
    return true;

  return false;
}
/* Initialization required by ssa_propagate engine.  */

void
vrp_prop::vrp_initialize ()
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  if (!stmt_interesting_for_vrp (phi))
	    {
	      tree lhs = PHI_RESULT (phi);
	      get_value_range (lhs)->set_varying ();
	      prop_set_simulate_again (phi, false);
	    }
	  else
	    prop_set_simulate_again (phi, true);
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);

	  /* If the statement is a control insn, we always want to
	     simulate it at least once.  Failure to do so means that
	     those edges will never get added.  */
	  if (stmt_ends_bb_p (stmt))
	    prop_set_simulate_again (stmt, true);
	  else if (!stmt_interesting_for_vrp (stmt))
	    {
	      set_defs_to_varying (stmt);
	      prop_set_simulate_again (stmt, false);
	    }
	  else
	    prop_set_simulate_again (stmt, true);
	}
    }
}
/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
   that includes the value VAL.  The search is restricted to the range
   [START_IDX, n - 1] where n is the size of VEC.

   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
   returned.

   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
   it is placed in IDX and false is returned.

   If VAL is larger than any CASE_LABEL, n is placed in IDX and false is
   returned.  */

static bool
find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
{
  size_t n = gimple_switch_num_labels (stmt);
  size_t low, high;

  /* Find case label for minimum of the value range or the next one.
     At each iteration we are searching in [low, high - 1].  */

  for (low = start_idx, high = n; high != low; )
    {
      tree t;
      int cmp;
      /* Note that i != high, so we never ask for n.  */
      size_t i = (high + low) / 2;
      t = gimple_switch_label (stmt, i);

      /* Cache the result of comparing CASE_LOW and val.  */
      cmp = tree_int_cst_compare (CASE_LOW (t), val);

      if (cmp == 0)
	{
	  /* Ranges cannot be empty.  */
	  *idx = i;
	  return true;
	}
      else if (cmp > 0)
	high = i;
      else
	{
	  low = i + 1;
	  if (CASE_HIGH (t) != NULL
	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
	    {
	      *idx = i;
	      return true;
	    }
	}
    }

  *idx = high;
  return false;
}
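
/* For example (illustrative), for a switch with case labels 1, 5 and 9
   at indices 1, 2 and 3:
     - VAL == 5 stores 2 in *IDX and returns true;
     - VAL == 6 stores 3 (the label for 9) in *IDX and returns false;
     - VAL == 10 stores 4 (== n) in *IDX and returns false.  */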
/* Searches the case label vector VEC for the range of CASE_LABELs that is used
   for values between MIN and MAX.  The first index is placed in MIN_IDX.  The
   last index is placed in MAX_IDX.  If the range of CASE_LABELs is empty
   then MAX_IDX < MIN_IDX.
   Returns true if the default label is not needed.  */

static bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
  bool max_take_default = !find_case_label_index (stmt, i, max, &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
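
/* For example (illustrative): for
     switch (i) { case 1: ... case 2: ... case 3: ... }
   and the range [1, 3], the labels for 1..3 are contiguous, so
   *MIN_IDX and *MAX_IDX delimit all three labels and the function
   returns true: the default label cannot be reached from this range.  */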
/* Evaluate statement STMT.  If the statement produces a useful range,
   return SSA_PROP_INTERESTING and record the SSA name with the
   interesting range into *OUTPUT_P.

   If STMT is a conditional branch and we can determine its truth
   value, the taken edge is recorded in *TAKEN_EDGE_P.

   If STMT produces a varying value, return SSA_PROP_VARYING.  */

enum ssa_prop_result
vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
  tree lhs = gimple_get_lhs (stmt);
  value_range vr;
  extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);

  if (*output_p)
    {
      if (update_value_range (*output_p, &vr))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Found new range for ");
	      print_generic_expr (dump_file, *output_p);
	      fprintf (dump_file, ": ");
	      dump_value_range (dump_file, &vr);
	      fprintf (dump_file, "\n");
	    }

	  if (vr.varying_p ())
	    return SSA_PROP_VARYING;

	  return SSA_PROP_INTERESTING;
	}
      return SSA_PROP_NOT_INTERESTING;
    }

  if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
    switch (gimple_call_internal_fn (stmt))
      {
      case IFN_ADD_OVERFLOW:
      case IFN_SUB_OVERFLOW:
      case IFN_MUL_OVERFLOW:
      case IFN_ATOMIC_COMPARE_EXCHANGE:
	/* These internal calls return _Complex integer type,
	   which VRP does not track, but the immediate uses
	   thereof might be interesting.  */
	if (lhs && TREE_CODE (lhs) == SSA_NAME)
	  {
	    imm_use_iterator iter;
	    use_operand_p use_p;
	    enum ssa_prop_result res = SSA_PROP_VARYING;

	    get_value_range (lhs)->set_varying ();

	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	      {
		gimple *use_stmt = USE_STMT (use_p);
		if (!is_gimple_assign (use_stmt))
		  continue;
		enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
		if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
		  continue;
		tree rhs1 = gimple_assign_rhs1 (use_stmt);
		tree use_lhs = gimple_assign_lhs (use_stmt);
		if (TREE_CODE (rhs1) != rhs_code
		    || TREE_OPERAND (rhs1, 0) != lhs
		    || TREE_CODE (use_lhs) != SSA_NAME
		    || !stmt_interesting_for_vrp (use_stmt)
		    || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
			|| !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
			|| !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
		  continue;

		/* If there is a change in the value range for any of the
		   REALPART_EXPR/IMAGPART_EXPR immediate uses, return
		   SSA_PROP_INTERESTING.  If there are any REALPART_EXPR
		   or IMAGPART_EXPR immediate uses, but none of them have
		   a change in their value ranges, return
		   SSA_PROP_NOT_INTERESTING.  If there are no
		   {REAL,IMAG}PART_EXPR uses at all,
		   return SSA_PROP_VARYING.  */
		value_range new_vr;
		extract_range_basic (&new_vr, use_stmt);
		const value_range *old_vr = get_value_range (use_lhs);
		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
		  res = SSA_PROP_INTERESTING;
		else
		  res = SSA_PROP_NOT_INTERESTING;
		new_vr.equiv_clear ();
		if (res == SSA_PROP_INTERESTING)
		  {
		    *output_p = lhs;
		    return res;
		  }
	      }

	    return res;
	  }
	break;
      default:
	break;
      }

  /* All other statements produce nothing of interest for VRP, so mark
     their outputs varying and prevent further simulation.  */
  set_defs_to_varying (stmt);

  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
}
/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
union_ranges (enum value_range_kind *vr0type,
	      tree *vr0min, tree *vr0max,
	      enum value_range_kind vr1type,
	      tree vr1min, tree vr1max)
{
  bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
  bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range union the result is varying.  */
	  goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, result of the union
	 operation is the anti-range or if both are anti-ranges
	 it covers all.  */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	goto give_up;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The result is the convex hull of both ranges.  */
	  if (operand_less_p (*vr0max, vr1min) == 1)
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (*vr0max) == INTEGER_CST
		  && TREE_CODE (vr1min) == INTEGER_CST
		  && vrp_val_is_min (*vr0min)
		  && vrp_val_is_max (vr1max))
		{
		  tree min = int_const_binop (PLUS_EXPR,
					      *vr0max,
					      build_int_cst (TREE_TYPE (*vr0max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      vr1min,
					      build_int_cst (TREE_TYPE (vr1min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0max = vr1max;
		}
	      else
		*vr0max = vr1max;
	    }
	  else
	    {
	      /* If the result can be an anti-range, create one.  */
	      if (TREE_CODE (vr1max) == INTEGER_CST
		  && TREE_CODE (*vr0min) == INTEGER_CST
		  && vrp_val_is_min (vr1min)
		  && vrp_val_is_max (*vr0max))
		{
		  tree min = int_const_binop (PLUS_EXPR,
					      vr1max,
					      build_int_cst (TREE_TYPE (vr1max), 1));
		  tree max = int_const_binop (MINUS_EXPR,
					      *vr0min,
					      build_int_cst (TREE_TYPE (*vr0min), 1));
		  if (!operand_less_p (max, min))
		    {
		      *vr0type = VR_ANTI_RANGE;
		      *vr0min = min;
		      *vr0max = max;
		    }
		  else
		    *vr0min = vr1min;
		}
	      else
		*vr0min = vr1min;
	    }
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Arbitrarily choose the right or left gap.  */
	  if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  *vr0type = VR_ANTI_RANGE;
	  if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	/* The result covers everything.  */
	goto give_up;
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1
	   && operand_less_p (*vr0max, vr1max) == 1)
    {
      /* [  (  ]  ) or [   ](   ) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
					 build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1
	   && operand_less_p (vr1max, *vr0max) == 1)
    {
      /* (  [  )  ] or (   )[   ] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    goto give_up;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    {
	      *vr0type = vr1type;
	      *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
					 build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  else
	    goto give_up;
	}
      else
	gcc_unreachable ();
    }
  else
    goto give_up;

  return;

give_up:
  *vr0type = VR_VARYING;
  *vr0min = NULL_TREE;
  *vr0max = NULL_TREE;
}
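
/* A worked example (with signed char operands, for illustration):
   unioning [-128, 3] with [7, 127] hits the convex-hull case above;
   since the low bound is the type minimum and the high bound the type
   maximum, the result is representable as the anti-range ~[4, 6]
   rather than being widened to [-128, 127].  */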
/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
   { VR1TYPE, VR1MIN, VR1MAX } and store the result
   in { *VR0TYPE, *VR0MIN, *VR0MAX }.  This may not be the smallest
   possible such range.  The resulting range is not canonicalized.  */

static void
intersect_ranges (enum value_range_kind *vr0type,
		  tree *vr0min, tree *vr0max,
		  enum value_range_kind vr1type,
		  tree vr1min, tree vr1max)
{
  bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
  bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);

  /* [] is vr0, () is vr1 in the following classification comments.  */
  if (mineq && maxeq)
    {
      /* [(  )] */
      if (*vr0type == vr1type)
	/* Nothing to do for equal ranges.  */
	;
      else if ((*vr0type == VR_RANGE
		&& vr1type == VR_ANTI_RANGE)
	       || (*vr0type == VR_ANTI_RANGE
		   && vr1type == VR_RANGE))
	{
	  /* For anti-range with range intersection the result is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if (operand_less_p (*vr0max, vr1min) == 1
	   || operand_less_p (vr1max, *vr0min) == 1)
    {
      /* [ ] ( ) or ( ) [ ]
	 If the ranges have an empty intersection, the result of the
	 intersect operation is the range for intersecting an
	 anti-range with a range or empty when intersecting two ranges.  */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_ANTI_RANGE)
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If the anti-ranges are adjacent to each other merge them.  */
	  if (TREE_CODE (*vr0max) == INTEGER_CST
	      && TREE_CODE (vr1min) == INTEGER_CST
	      && operand_less_p (*vr0max, vr1min) == 1
	      && integer_onep (int_const_binop (MINUS_EXPR,
						vr1min, *vr0max)))
	    *vr0max = vr1max;
	  else if (TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (*vr0min) == INTEGER_CST
		   && operand_less_p (vr1max, *vr0min) == 1
		   && integer_onep (int_const_binop (MINUS_EXPR,
						     *vr0min, vr1max)))
	    *vr0min = vr1min;
	  /* Else arbitrarily take VR0.  */
	}
    }
  else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
	   && (mineq || operand_less_p (*vr0min, vr1min) == 1))
    {
      /* [ (  ) ] or [(  ) ] or [ (  )] */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	{
	  /* If both are ranges the result is the inner one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* Choose the right gap if the left one is empty.  */
	  if (mineq)
	    {
	      if (TREE_CODE (vr1max) != INTEGER_CST)
		*vr0min = vr1max;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, vr1max,
				     build_int_cst (TREE_TYPE (vr1max), 1));
	    }
	  /* Choose the left gap if the right one is empty.  */
	  else if (maxeq)
	    {
	      if (TREE_CODE (vr1min) != INTEGER_CST)
		*vr0max = vr1min;
	      else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, vr1min,
				     build_int_cst (TREE_TYPE (vr1min), 1));
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (*vr0min)
		   && vrp_val_is_max (*vr0max))
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	  /* Else choose the range.  */
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	/* If both are anti-ranges the result is the outer one.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
	   && (mineq || operand_less_p (vr1min, *vr0min) == 1))
    {
      /* ( [  ] ) or ([  ] ) or ( [  ]) */
      if (*vr0type == VR_RANGE
	  && vr1type == VR_RANGE)
	/* Choose the inner range.  */
	;
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  /* Choose the right gap if the left is empty.  */
	  if (mineq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0max) != INTEGER_CST)
		*vr0min = *vr0max;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
		*vr0min
		  = int_const_binop (MINUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), -1));
	      else
		*vr0min
		  = int_const_binop (PLUS_EXPR, *vr0max,
				     build_int_cst (TREE_TYPE (*vr0max), 1));
	      *vr0max = vr1max;
	    }
	  /* Choose the left gap if the right is empty.  */
	  else if (maxeq)
	    {
	      *vr0type = VR_RANGE;
	      if (TREE_CODE (*vr0min) != INTEGER_CST)
		*vr0max = *vr0min;
	      else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
		       && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
		*vr0max
		  = int_const_binop (PLUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), -1));
	      else
		*vr0max
		  = int_const_binop (MINUS_EXPR, *vr0min,
				     build_int_cst (TREE_TYPE (*vr0min), 1));
	      *vr0min = vr1min;
	    }
	  /* Choose the anti-range if the range is effectively varying.  */
	  else if (vrp_val_is_min (vr1min)
		   && vrp_val_is_max (vr1max))
	    ;
	  /* Choose the anti-range if it is ~[0,0], that range is special
	     enough to special case when vr1's range is relatively wide.
	     At least for types bigger than int - this covers pointers
	     and arguments to functions like ctz.  */
	  else if (*vr0min == *vr0max
		   && integer_zerop (*vr0min)
		   && ((TYPE_PRECISION (TREE_TYPE (*vr0min))
			>= TYPE_PRECISION (integer_type_node))
		       || POINTER_TYPE_P (TREE_TYPE (*vr0min)))
		   && TREE_CODE (vr1max) == INTEGER_CST
		   && TREE_CODE (vr1min) == INTEGER_CST
		   && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
		       < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
	    ;
	  /* Else choose the range.  */
	  else
	    {
	      *vr0type = vr1type;
	      *vr0min = vr1min;
	      *vr0max = vr1max;
	    }
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  /* If both are anti-ranges the result is the outer one.  */
	  *vr0type = vr1type;
	  *vr0min = vr1min;
	  *vr0max = vr1max;
	}
      else if (vr1type == VR_ANTI_RANGE
	       && *vr0type == VR_RANGE)
	{
	  /* The intersection is empty.  */
	  *vr0type = VR_UNDEFINED;
	  *vr0min = NULL_TREE;
	  *vr0max = NULL_TREE;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (vr1min, *vr0max) == 1
	    || operand_equal_p (vr1min, *vr0max, 0))
	   && operand_less_p (*vr0min, vr1min) == 1)
    {
      /* [  (  ]  ) or [  ](  ) */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, vr1min,
				       build_int_cst (TREE_TYPE (vr1min), 1));
	  else
	    *vr0max = vr1min;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
				       build_int_cst (TREE_TYPE (*vr0max), 1));
	  else
	    *vr0min = *vr0max;
	  *vr0max = vr1max;
	}
      else
	gcc_unreachable ();
    }
  else if ((operand_less_p (*vr0min, vr1max) == 1
	    || operand_equal_p (*vr0min, vr1max, 0))
	   && operand_less_p (vr1min, *vr0min) == 1)
    {
      /* (  [  )  ] or (  )[  ] */
      if (*vr0type == VR_ANTI_RANGE
	  && vr1type == VR_ANTI_RANGE)
	*vr0min = vr1min;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_RANGE)
	*vr0max = vr1max;
      else if (*vr0type == VR_RANGE
	       && vr1type == VR_ANTI_RANGE)
	{
	  if (TREE_CODE (vr1max) == INTEGER_CST)
	    *vr0min = int_const_binop (PLUS_EXPR, vr1max,
				       build_int_cst (TREE_TYPE (vr1max), 1));
	  else
	    *vr0min = vr1max;
	}
      else if (*vr0type == VR_ANTI_RANGE
	       && vr1type == VR_RANGE)
	{
	  *vr0type = VR_RANGE;
	  if (TREE_CODE (*vr0min) == INTEGER_CST)
	    *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
				       build_int_cst (TREE_TYPE (*vr0min), 1));
	  else
	    *vr0max = *vr0min;
	  *vr0min = vr1min;
	}
      else
	gcc_unreachable ();
    }

  /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
     result for the intersection.  That's always a conservative
     correct estimate unless VR1 is a constant singleton range
     in which case we choose that.  */
  if (vr1type == VR_RANGE
      && is_gimple_min_invariant (vr1min)
      && vrp_operand_equal_p (vr1min, vr1max))
    {
      *vr0type = vr1type;
      *vr0min = vr1min;
      *vr0max = vr1max;
    }
}
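
/* A worked example (for illustration): intersecting [1, 10] with
   ~[1, 5] falls into the "[ (  ) ]" containment case above with MINEQ
   set; the left gap is empty, so the right gap is chosen and the
   result is [6, 10].  */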
/* Intersect the two value-ranges *VR0 and *VR1 and store the result
   in *VR0.  This may not be the smallest possible such range.  */

void
value_range::intersect_helper (value_range *vr0, const value_range *vr1)
{
  /* If either range is VR_VARYING the other one wins.  */
  if (vr1->varying_p ())
    return;
  if (vr0->varying_p ())
    {
      vr0->deep_copy (vr1);
      return;
    }

  /* When either range is VR_UNDEFINED the resulting range is
     VR_UNDEFINED, too.  */
  if (vr0->undefined_p ())
    return;
  if (vr1->undefined_p ())
    {
      vr0->set_undefined ();
      return;
    }

  value_range_kind vr0type = vr0->kind ();
  tree vr0min = vr0->min ();
  tree vr0max = vr0->max ();
  intersect_ranges (&vr0type, &vr0min, &vr0max,
		    vr1->kind (), vr1->min (), vr1->max ());
  /* Make sure to canonicalize the result though as the inversion of a
     VR_RANGE can still be a VR_RANGE.  Work on a temporary so we can
     fall back to vr0 when this turns things to varying.  */
  value_range tem;
  tem.set_and_canonicalize (vr0type, vr0min, vr0max);
  /* If that failed, use the saved original VR0.  */
  if (tem.varying_p ())
    return;
  vr0->update (tem.kind (), tem.min (), tem.max ());

  /* If the result is VR_UNDEFINED there is no need to mess with
     the equivalencies.  */
  if (vr0->undefined_p ())
    return;

  /* The resulting set of equivalences for range intersection is the union of
     the two sets.  */
  if (vr0->m_equiv && vr1->m_equiv && vr0->m_equiv != vr1->m_equiv)
    bitmap_ior_into (vr0->m_equiv, vr1->m_equiv);
  else if (vr1->m_equiv && !vr0->m_equiv)
    {
      /* All equivalence bitmaps are allocated from the same obstack.  So
	 we can use the obstack associated with VR to allocate vr0->equiv.  */
      vr0->m_equiv = BITMAP_ALLOC (vr1->m_equiv->obstack);
      bitmap_copy (m_equiv, vr1->m_equiv);
    }
}

void
value_range::intersect (const value_range *other)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Intersecting\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, other);
      fprintf (dump_file, "\n");
    }
  intersect_helper (this, other);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\n");
    }
}
/* Helper for meet operation for value ranges.  Given two value ranges VR0 and
   VR1, return a range that contains both VR0 and VR1.  This may not be the
   smallest possible such range.  */

value_range_base
value_range_base::union_helper (const value_range_base *vr0,
				const value_range_base *vr1)
{
  /* VR0 has the resulting range if VR1 is undefined or VR0 is varying.  */
  if (vr1->undefined_p ()
      || vr0->varying_p ())
    return *vr0;

  /* VR1 has the resulting range if VR0 is undefined or VR1 is varying.  */
  if (vr0->undefined_p ()
      || vr1->varying_p ())
    return *vr1;

  value_range_kind vr0type = vr0->kind ();
  tree vr0min = vr0->min ();
  tree vr0max = vr0->max ();
  union_ranges (&vr0type, &vr0min, &vr0max,
		vr1->kind (), vr1->min (), vr1->max ());

  /* Work on a temporary so we can still use vr0 when union returns varying.  */
  value_range_base tem;
  tem.set_and_canonicalize (vr0type, vr0min, vr0max);

  /* Failed to find an efficient meet.  Before giving up and setting
     the result to VARYING, see if we can at least derive a useful
     anti-range.  */
  if (tem.varying_p ()
      && range_includes_zero_p (vr0) == 0
      && range_includes_zero_p (vr1) == 0)
    {
      tem.set_nonnull (vr0->type ());
      return tem;
    }

  return tem;
}
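
/* For example (illustrative): unioning [1, x_2] with [5, 10], where
   x_2 is a symbolic bound, gives up in union_ranges because the bounds
   cannot be ordered; but since neither operand can contain zero the
   helper still returns the useful anti-range ~[0, 0] instead of
   varying.  */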
/* Meet operation for value ranges.  Given two value ranges VR0 and
   VR1, store in VR0 a range that contains both VR0 and VR1.  This
   may not be the smallest possible such range.  */

void
value_range_base::union_ (const value_range_base *other)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, other);
      fprintf (dump_file, "\n");
    }

  *this = union_helper (this, other);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\n");
    }
}

void
value_range::union_ (const value_range *other)
{
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Meeting\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\nand\n  ");
      dump_value_range (dump_file, other);
      fprintf (dump_file, "\n");
    }

  /* If THIS is undefined we want to pick up equivalences from OTHER.
     Just special-case this here rather than trying to fixup after the fact.  */
  if (this->undefined_p ())
    this->deep_copy (other);
  else
    {
      value_range_base tem = union_helper (this, other);
      this->update (tem.kind (), tem.min (), tem.max ());

      /* The resulting set of equivalences is always the intersection of
	 the two sets.  */
      if (this->m_equiv && other->m_equiv && this->m_equiv != other->m_equiv)
	bitmap_and_into (this->m_equiv, other->m_equiv);
      else if (this->m_equiv && !other->m_equiv)
	bitmap_clear (this->m_equiv);
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "to\n  ");
      dump_value_range (dump_file, this);
      fprintf (dump_file, "\n");
    }
}
/* Visit all arguments for PHI node PHI that flow through executable
   edges.  If a valid value range can be derived from all the incoming
   value ranges, set a new range for the LHS of PHI.  */

enum ssa_prop_result
vrp_prop::visit_phi (gphi *phi)
{
  tree lhs = PHI_RESULT (phi);
  value_range vr_result;
  extract_range_from_phi_node (phi, &vr_result);
  if (update_value_range (lhs, &vr_result))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found new range for ");
	  print_generic_expr (dump_file, lhs);
	  fprintf (dump_file, ": ");
	  dump_value_range (dump_file, &vr_result);
	  fprintf (dump_file, "\n");
	}

      if (vr_result.varying_p ())
	return SSA_PROP_VARYING;

      return SSA_PROP_INTERESTING;
    }

  /* Nothing changed, don't add outgoing edges.  */
  return SSA_PROP_NOT_INTERESTING;
}
class vrp_folder : public substitute_and_fold_engine
{
 public:
  tree get_value (tree) FINAL OVERRIDE;
  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
  bool fold_predicate_in (gimple_stmt_iterator *);

  class vr_values *vr_values;

  /* Delegators.  */
  tree vrp_evaluate_conditional (tree_code code, tree op0,
				 tree op1, gimple *stmt)
    { return vr_values->vrp_evaluate_conditional (code, op0, op1, stmt); }
  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
    { return vr_values->simplify_stmt_using_ranges (gsi); }
  tree op_with_constant_singleton_value_range (tree op)
    { return vr_values->op_with_constant_singleton_value_range (op); }
};
/* If the statement pointed to by SI has a predicate whose value can be
   computed using the value range information computed by VRP, compute
   its value and return true.  Otherwise, return false.  */

bool
vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
{
  bool assignment_p = false;
  tree val;
  gimple *stmt = gsi_stmt (*si);

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
    {
      assignment_p = true;
      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
				      gimple_assign_rhs1 (stmt),
				      gimple_assign_rhs2 (stmt),
				      stmt);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
				    gimple_cond_lhs (cond_stmt),
				    gimple_cond_rhs (cond_stmt),
				    stmt);
  else
    return false;

  if (val)
    {
      if (assignment_p)
	val = fold_convert (gimple_expr_type (stmt), val);

      if (dump_file)
	{
	  fprintf (dump_file, "Folding predicate ");
	  print_gimple_expr (dump_file, stmt, 0);
	  fprintf (dump_file, " to ");
	  print_generic_expr (dump_file, val);
	  fprintf (dump_file, "\n");
	}

      if (is_gimple_assign (stmt))
	gimple_assign_set_rhs_from_tree (si, val);
      else
	{
	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
	  gcond *cond_stmt = as_a <gcond *> (stmt);
	  if (integer_zerop (val))
	    gimple_cond_make_false (cond_stmt);
	  else if (integer_onep (val))
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	}

      return true;
    }

  return false;
}
/* Callback for substitute_and_fold folding the stmt at *SI.  */

bool
vrp_folder::fold_stmt (gimple_stmt_iterator *si)
{
  if (fold_predicate_in (si))
    return true;

  return simplify_stmt_using_ranges (si);
}

/* If OP has a value range with a single constant value return that,
   otherwise return NULL_TREE.  This returns OP itself if OP is a
   constant.

   Implemented as a pure wrapper right now, but this will change.  */

tree
vrp_folder::get_value (tree op)
{
  return op_with_constant_singleton_value_range (op);
}
/* Return the LHS of any ASSERT_EXPR where OP appears as the first
   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
   BB.  If no such ASSERT_EXPR is found, return OP.  */

static tree
lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
{
  imm_use_iterator imm_iter;
  gimple *use_stmt;
  use_operand_p use_p;

  if (TREE_CODE (op) == SSA_NAME)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
	{
	  use_stmt = USE_STMT (use_p);
	  if (use_stmt != stmt
	      && gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
	    return gimple_assign_lhs (use_stmt);
	}
    }
  return op;
}

static class vr_values *x_vr_values;
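
/* For instance (illustrative GIMPLE, invented for this comment): given

     <bb A>:
     x_5 = ASSERT_EXPR <x_3, x_3 > 0>;
     ...
     <bb B>:                  <-- dominated by A
     if (x_3 > 4) ...

   looking up x_3 from bb B returns x_5, whose value range reflects
   the dominating assertion.  */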
/* A trivial wrapper so that we can present the generic jump threading
   code with a simple API for simplifying statements.  STMT is the
   statement we want to simplify, WITHIN_STMT provides the location
   for any overflow warnings.  */

static tree
simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
    class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
    basic_block bb)
{
  /* First see if the conditional is in the hash table.  */
  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
    return cached_lhs;

  vr_values *vr_values = x_vr_values;
  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree op0 = gimple_cond_lhs (cond_stmt);
      op0 = lhs_of_dominating_assert (op0, bb, stmt);

      tree op1 = gimple_cond_rhs (cond_stmt);
      op1 = lhs_of_dominating_assert (op1, bb, stmt);

      return vr_values->vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
						  op0, op1, within_stmt);
    }

  /* We simplify a switch statement by trying to determine which case label
     will be taken.  If we are successful then we return the corresponding
     CASE_LABEL_EXPR.  */
  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
    {
      tree op = gimple_switch_index (switch_stmt);
      if (TREE_CODE (op) != SSA_NAME)
	return NULL_TREE;

      op = lhs_of_dominating_assert (op, bb, stmt);

      const value_range *vr = vr_values->get_value_range (op);
      if (vr->undefined_p ()
	  || vr->varying_p ()
	  || vr->symbolic_p ())
	return NULL_TREE;

      if (vr->kind () == VR_RANGE)
	{
	  size_t i, j;

	  /* Get the range of labels that contain a part of the operand's
	     value range.  */
	  find_case_label_range (switch_stmt, vr->min (), vr->max (), &i, &j);

	  /* Is there only one such label?  */
	  if (i == j)
	    {
	      tree label = gimple_switch_label (switch_stmt, i);

	      /* The i'th label will be taken only if the value range of the
		 operand is entirely within the bounds of this label.  */
	      if (CASE_HIGH (label) != NULL_TREE
		  ? (tree_int_cst_compare (CASE_LOW (label), vr->min ()) <= 0
		     && tree_int_cst_compare (CASE_HIGH (label),
					      vr->max ()) >= 0)
		  : (tree_int_cst_equal (CASE_LOW (label), vr->min ())
		     && tree_int_cst_equal (vr->min (), vr->max ())))
		return label;
	    }

	  /* If there are no such labels, then the default label will be
	     taken.  */
	  if (i > j)
	    return gimple_switch_label (switch_stmt, 0);
	}

      if (vr->kind () == VR_ANTI_RANGE)
	{
	  unsigned n = gimple_switch_num_labels (switch_stmt);
	  tree min_label = gimple_switch_label (switch_stmt, 1);
	  tree max_label = gimple_switch_label (switch_stmt, n - 1);

	  /* The default label will be taken only if the anti-range of the
	     operand is entirely outside the bounds of all the (non-default)
	     case labels.  */
	  if (tree_int_cst_compare (vr->min (), CASE_LOW (min_label)) <= 0
	      && (CASE_HIGH (max_label) != NULL_TREE
		  ? tree_int_cst_compare (vr->max (),
					  CASE_HIGH (max_label)) >= 0
		  : tree_int_cst_compare (vr->max (),
					  CASE_LOW (max_label)) >= 0))
	    return gimple_switch_label (switch_stmt, 0);
	}

      return NULL_TREE;
    }

  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
    {
      tree lhs = gimple_assign_lhs (assign_stmt);
      if (TREE_CODE (lhs) == SSA_NAME
	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
	  && stmt_interesting_for_vrp (stmt))
	{
	  edge dummy_e;
	  tree dummy_tree;
	  value_range new_vr;
	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
					      &dummy_tree, &new_vr);
	  tree singleton;
	  if (new_vr.singleton_p (&singleton))
	    return singleton;
	}
    }

  return NULL_TREE;
}
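/* Illustrative example (ranges invented): if i_1 has range [3, 3] at

     switch (i_1) { case 0: ...; case 3: ...; default: ... }

   find_case_label_range returns the single "case 3" label, and the
   bounds test above confirms [3, 3] lies entirely within it, so the
   simplifier returns that label and the threader can bypass the
   switch on paths where the range holds.  */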
class vrp_dom_walker : public dom_walker
{
public:
  vrp_dom_walker (cdi_direction direction,
		  class const_and_copies *const_and_copies,
		  class avail_exprs_stack *avail_exprs_stack)
    : dom_walker (direction, REACHABLE_BLOCKS),
      m_const_and_copies (const_and_copies),
      m_avail_exprs_stack (avail_exprs_stack),
      m_dummy_cond (NULL) {}

  virtual edge before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

  class vr_values *vr_values;

private:
  class const_and_copies *m_const_and_copies;
  class avail_exprs_stack *m_avail_exprs_stack;

  gcond *m_dummy_cond;
};
/* Called before processing dominator children of BB.  We want to look
   at ASSERT_EXPRs and record information from them in the appropriate
   tables.

   We could look at other statements here.  It's not seen as likely
   to significantly increase the jump threads we discover.  */

edge
vrp_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  for (gsi = gsi_start_nondebug_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	{
	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  tree cond = TREE_OPERAND (rhs1, 1);
	  tree inverted = invert_truthvalue (cond);
	  vec<cond_equivalence> p;
	  p.create (3);
	  record_conditions (&p, cond, inverted);
	  for (unsigned int i = 0; i < p.length (); i++)
	    m_avail_exprs_stack->record_cond (&p[i]);

	  tree lhs = gimple_assign_lhs (stmt);
	  m_const_and_copies->record_const_or_copy (lhs,
						    TREE_OPERAND (rhs1, 0));
	  p.release ();
	}
      else
	break;
    }
  return NULL;
}
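/* Worked example (names invented): for the leading assertion

     x_4 = ASSERT_EXPR <x_1, x_1 > 10>;

   COND is "x_1 > 10" and INVERTED is "x_1 <= 10".  record_conditions
   enters these conditions (and forms they imply) into the expression
   table so later lookups of equivalent conditionals hit, and
   record_const_or_copy registers x_4 as a copy of x_1.  */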
/* Called after processing dominator children of BB.  This is where we
   actually call into the threader.  */

void
vrp_dom_walker::after_dom_children (basic_block bb)
{
  if (!m_dummy_cond)
    m_dummy_cond = gimple_build_cond (NE_EXPR,
				      integer_zero_node, integer_zero_node,
				      NULL, NULL);

  x_vr_values = vr_values;
  thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
			 m_avail_exprs_stack, NULL,
			 simplify_stmt_for_jump_threading);
  x_vr_values = NULL;

  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
/* Blocks which have more than one predecessor and more than
   one successor present jump threading opportunities, i.e.,
   when the block is reached from a specific predecessor, we
   may be able to determine which of the outgoing edges will
   be traversed.  When this optimization applies, we are able
   to avoid conditionals at runtime and we may expose secondary
   optimization opportunities.

   This routine is effectively a driver for the generic jump
   threading code.  It basically just presents the generic code
   with edges that may be suitable for jump threading.

   Unlike DOM, we do not iterate VRP if jump threading was successful.
   While iterating may expose new opportunities for VRP, it is expected
   those opportunities would be very limited and the compile time cost
   to expose those opportunities would be significant.

   As jump threading opportunities are discovered, they are registered
   for later realization.  */

static void
identify_jump_threads (class vr_values *vr_values)
{
  /* Ugh.  When substituting values earlier in this pass we can
     wipe the dominance information.  So rebuild the dominator
     information as we need it within the jump threading code.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* We do not allow VRP information to be used for jump threading
     across a back edge in the CFG.  Otherwise it becomes too
     difficult to avoid eliminating loop exit tests.  Of course
     EDGE_DFS_BACK is not accurate at this time so we have to
     recompute it.  */
  mark_dfs_back_edges ();

  /* Allocate our unwinder stack to unwind any temporary equivalences
     that might be recorded.  */
  const_and_copies *equiv_stack = new const_and_copies ();

  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);

  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
  walker.vr_values = vr_values;
  walker.walk (cfun->cfg->x_entry_block_ptr);

  /* We do not actually update the CFG or SSA graphs at this point as
     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
     handle ASSERT_EXPRs gracefully.  */
  delete equiv_stack;
  delete avail_exprs;
  delete avail_exprs_stack;
}
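/* A made-up sketch of the kind of opportunity this driver exposes:

     bb2: x_3 = ASSERT_EXPR <x_1, x_1 > 0>;
	  goto bb4;
     bb3: ...
	  goto bb4;
     bb4: if (x_1 > 0) goto bb5; else goto bb6;

   On the bb2 -> bb4 path the condition is known to be true, so the
   threader can duplicate bb4 and wire bb2 directly to bb5, removing
   the runtime test on that path.  */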
/* Traverse all the blocks folding conditionals with known ranges.  */

void
vrp_prop::vrp_finalize (bool warn_array_bounds_p)
{
  size_t i;

  /* We have completed propagating through the lattice.  */
  vr_values.set_lattice_propagation_complete ();

  if (dump_file)
    {
      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
      vr_values.dump_all_value_ranges (dump_file);
      fprintf (dump_file, "\n");
    }

  /* Export the computed ranges: mark never-null pointers and attach
     range info to the non-pointer SSA_NAMEs.  */
  for (i = 0; i < num_ssa_names; i++)
    {
      tree name = ssa_name (i);
      if (!name)
	continue;

      const value_range *vr = get_value_range (name);
      if (!vr->constant_p ())
	continue;

      if (POINTER_TYPE_P (TREE_TYPE (name))
	  && range_includes_zero_p (vr) == 0)
	set_ptr_nonnull (name);
      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
	set_range_info (name, *vr);
    }

  /* If we're checking array refs, we want to merge information on
     the executability of each edge between vrp_folder and the
     check_array_bounds_dom_walker: each can clear the
     EDGE_EXECUTABLE flag on edges, in different ways.

     Hence, if we're going to call check_all_array_refs, set
     the flag on every edge now, rather than in
     check_array_bounds_dom_walker's ctor; vrp_folder may clear
     it from some edges.  */
  if (warn_array_bounds && warn_array_bounds_p)
    set_all_edges_as_executable (cfun);

  class vrp_folder vrp_folder;
  vrp_folder.vr_values = &vr_values;
  vrp_folder.substitute_and_fold ();

  if (warn_array_bounds && warn_array_bounds_p)
    check_all_array_refs ();
}
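/* For example (values invented): if propagation proved n_7 has range
   [0, 255] and that p_2 can never be zero, this finalization attaches
   [0, 255] to n_7 via set_range_info and marks p_2 with
   set_ptr_nonnull, so both facts remain available to later passes
   after the VRP lattice is released.  */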
/* Main entry point to VRP (Value Range Propagation).  This pass is
   loosely based on J. R. C. Patterson, ``Accurate Static Branch
   Prediction by Value Range Propagation,'' in SIGPLAN Conference on
   Programming Language Design and Implementation, pp. 67-78, 1995.
   Also available at http://citeseer.ist.psu.edu/patterson95accurate.html

   This is essentially an SSA-CCP pass modified to deal with ranges
   instead of constants.

   While propagating ranges, we may find that two or more SSA names
   have equivalent, though distinct, ranges.  For instance,

	1  x_9 = p_3->a;
	2  p_4 = ASSERT_EXPR <p_3, p_3 != 0>
	3  if (p_4 == q_2)
	4    p_5 = ASSERT_EXPR <p_4, p_4 == q_2>;
	5  endif
	6  if (q_2)

   In the code above, pointer p_5 has range [q_2, q_2], but from the
   code we can also determine that p_5 cannot be NULL and, if q_2 had
   a non-varying range, p_5's range should also be compatible with it.

   These equivalences are created by two expressions: ASSERT_EXPR and
   copy operations.  Since p_5 is an assertion on p_4, and p_4 was the
   result of another assertion, then we can use the fact that p_5 and
   p_4 are equivalent when evaluating p_5's range.

   Together with value ranges, we also propagate these equivalences
   between names so that we can take advantage of information from
   multiple ranges when doing final replacement.  Note that this
   equivalency relation is transitive but not symmetric.

   In the example above, p_5 is equivalent to p_4, q_2 and p_3, but we
   cannot assert that q_2 is equivalent to p_5 because q_2 may be used
   in contexts where that assertion does not hold (e.g., in line 6).

   TODO: the main difference between this pass and Patterson's is that
   we do not propagate edge probabilities.  We only compute whether
   edges can be taken or not.  That is, instead of having a spectrum
   of jump probabilities between 0 and 1, we only deal with 0, 1 and
   DON'T KNOW.  In the future, it may be worthwhile to propagate
   probabilities to aid branch prediction.  */
static unsigned int
execute_vrp (bool warn_array_bounds_p)
{
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();

  /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
     Inserting assertions may split edges which will invalidate
     EDGE_DFS_BACK.  */
  insert_range_assertions ();

  threadedge_initialize_values ();

  /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
  mark_dfs_back_edges ();

  class vrp_prop vrp_prop;
  vrp_prop.vrp_initialize ();
  vrp_prop.ssa_propagate ();
  vrp_prop.vrp_finalize (warn_array_bounds_p);

  /* We must identify jump threading opportunities before we release
     the datastructures built by VRP.  */
  identify_jump_threads (&vrp_prop.vr_values);

  /* A comparison of an SSA_NAME against a constant where the SSA_NAME
     was set by a type conversion can often be rewritten to use the
     RHS of the type conversion.

     However, doing so inhibits jump threading through the comparison.
     So that transformation is not performed until after jump threading
     is complete.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *last = last_stmt (bb);
      if (last && gimple_code (last) == GIMPLE_COND)
	vrp_prop.vr_values.simplify_cond_using_ranges_2 (as_a <gcond *> (last));
    }

  free_numbers_of_iterations_estimates (cfun);

  /* ASSERT_EXPRs must be removed before finalizing jump threads
     as finalizing jump threads calls the CFG cleanup code which
     does not properly handle ASSERT_EXPRs.  */
  remove_range_assertions ();

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  /* We identified all the jump threading opportunities earlier, but could
     not transform the CFG at that time.  This routine transforms the
     CFG and arranges for the dominator tree to be rebuilt if necessary.

     Note the SSA graph update will occur during the normal TODO
     processing by the pass manager.  */
  thread_through_all_blocks (false);

  vrp_prop.vr_values.cleanup_edges_and_switches ();
  threadedge_finalize_values ();

  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
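/* Illustrative case for the post-threading rewrite above (names and
   types invented): given

     _1 = (int) c_5;	<-- c_5 is an unsigned char
     if (_1 > 64)

   once c_5's range shows the widening cannot change the comparison's
   outcome, simplify_cond_using_ranges_2 can turn the test into
   "if (c_5 > 64)", removing the use of the conversion; doing that
   earlier would have hidden the condition from the jump threader.  */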
namespace {

const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_vrp : public gimple_opt_pass
{
public:
  pass_vrp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_vrp, ctxt), warn_array_bounds_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_vrp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      warn_array_bounds_p = param;
    }
  virtual bool gate (function *) { return flag_tree_vrp != 0; }
  virtual unsigned int execute (function *)
    { return execute_vrp (warn_array_bounds_p); }

private:
  bool warn_array_bounds_p;
}; // class pass_vrp

} // anon namespace

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt);
}
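/* Note: the pass pipeline instantiates pass_vrp more than once (an
   early and a late VRP run); clone () makes that possible, and
   set_pass_param (0, ...) selects per instance whether execute_vrp
   performs the -Warray-bounds checking.  */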
/* Worker for determine_value_range.  */

static void
determine_value_range_1 (value_range_base *vr, tree expr)
{
  if (BINARY_CLASS_P (expr))
    {
      value_range_base vr0, vr1;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      determine_value_range_1 (&vr1, TREE_OPERAND (expr, 1));
      extract_range_from_binary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				      &vr0, &vr1);
    }
  else if (UNARY_CLASS_P (expr))
    {
      value_range_base vr0;
      determine_value_range_1 (&vr0, TREE_OPERAND (expr, 0));
      extract_range_from_unary_expr (vr, TREE_CODE (expr), TREE_TYPE (expr),
				     &vr0, TREE_TYPE (TREE_OPERAND (expr, 0)));
    }
  else if (TREE_CODE (expr) == INTEGER_CST)
    vr->set (expr);
  else
    {
      value_range_kind kind;
      wide_int min, max;
      /* For SSA names try to extract range info computed by VRP.  Otherwise
	 fall back to varying.  */
      if (TREE_CODE (expr) == SSA_NAME
	  && INTEGRAL_TYPE_P (TREE_TYPE (expr))
	  && (kind = get_range_info (expr, &min, &max)) != VR_VARYING)
	vr->set (kind, wide_int_to_tree (TREE_TYPE (expr), min),
		 wide_int_to_tree (TREE_TYPE (expr), max));
      else
	vr->set_varying ();
    }
}
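/* Illustrative walk (names invented): for (x_1 + 4) * 2 the worker
   recurses into the PLUS_EXPR, combining x_1's recorded range info
   with the singleton [4, 4], then combines that result with [2, 2]
   via extract_range_from_binary_expr for the MULT_EXPR.  */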
/* Compute a value-range for EXPR and set it in *MIN and *MAX.  Return
   the determined range type.  */

value_range_kind
determine_value_range (tree expr, wide_int *min, wide_int *max)
{
  value_range_base vr;
  determine_value_range_1 (&vr, expr);
  if (vr.constant_p ())
    {
      *min = wi::to_wide (vr.min ());
      *max = wi::to_wide (vr.max ());
      return vr.kind ();
    }

  return VR_VARYING;
}
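/* A hypothetical caller, to show the expected usage:

     wide_int lo, hi;
     if (determine_value_range (expr, &lo, &hi) == VR_RANGE)
       ...  use [lo, hi] as bounds on EXPR's runtime value ...

   Consumers such as the loop iteration-count analysis check the
   returned kind to decide whether the bounds are meaningful.  */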