&& bitmap_bit_p (live[e->dest->index], SSA_NAME_VERSION (name)));
}
-/* Local functions. */
-static int compare_values (tree val1, tree val2);
-static int compare_values_warnv (tree val1, tree val2, bool *);
-
/* Location information for ASSERT_EXPRs. Each instance of this
structure describes an ASSERT_EXPR for an SSA name. Since a single
SSA name may have more than one assertion associated with it, these
ASSERT_EXPRs for SSA name N_I should be inserted. */
static assert_locus **asserts_for;
-struct switch_update {
- gswitch *stmt;
- tree vec;
-};
-
-static vec<edge> to_remove_edges;
-static vec<switch_update> to_update_switch_stmts;
+vec<edge> to_remove_edges;
+vec<switch_update> to_update_switch_stmts;
/* Return the maximum value for TYPE. */
-static inline tree
+tree
vrp_val_max (const_tree type)
{
if (!INTEGRAL_TYPE_P (type))
/* Return the minimum value for TYPE. */
-static inline tree
+tree
vrp_val_min (const_tree type)
{
if (!INTEGRAL_TYPE_P (type))
C typedefs and Ada subtypes can produce types whose TYPE_MAX_VALUE
is not == to the integer constant with the same value in the type. */
-static inline bool
+bool
vrp_val_is_max (const_tree val)
{
tree type_max = vrp_val_max (TREE_TYPE (val));
/* Return whether VAL is equal to the minimum value of its type. */
-static inline bool
+bool
vrp_val_is_min (const_tree val)
{
tree type_min = vrp_val_min (TREE_TYPE (val));
/* Set value range VR to {T, MIN, MAX, EQUIV}. */
-static void
+void
set_value_range (value_range *vr, enum value_range_type t, tree min,
tree max, bitmap equiv)
{
This routine exists to ease canonicalization in the case where we
extract ranges from var + CST op limit. */
-static void
+void
set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
tree min, tree max, bitmap equiv)
{
/* Copy value range FROM into value range TO. */
-static inline void
+void
copy_value_range (value_range *to, value_range *from)
{
set_value_range (to, from->type, from->min, from->max, from->equiv);
with values we get from statements, and exists to clear the
TREE_OVERFLOW flag. */
-static inline void
+void
set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
gcc_assert (is_gimple_min_invariant (val));
set_value_range (vr, VR_RANGE, val, val, equiv);
}
-/* Set value range VR to a non-negative range of type TYPE. */
-
-static inline void
-set_value_range_to_nonnegative (value_range *vr, tree type)
-{
- tree zero = build_int_cst (type, 0);
- set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
-}
-
/* Set value range VR to a non-NULL range of type TYPE. */
-static inline void
+void
set_value_range_to_nonnull (value_range *vr, tree type)
{
tree zero = build_int_cst (type, 0);
/* Set value range VR to a NULL range of type TYPE. */
-static inline void
+void
set_value_range_to_null (value_range *vr, tree type)
{
set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
-/* Set value range VR to a range of a truthvalue of type TYPE. */
-
-static inline void
-set_value_range_to_truthvalue (value_range *vr, tree type)
-{
- if (TYPE_PRECISION (type) == 1)
- set_value_range_to_varying (vr);
- else
- set_value_range (vr, VR_RANGE,
- build_int_cst (type, 0), build_int_cst (type, 1),
- vr->equiv);
-}
-
-
/* If abs (min) < abs (max), set VR to [-max, max], if
abs (min) >= abs (max), set VR to [-min, min]. */
set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
}
-
-/* Return value range information for VAR.
-
- If we have no values ranges recorded (ie, VRP is not running), then
- return NULL. Otherwise create an empty range if none existed for VAR. */
-
-value_range *
-vr_values::get_value_range (const_tree var)
-{
- static const value_range vr_const_varying
- = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
- value_range *vr;
- tree sym;
- unsigned ver = SSA_NAME_VERSION (var);
-
- /* If we have no recorded ranges, then return NULL. */
- if (! vr_value)
- return NULL;
-
- /* If we query the range for a new SSA name return an unmodifiable VARYING.
- We should get here at most from the substitute-and-fold stage which
- will never try to change values. */
- if (ver >= num_vr_values)
- return CONST_CAST (value_range *, &vr_const_varying);
-
- vr = vr_value[ver];
- if (vr)
- return vr;
-
- /* After propagation finished do not allocate new value-ranges. */
- if (values_propagated)
- return CONST_CAST (value_range *, &vr_const_varying);
-
- /* Create a default value range. */
- vr_value[ver] = vr = vrp_value_range_pool.allocate ();
- memset (vr, 0, sizeof (*vr));
-
- /* Defer allocating the equivalence set. */
- vr->equiv = NULL;
-
- /* If VAR is a default definition of a parameter, the variable can
- take any value in VAR's type. */
- if (SSA_NAME_IS_DEFAULT_DEF (var))
- {
- sym = SSA_NAME_VAR (var);
- if (TREE_CODE (sym) == PARM_DECL)
- {
- /* Try to use the "nonnull" attribute to create ~[0, 0]
- anti-ranges for pointers. Note that this is only valid with
- default definitions of PARM_DECLs. */
- if (POINTER_TYPE_P (TREE_TYPE (sym))
- && (nonnull_arg_p (sym)
- || get_ptr_nonnull (var)))
- set_value_range_to_nonnull (vr, TREE_TYPE (sym));
- else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
- {
- wide_int min, max;
- value_range_type rtype = get_range_info (var, &min, &max);
- if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
- set_value_range (vr, rtype,
- wide_int_to_tree (TREE_TYPE (var), min),
- wide_int_to_tree (TREE_TYPE (var), max),
- NULL);
- else
- set_value_range_to_varying (vr);
- }
- else
- set_value_range_to_varying (vr);
- }
- else if (TREE_CODE (sym) == RESULT_DECL
- && DECL_BY_REFERENCE (sym))
- set_value_range_to_nonnull (vr, TREE_TYPE (sym));
- }
-
- return vr;
-}
-
-/* Set value-ranges of all SSA names defined by STMT to varying. */
-
-void
-vr_values::set_defs_to_varying (gimple *stmt)
-{
- ssa_op_iter i;
- tree def;
- FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
- {
- value_range *vr = get_value_range (def);
- /* Avoid writing to vr_const_varying get_value_range may return. */
- if (vr->type != VR_VARYING)
- set_value_range_to_varying (vr);
- }
-}
-
-
/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
bool
/* Return true, if the bitmaps B1 and B2 are equal. */
-static inline bool
+bool
vrp_bitmap_equal_p (const_bitmap b1, const_bitmap b2)
{
return (b1 == b2
&& bitmap_equal_p (b1, b2)));
}
-/* Update the value range and equivalence set for variable VAR to
- NEW_VR. Return true if NEW_VR is different from VAR's previous
- value.
-
- NOTE: This function assumes that NEW_VR is a temporary value range
- object created for the sole purpose of updating VAR's range. The
- storage used by the equivalence set from NEW_VR will be freed by
- this function. Do not call update_value_range when NEW_VR
- is the range object associated with another SSA name. */
-
-bool
-vr_values::update_value_range (const_tree var, value_range *new_vr)
-{
- value_range *old_vr;
- bool is_new;
-
- /* If there is a value-range on the SSA name from earlier analysis
- factor that in. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
- {
- wide_int min, max;
- value_range_type rtype = get_range_info (var, &min, &max);
- if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
- {
- tree nr_min, nr_max;
- nr_min = wide_int_to_tree (TREE_TYPE (var), min);
- nr_max = wide_int_to_tree (TREE_TYPE (var), max);
- value_range nr = VR_INITIALIZER;
- set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
- vrp_intersect_ranges (new_vr, &nr);
- }
- }
-
- /* Update the value range, if necessary. */
- old_vr = get_value_range (var);
- is_new = old_vr->type != new_vr->type
- || !vrp_operand_equal_p (old_vr->min, new_vr->min)
- || !vrp_operand_equal_p (old_vr->max, new_vr->max)
- || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
-
- if (is_new)
- {
- /* Do not allow transitions up the lattice. The following
- is slightly more awkward than just new_vr->type < old_vr->type
- because VR_RANGE and VR_ANTI_RANGE need to be considered
- the same. We may not have is_new when transitioning to
- UNDEFINED. If old_vr->type is VARYING, we shouldn't be
- called. */
- if (new_vr->type == VR_UNDEFINED)
- {
- BITMAP_FREE (new_vr->equiv);
- set_value_range_to_varying (old_vr);
- set_value_range_to_varying (new_vr);
- return true;
- }
- else
- set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
- new_vr->equiv);
- }
-
- BITMAP_FREE (new_vr->equiv);
-
- return is_new;
-}
-
-
-/* Add VAR and VAR's equivalence set to EQUIV. This is the central
- point where equivalence processing can be turned on/off. */
-
-void
-vr_values::add_equivalence (bitmap *equiv, const_tree var)
-{
- unsigned ver = SSA_NAME_VERSION (var);
- value_range *vr = get_value_range (var);
-
- if (*equiv == NULL)
- *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
- bitmap_set_bit (*equiv, ver);
- if (vr && vr->equiv)
- bitmap_ior_into (*equiv, vr->equiv);
-}
-
-
/* Return true if VR is ~[0, 0]. */
-static inline bool
+bool
range_is_nonnull (value_range *vr)
{
return vr->type == VR_ANTI_RANGE
/* Return true if max and min of VR are INTEGER_CST. It's not necessary
a singleton. */
-static inline bool
+bool
range_int_cst_p (value_range *vr)
{
return (vr->type == VR_RANGE
/* Return true if VR is a INTEGER_CST singleton. */
-static inline bool
+bool
range_int_cst_singleton_p (value_range *vr)
{
return (range_int_cst_p (vr)
/* Return true if value range VR involves at least one symbol. */
-static inline bool
+bool
symbolic_range_p (value_range *vr)
{
return (!is_gimple_min_invariant (vr->min)
otherwise. We only handle additive operations and set NEG to true if the
symbol is negated and INV to the invariant part, if any. */
-static tree
+tree
get_single_symbol (tree t, bool *neg, tree *inv)
{
bool neg_;
return build2 (pointer_p ? POINTER_PLUS_EXPR : PLUS_EXPR, type, t, inv);
}
-/* Return true if value range VR involves exactly one symbol SYM. */
-
-static bool
-symbolic_range_based_on_p (value_range *vr, const_tree sym)
-{
- bool neg, min_has_symbol, max_has_symbol;
- tree inv;
-
- if (is_gimple_min_invariant (vr->min))
- min_has_symbol = false;
- else if (get_single_symbol (vr->min, &neg, &inv) == sym)
- min_has_symbol = true;
- else
- return false;
-
- if (is_gimple_min_invariant (vr->max))
- max_has_symbol = false;
- else if (get_single_symbol (vr->max, &neg, &inv) == sym)
- max_has_symbol = true;
- else
- return false;
-
- return (min_has_symbol || max_has_symbol);
-}
-
-/* Return true if the result of assignment STMT is know to be non-zero. */
-
-static bool
-gimple_assign_nonzero_p (gimple *stmt)
-{
- enum tree_code code = gimple_assign_rhs_code (stmt);
- bool strict_overflow_p;
- switch (get_gimple_rhs_class (code))
- {
- case GIMPLE_UNARY_RHS:
- return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_BINARY_RHS:
- return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt),
- &strict_overflow_p);
- case GIMPLE_TERNARY_RHS:
- return false;
- case GIMPLE_SINGLE_RHS:
- return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
- &strict_overflow_p);
- case GIMPLE_INVALID_RHS:
- gcc_unreachable ();
- default:
- gcc_unreachable ();
- }
-}
-
-/* Return true if STMT is known to compute a non-zero value. */
-
-static bool
-gimple_stmt_nonzero_p (gimple *stmt)
-{
- switch (gimple_code (stmt))
- {
- case GIMPLE_ASSIGN:
- return gimple_assign_nonzero_p (stmt);
- case GIMPLE_CALL:
- {
- tree fndecl = gimple_call_fndecl (stmt);
- if (!fndecl) return false;
- if (flag_delete_null_pointer_checks && !flag_check_new
- && DECL_IS_OPERATOR_NEW (fndecl)
- && !TREE_NOTHROW (fndecl))
- return true;
- /* References are always non-NULL. */
- if (flag_delete_null_pointer_checks
- && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
- return true;
- if (flag_delete_null_pointer_checks &&
- lookup_attribute ("returns_nonnull",
- TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
- return true;
-
- gcall *call_stmt = as_a<gcall *> (stmt);
- unsigned rf = gimple_call_return_flags (call_stmt);
- if (rf & ERF_RETURNS_ARG)
- {
- unsigned argnum = rf & ERF_RETURN_ARG_MASK;
- if (argnum < gimple_call_num_args (call_stmt))
- {
- tree arg = gimple_call_arg (call_stmt, argnum);
- if (SSA_VAR_P (arg)
- && infer_nonnull_range_by_attribute (stmt, arg))
- return true;
- }
- }
- return gimple_alloca_call_p (stmt);
- }
- default:
- gcc_unreachable ();
- }
-}
-
-/* Like tree_expr_nonzero_p, but this function uses value ranges
- obtained so far. */
-
-bool
-vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
-{
- if (gimple_stmt_nonzero_p (stmt))
- return true;
-
- /* If we have an expression of the form &X->a, then the expression
- is nonnull if X is nonnull. */
- if (is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
- {
- tree expr = gimple_assign_rhs1 (stmt);
- tree base = get_base_address (TREE_OPERAND (expr, 0));
-
- if (base != NULL_TREE
- && TREE_CODE (base) == MEM_REF
- && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
- {
- value_range *vr = get_value_range (TREE_OPERAND (base, 0));
- if (range_is_nonnull (vr))
- return true;
- }
- }
-
- return false;
-}
-
-/* Returns true if EXPR is a valid value (as expected by compare_values) --
- a gimple invariant, or SSA_NAME +- CST. */
-
-static bool
-valid_value_p (tree expr)
-{
- if (TREE_CODE (expr) == SSA_NAME)
- return true;
-
- if (TREE_CODE (expr) == PLUS_EXPR
- || TREE_CODE (expr) == MINUS_EXPR)
- return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
- && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
-
- return is_gimple_min_invariant (expr);
-}
-
/* Return
1 if VAL < VAL2
0 if !(VAL < VAL2)
-2 if those are incomparable. */
-static inline int
+int
operand_less_p (tree val, tree val2)
{
/* LT is folded faster than GE and others. Inline the common case. */
true if the return value is only valid if we assume that signed
overflow is undefined. */
-static int
+int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
if (val1 == val2)
/* Compare values like compare_values_warnv. */
-static int
+int
compare_values (tree val1, tree val2)
{
bool sop;
Benchmark compile/20001226-1.c compilation time after changing this
function. */
-static inline int
+int
value_inside_range (tree val, tree min, tree max)
{
int cmp1, cmp2;
/* If *VR has a value rante that is a single constant value return that,
otherwise return NULL_TREE. */
-static tree
+tree
value_range_constant_singleton (value_range *vr)
{
if (vr->type == VR_RANGE
return NULL_TREE;
}
-/* If OP has a value range with a single constant value return that,
- otherwise return NULL_TREE. This returns OP itself if OP is a
- constant. */
-
-tree
-vr_values::op_with_constant_singleton_value_range (tree op)
-{
- if (is_gimple_min_invariant (op))
- return op;
-
- if (TREE_CODE (op) != SSA_NAME)
- return NULL_TREE;
-
- return value_range_constant_singleton (get_value_range (op));
-}
+/* Wrapper around int_const_binop. Return true if we can compute the
+ result; i.e. if the operation doesn't overflow or if the overflow is
+ undefined. In the latter case (if the operation overflows and
+ overflow is undefined), then adjust the result to be -INF or +INF
+ depending on CODE, VAL1 and VAL2. Return the value in *RES.
-/* Return true if op is in a boolean [0, 1] value-range. */
+ Return false for division by zero, for which the result is
+ indeterminate. */
-bool
-vr_values::op_with_boolean_value_range_p (tree op)
+static bool
+vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
- value_range *vr;
+ bool overflow = false;
+ signop sign = TYPE_SIGN (TREE_TYPE (val1));
- if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
- return true;
+ switch (code)
+ {
+ case RSHIFT_EXPR:
+ case LSHIFT_EXPR:
+ {
+ wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
+ if (wi::neg_p (wval2))
+ {
+ wval2 = -wval2;
+ if (code == RSHIFT_EXPR)
+ code = LSHIFT_EXPR;
+ else
+ code = RSHIFT_EXPR;
+ }
- if (integer_zerop (op)
- || integer_onep (op))
- return true;
+ if (code == RSHIFT_EXPR)
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ *res = wi::rshift (wi::to_wide (val1), wval2, sign);
+ else
+ *res = wi::lshift (wi::to_wide (val1), wval2);
+ break;
+ }
- if (TREE_CODE (op) != SSA_NAME)
- return false;
+ case MULT_EXPR:
+ *res = wi::mul (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
+ break;
- vr = get_value_range (op);
- return (vr->type == VR_RANGE
- && integer_zerop (vr->min)
- && integer_onep (vr->max));
-}
+ case TRUNC_DIV_EXPR:
+ case EXACT_DIV_EXPR:
+ if (val2 == 0)
+ return false;
+ else
+ *res = wi::div_trunc (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
+ break;
-/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
- true and store it in *VR_P. */
+ case FLOOR_DIV_EXPR:
+ if (val2 == 0)
+ return false;
+ *res = wi::div_floor (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
+ break;
-void
-vr_values::extract_range_for_var_from_comparison_expr (tree var,
- enum tree_code cond_code,
- tree op, tree limit,
- value_range *vr_p)
-{
- tree min, max, type;
- value_range *limit_vr;
- type = TREE_TYPE (var);
- gcc_assert (limit != var);
+ case CEIL_DIV_EXPR:
+ if (val2 == 0)
+ return false;
+ *res = wi::div_ceil (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
+ break;
- /* For pointer arithmetic, we only keep track of pointer equality
- and inequality. */
- if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
- {
- set_value_range_to_varying (vr_p);
- return;
- }
-
- /* If LIMIT is another SSA name and LIMIT has a range of its own,
- try to use LIMIT's range to avoid creating symbolic ranges
- unnecessarily. */
- limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
-
- /* LIMIT's range is only interesting if it has any useful information. */
- if (! limit_vr
- || limit_vr->type == VR_UNDEFINED
- || limit_vr->type == VR_VARYING
- || (symbolic_range_p (limit_vr)
- && ! (limit_vr->type == VR_RANGE
- && (limit_vr->min == limit_vr->max
- || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
- limit_vr = NULL;
-
- /* Initially, the new range has the same set of equivalences of
- VAR's range. This will be revised before returning the final
- value. Since assertions may be chained via mutually exclusive
- predicates, we will need to trim the set of equivalences before
- we are done. */
- gcc_assert (vr_p->equiv == NULL);
- add_equivalence (&vr_p->equiv, var);
-
- /* Extract a new range based on the asserted comparison for VAR and
- LIMIT's value range. Notice that if LIMIT has an anti-range, we
- will only use it for equality comparisons (EQ_EXPR). For any
- other kind of assertion, we cannot derive a range from LIMIT's
- anti-range that can be used to describe the new range. For
- instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
- then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
- no single range for x_2 that could describe LE_EXPR, so we might
- as well build the range [b_4, +INF] for it.
- One special case we handle is extracting a range from a
- range test encoded as (unsigned)var + CST <= limit. */
- if (TREE_CODE (op) == NOP_EXPR
- || TREE_CODE (op) == PLUS_EXPR)
- {
- if (TREE_CODE (op) == PLUS_EXPR)
- {
- min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
- TREE_OPERAND (op, 1));
- max = int_const_binop (PLUS_EXPR, limit, min);
- op = TREE_OPERAND (op, 0);
- }
- else
- {
- min = build_int_cst (TREE_TYPE (var), 0);
- max = limit;
- }
-
- /* Make sure to not set TREE_OVERFLOW on the final type
- conversion. We are willingly interpreting large positive
- unsigned values as negative signed values here. */
- min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
- max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
-
- /* We can transform a max, min range to an anti-range or
- vice-versa. Use set_and_canonicalize_value_range which does
- this for us. */
- if (cond_code == LE_EXPR)
- set_and_canonicalize_value_range (vr_p, VR_RANGE,
- min, max, vr_p->equiv);
- else if (cond_code == GT_EXPR)
- set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
- min, max, vr_p->equiv);
- else
- gcc_unreachable ();
- }
- else if (cond_code == EQ_EXPR)
- {
- enum value_range_type range_type;
-
- if (limit_vr)
- {
- range_type = limit_vr->type;
- min = limit_vr->min;
- max = limit_vr->max;
- }
- else
- {
- range_type = VR_RANGE;
- min = limit;
- max = limit;
- }
-
- set_value_range (vr_p, range_type, min, max, vr_p->equiv);
-
- /* When asserting the equality VAR == LIMIT and LIMIT is another
- SSA name, the new range will also inherit the equivalence set
- from LIMIT. */
- if (TREE_CODE (limit) == SSA_NAME)
- add_equivalence (&vr_p->equiv, limit);
- }
- else if (cond_code == NE_EXPR)
- {
- /* As described above, when LIMIT's range is an anti-range and
- this assertion is an inequality (NE_EXPR), then we cannot
- derive anything from the anti-range. For instance, if
- LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
- not imply that VAR's range is [0, 0]. So, in the case of
- anti-ranges, we just assert the inequality using LIMIT and
- not its anti-range.
-
- If LIMIT_VR is a range, we can only use it to build a new
- anti-range if LIMIT_VR is a single-valued range. For
- instance, if LIMIT_VR is [0, 1], the predicate
- VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
- Rather, it means that for value 0 VAR should be ~[0, 0]
- and for value 1, VAR should be ~[1, 1]. We cannot
- represent these ranges.
-
- The only situation in which we can build a valid
- anti-range is when LIMIT_VR is a single-valued range
- (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
- build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
- if (limit_vr
- && limit_vr->type == VR_RANGE
- && compare_values (limit_vr->min, limit_vr->max) == 0)
- {
- min = limit_vr->min;
- max = limit_vr->max;
- }
- else
- {
- /* In any other case, we cannot use LIMIT's range to build a
- valid anti-range. */
- min = max = limit;
- }
-
- /* If MIN and MAX cover the whole range for their type, then
- just use the original LIMIT. */
- if (INTEGRAL_TYPE_P (type)
- && vrp_val_is_min (min)
- && vrp_val_is_max (max))
- min = max = limit;
-
- set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
- min, max, vr_p->equiv);
- }
- else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
- {
- min = TYPE_MIN_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
- max = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
- LT_EXPR. */
- max = limit_vr->max;
- }
-
- /* If the maximum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == LT_EXPR
- && compare_values (max, min) == 0)
- set_value_range_to_varying (vr_p);
- else
- {
- /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
- if (cond_code == LT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (max)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (max)))
- max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), -1));
- else
- max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
- build_int_cst (TREE_TYPE (max), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (max))
- TREE_NO_WARNING (max) = 1;
- }
-
- set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
- }
- }
- else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
- {
- max = TYPE_MAX_VALUE (type);
-
- if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
- min = limit;
- else
- {
- /* If LIMIT_VR is of the form [N1, N2], we need to build the
- range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
- GT_EXPR. */
- min = limit_vr->min;
- }
-
- /* If the minimum value forces us to be out of bounds, simply punt.
- It would be pointless to try and do anything more since this
- all should be optimized away above us. */
- if (cond_code == GT_EXPR
- && compare_values (min, max) == 0)
- set_value_range_to_varying (vr_p);
- else
- {
- /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
- if (cond_code == GT_EXPR)
- {
- if (TYPE_PRECISION (TREE_TYPE (min)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (min)))
- min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), -1));
- else
- min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
- build_int_cst (TREE_TYPE (min), 1));
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (min))
- TREE_NO_WARNING (min) = 1;
- }
-
- set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
- }
- }
- else
- gcc_unreachable ();
-
- /* Finally intersect the new range with what we already know about var. */
- vrp_intersect_ranges (vr_p, get_value_range (var));
-}
-
-/* Extract value range information from an ASSERT_EXPR EXPR and store
- it in *VR_P. */
-
-void
-vr_values::extract_range_from_assert (value_range *vr_p, tree expr)
-{
- tree var = ASSERT_EXPR_VAR (expr);
- tree cond = ASSERT_EXPR_COND (expr);
- tree limit, op;
- enum tree_code cond_code;
- gcc_assert (COMPARISON_CLASS_P (cond));
-
- /* Find VAR in the ASSERT_EXPR conditional. */
- if (var == TREE_OPERAND (cond, 0)
- || TREE_CODE (TREE_OPERAND (cond, 0)) == PLUS_EXPR
- || TREE_CODE (TREE_OPERAND (cond, 0)) == NOP_EXPR)
- {
- /* If the predicate is of the form VAR COMP LIMIT, then we just
- take LIMIT from the RHS and use the same comparison code. */
- cond_code = TREE_CODE (cond);
- limit = TREE_OPERAND (cond, 1);
- op = TREE_OPERAND (cond, 0);
- }
- else
- {
- /* If the predicate is of the form LIMIT COMP VAR, then we need
- to flip around the comparison code to create the proper range
- for VAR. */
- cond_code = swap_tree_comparison (TREE_CODE (cond));
- limit = TREE_OPERAND (cond, 0);
- op = TREE_OPERAND (cond, 1);
- }
- extract_range_for_var_from_comparison_expr (var, cond_code, op,
- limit, vr_p);
-}
-
-/* Extract range information from SSA name VAR and store it in VR. If
- VAR has an interesting range, use it. Otherwise, create the
- range [VAR, VAR] and return it. This is useful in situations where
- we may have conditionals testing values of VARYING names. For
- instance,
-
- x_3 = y_5;
- if (x_3 > y_5)
- ...
-
- Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
- always false. */
-
-void
-vr_values::extract_range_from_ssa_name (value_range *vr, tree var)
-{
- value_range *var_vr = get_value_range (var);
-
- if (var_vr->type != VR_VARYING)
- copy_value_range (vr, var_vr);
- else
- set_value_range (vr, VR_RANGE, var, var, NULL);
-
- add_equivalence (&vr->equiv, var);
-}
-
-
-/* Wrapper around int_const_binop. Return true if we can compute the
- result; i.e. if the operation doesn't overflow or if the overflow is
- undefined. In the latter case (if the operation overflows and
- overflow is undefined), then adjust the result to be -INF or +INF
- depending on CODE, VAL1 and VAL2. Return the value in *RES.
-
- Return false for division by zero, for which the result is
- indeterminate. */
-
-static bool
-vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
-{
- bool overflow = false;
- signop sign = TYPE_SIGN (TREE_TYPE (val1));
-
- switch (code)
- {
- case RSHIFT_EXPR:
- case LSHIFT_EXPR:
- {
- wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
- if (wi::neg_p (wval2))
- {
- wval2 = -wval2;
- if (code == RSHIFT_EXPR)
- code = LSHIFT_EXPR;
- else
- code = RSHIFT_EXPR;
- }
-
- if (code == RSHIFT_EXPR)
- /* It's unclear from the C standard whether shifts can overflow.
- The following code ignores overflow; perhaps a C standard
- interpretation ruling is needed. */
- *res = wi::rshift (wi::to_wide (val1), wval2, sign);
- else
- *res = wi::lshift (wi::to_wide (val1), wval2);
- break;
- }
-
- case MULT_EXPR:
- *res = wi::mul (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
- case TRUNC_DIV_EXPR:
- case EXACT_DIV_EXPR:
- if (val2 == 0)
- return false;
- else
- *res = wi::div_trunc (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
- case FLOOR_DIV_EXPR:
- if (val2 == 0)
- return false;
- *res = wi::div_floor (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
- case CEIL_DIV_EXPR:
- if (val2 == 0)
- return false;
- *res = wi::div_ceil (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
- case ROUND_DIV_EXPR:
- if (val2 == 0)
- return false;
- *res = wi::div_round (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
+ case ROUND_DIV_EXPR:
+ if (val2 == 0)
+ return false;
+ *res = wi::div_round (wi::to_wide (val1),
+ wi::to_wide (val2), sign, &overflow);
+ break;
default:
gcc_unreachable ();
bitmask if some bit is set, it means for all numbers in the range
the bit is 1, otherwise it might be 0 or 1. */
-static bool
+bool
zero_nonzero_bits_from_vr (const tree expr_type,
value_range *vr,
wide_int *may_be_nonzero,
the ranges of each of its operands *VR0 and *VR1 with resulting
type EXPR_TYPE. The resulting range is stored in *VR. */
-static void
+void
extract_range_from_binary_expr_1 (value_range *vr,
enum tree_code code, tree expr_type,
value_range *vr0_, value_range *vr1_)
set_value_range (vr, type, min, max, NULL);
}
-/* Extract range information from a binary expression OP0 CODE OP1 based on
- the ranges of each of its operands with resulting type EXPR_TYPE.
- The resulting range is stored in *VR. */
-
-void
-vr_values::extract_range_from_binary_expr (value_range *vr,
- enum tree_code code,
- tree expr_type, tree op0, tree op1)
-{
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
-
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
-
- /* Try harder for PLUS and MINUS if the range of one operand is symbolic
- and based on the other operand, for example if it was deduced from a
- symbolic comparison. When a bound of the range of the first operand
- is invariant, we set the corresponding bound of the new range to INF
- in order to avoid recursing on the range of the second operand. */
- if (vr->type == VR_VARYING
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op1) == SSA_NAME
- && vr0.type == VR_RANGE
- && symbolic_range_based_on_p (&vr0, op1))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr1 = VR_INITIALIZER;
-
- /* Try with VR0 and [-INF, OP1]. */
- if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
- set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
-
- /* Try with VR0 and [OP1, +INF]. */
- else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
- set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
-
- /* Try with VR0 and [OP1, OP1]. */
- else
- set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
- }
-
- if (vr->type == VR_VARYING
- && (code == PLUS_EXPR || code == MINUS_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && vr1.type == VR_RANGE
- && symbolic_range_based_on_p (&vr1, op0))
- {
- const bool minus_p = (code == MINUS_EXPR);
- value_range n_vr0 = VR_INITIALIZER;
-
- /* Try with [-INF, OP0] and VR1. */
- if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
- set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
-
- /* Try with [OP0, +INF] and VR1. */
- else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
- set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
-
- /* Try with [OP0, OP0] and VR1. */
- else
- set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
-
- extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
- }
-
- /* If we didn't derive a range for MINUS_EXPR, and
- op1's range is ~[op0,op0] or vice-versa, then we
- can derive a non-null range. This happens often for
- pointer subtraction. */
- if (vr->type == VR_VARYING
- && code == MINUS_EXPR
- && TREE_CODE (op0) == SSA_NAME
- && ((vr0.type == VR_ANTI_RANGE
- && vr0.min == op1
- && vr0.min == vr0.max)
- || (vr1.type == VR_ANTI_RANGE
- && vr1.min == op0
- && vr1.min == vr1.max)))
- set_value_range_to_nonnull (vr, TREE_TYPE (op0));
-}
-
/* Extract range information from a unary operation CODE based on
the range of its operand *VR0 with type OP0_TYPE with resulting type TYPE.
The resulting range is stored in *VR. */
return;
}
+/* Debugging dumps. */
-/* Extract range information from a unary expression CODE OP0 based on
- the range of its operand with resulting type TYPE.
- The resulting range is stored in *VR. */
-
-void
-vr_values::extract_range_from_unary_expr (value_range *vr, enum tree_code code,
- tree type, tree op0)
-{
- value_range vr0 = VR_INITIALIZER;
-
- /* Get value ranges for the operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- ::extract_range_from_unary_expr (vr, code, type, &vr0, TREE_TYPE (op0));
-}
+void dump_value_range (FILE *, const value_range *);
+void debug_value_range (value_range *);
+void dump_all_value_ranges (FILE *);
+void dump_vr_equiv (FILE *, bitmap);
+void debug_vr_equiv (bitmap);
-/* Extract range information from a conditional expression STMT based on
- the ranges of each of its operands and the expression code. */
+/* Dump value range VR to FILE. */
void
-vr_values::extract_range_from_cond_expr (value_range *vr, gassign *stmt)
+dump_value_range (FILE *file, const value_range *vr)
{
- tree op0, op1;
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
-
- /* Get value ranges for each operand. For constant operands, create
- a new value range with the operand to simplify processing. */
- op0 = gimple_assign_rhs2 (stmt);
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
-
- op1 = gimple_assign_rhs3 (stmt);
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
-
- /* The resulting value range is the union of the operand ranges */
- copy_value_range (vr, &vr0);
- vrp_meet (vr, &vr1);
-}
+ if (vr == NULL)
+ fprintf (file, "[]");
+ else if (vr->type == VR_UNDEFINED)
+ fprintf (file, "UNDEFINED");
+ else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
+ {
+ tree type = TREE_TYPE (vr->min);
+ fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
-/* Extract range information from a comparison expression EXPR based
- on the range of its operand and the expression code. */
+ if (INTEGRAL_TYPE_P (type)
+ && !TYPE_UNSIGNED (type)
+ && vrp_val_is_min (vr->min))
+ fprintf (file, "-INF");
+ else
+ print_generic_expr (file, vr->min);
-void
-vr_values::extract_range_from_comparison (value_range *vr, enum tree_code code,
- tree type, tree op0, tree op1)
-{
- bool sop;
- tree val;
+ fprintf (file, ", ");
- val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false, &sop,
- NULL);
- if (val)
- {
- /* Since this expression was found on the RHS of an assignment,
- its type may be different from _Bool. Convert VAL to EXPR's
- type. */
- val = fold_convert (type, val);
- if (is_gimple_min_invariant (val))
- set_value_range_to_value (vr, val, vr->equiv);
+ if (INTEGRAL_TYPE_P (type)
+ && vrp_val_is_max (vr->max))
+ fprintf (file, "+INF");
else
- set_value_range (vr, VR_RANGE, val, val, vr->equiv);
- }
- else
- /* The result of a comparison is always true or false. */
- set_value_range_to_truthvalue (vr, type);
-}
+ print_generic_expr (file, vr->max);
-/* Helper function for simplify_internal_call_using_ranges and
- extract_range_basic. Return true if OP0 SUBCODE OP1 for
- SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
- always overflow. Set *OVF to true if it is known to always
- overflow. */
+ fprintf (file, "]");
-bool
-vr_values::check_for_binary_op_overflow (enum tree_code subcode, tree type,
- tree op0, tree op1, bool *ovf)
-{
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *get_value_range (op0);
- else if (TREE_CODE (op0) == INTEGER_CST)
- set_value_range_to_value (&vr0, op0, NULL);
- else
- set_value_range_to_varying (&vr0);
+ if (vr->equiv)
+ {
+ bitmap_iterator bi;
+ unsigned i, c = 0;
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *get_value_range (op1);
- else if (TREE_CODE (op1) == INTEGER_CST)
- set_value_range_to_value (&vr1, op1, NULL);
- else
- set_value_range_to_varying (&vr1);
+ fprintf (file, " EQUIVALENCES: { ");
- if (!range_int_cst_p (&vr0)
- || TREE_OVERFLOW (vr0.min)
- || TREE_OVERFLOW (vr0.max))
- {
- vr0.min = vrp_val_min (TREE_TYPE (op0));
- vr0.max = vrp_val_max (TREE_TYPE (op0));
- }
- if (!range_int_cst_p (&vr1)
- || TREE_OVERFLOW (vr1.min)
- || TREE_OVERFLOW (vr1.max))
- {
- vr1.min = vrp_val_min (TREE_TYPE (op1));
- vr1.max = vrp_val_max (TREE_TYPE (op1));
- }
- *ovf = arith_overflowed_p (subcode, type, vr0.min,
- subcode == MINUS_EXPR ? vr1.max : vr1.min);
- if (arith_overflowed_p (subcode, type, vr0.max,
- subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
- return false;
- if (subcode == MULT_EXPR)
- {
- if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
- || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
- return false;
- }
- if (*ovf)
- {
- /* So far we found that there is an overflow on the boundaries.
- That doesn't prove that there is an overflow even for all values
- in between the boundaries. For that compute widest_int range
- of the result and see if it doesn't overlap the range of
- type. */
- widest_int wmin, wmax;
- widest_int w[4];
- int i;
- w[0] = wi::to_widest (vr0.min);
- w[1] = wi::to_widest (vr0.max);
- w[2] = wi::to_widest (vr1.min);
- w[3] = wi::to_widest (vr1.max);
- for (i = 0; i < 4; i++)
- {
- widest_int wt;
- switch (subcode)
- {
- case PLUS_EXPR:
- wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- case MINUS_EXPR:
- wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- case MULT_EXPR:
- wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
- break;
- default:
- gcc_unreachable ();
- }
- if (i == 0)
- {
- wmin = wt;
- wmax = wt;
- }
- else
+ EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
{
- wmin = wi::smin (wmin, wt);
- wmax = wi::smax (wmax, wt);
+ print_generic_expr (file, ssa_name (i));
+ fprintf (file, " ");
+ c++;
}
+
+ fprintf (file, "} (%u elements)", c);
}
- /* The result of op0 CODE op1 is known to be in range
- [wmin, wmax]. */
- widest_int wtmin = wi::to_widest (vrp_val_min (type));
- widest_int wtmax = wi::to_widest (vrp_val_max (type));
- /* If all values in [wmin, wmax] are smaller than
- [wtmin, wtmax] or all are larger than [wtmin, wtmax],
- the arithmetic operation will always overflow. */
- if (wmax < wtmin || wmin > wtmax)
- return true;
- return false;
}
- return true;
+ else if (vr->type == VR_VARYING)
+ fprintf (file, "VARYING");
+ else
+ fprintf (file, "INVALID RANGE");
}
-/* Try to derive a nonnegative or nonzero range out of STMT relying
- primarily on generic routines in fold in conjunction with range data.
- Store the result in *VR */
-
-void
-vr_values::extract_range_basic (value_range *vr, gimple *stmt)
-{
- bool sop;
- tree type = gimple_expr_type (stmt);
- if (is_gimple_call (stmt))
- {
- tree arg;
- int mini, maxi, zerov = 0, prec;
- enum tree_code subcode = ERROR_MARK;
- combined_fn cfn = gimple_call_combined_fn (stmt);
- scalar_int_mode mode;
+/* Dump value range VR to stderr. */
- switch (cfn)
- {
- case CFN_BUILT_IN_CONSTANT_P:
- /* If the call is __builtin_constant_p and the argument is a
- function parameter resolve it to false. This avoids bogus
- array bound warnings.
- ??? We could do this as early as inlining is finished. */
- arg = gimple_call_arg (stmt, 0);
- if (TREE_CODE (arg) == SSA_NAME
- && SSA_NAME_IS_DEFAULT_DEF (arg)
- && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
- && cfun->after_inlining)
- {
- set_value_range_to_null (vr, type);
- return;
- }
- break;
- /* Both __builtin_ffs* and __builtin_popcount return
- [0, prec]. */
- CASE_CFN_FFS:
- CASE_CFN_POPCOUNT:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec;
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* If arg is non-zero, then ffs or popcount
- are non-zero. */
- if ((vr0->type == VR_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 0)
- || (vr0->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr0->min, vr0->max) == 1))
- mini = 1;
- /* If some high bits are known to be zero,
- we can decrease the maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST
- && !operand_less_p (vr0->min,
- build_zero_cst (TREE_TYPE (vr0->min))))
- maxi = tree_floor_log2 (vr0->max) + 1;
- }
- goto bitop_builtin;
- /* __builtin_parity* returns [0, 1]. */
- CASE_CFN_PARITY:
- mini = 0;
- maxi = 1;
- goto bitop_builtin;
- /* __builtin_c[lt]z* return [0, prec-1], except for
- when the argument is 0, but that is undefined behavior.
- On many targets where the CLZ RTL or optab value is defined
- for 0 the value is prec, so include that in the range
- by default. */
- CASE_CFN_CLZ:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec;
- mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
- if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
- && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
- /* Handle only the single common value. */
- && zerov != prec)
- /* Magic value to give up, unless vr0 proves
- arg is non-zero. */
- mini = -2;
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* From clz of VR_RANGE minimum we can compute
- result maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->min) == INTEGER_CST)
- {
- maxi = prec - 1 - tree_floor_log2 (vr0->min);
- if (maxi != prec)
- mini = 0;
- }
- else if (vr0->type == VR_ANTI_RANGE
- && integer_zerop (vr0->min))
- {
- maxi = prec - 1;
- mini = 0;
- }
- if (mini == -2)
- break;
- /* From clz of VR_RANGE maximum we can compute
- result minimum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST)
- {
- mini = prec - 1 - tree_floor_log2 (vr0->max);
- if (mini == prec)
- break;
- }
- }
- if (mini == -2)
- break;
- goto bitop_builtin;
- /* __builtin_ctz* return [0, prec-1], except for
- when the argument is 0, but that is undefined behavior.
- If there is a ctz optab for this mode and
- CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
- otherwise just assume 0 won't be seen. */
- CASE_CFN_CTZ:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec - 1;
- mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
- if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
- && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
- {
- /* Handle only the two common values. */
- if (zerov == -1)
- mini = -1;
- else if (zerov == prec)
- maxi = prec;
- else
- /* Magic value to give up, unless vr0 proves
- arg is non-zero. */
- mini = -2;
- }
- if (TREE_CODE (arg) == SSA_NAME)
- {
- value_range *vr0 = get_value_range (arg);
- /* If arg is non-zero, then use [0, prec - 1]. */
- if ((vr0->type == VR_RANGE
- && integer_nonzerop (vr0->min))
- || (vr0->type == VR_ANTI_RANGE
- && integer_zerop (vr0->min)))
- {
- mini = 0;
- maxi = prec - 1;
- }
- /* If some high bits are known to be zero,
- we can decrease the result maximum. */
- if (vr0->type == VR_RANGE
- && TREE_CODE (vr0->max) == INTEGER_CST)
- {
- maxi = tree_floor_log2 (vr0->max);
- /* For vr0 [0, 0] give up. */
- if (maxi == -1)
- break;
- }
- }
- if (mini == -2)
- break;
- goto bitop_builtin;
- /* __builtin_clrsb* returns [0, prec-1]. */
- CASE_CFN_CLRSB:
- arg = gimple_call_arg (stmt, 0);
- prec = TYPE_PRECISION (TREE_TYPE (arg));
- mini = 0;
- maxi = prec - 1;
- goto bitop_builtin;
- bitop_builtin:
- set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
- build_int_cst (type, maxi), NULL);
- return;
- case CFN_UBSAN_CHECK_ADD:
- subcode = PLUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_SUB:
- subcode = MINUS_EXPR;
- break;
- case CFN_UBSAN_CHECK_MUL:
- subcode = MULT_EXPR;
- break;
- case CFN_GOACC_DIM_SIZE:
- case CFN_GOACC_DIM_POS:
- /* Optimizing these two internal functions helps the loop
- optimizer eliminate outer comparisons. Size is [1,N]
- and pos is [0,N-1]. */
- {
- bool is_pos = cfn == CFN_GOACC_DIM_POS;
- int axis = oacc_get_ifn_dim_arg (stmt);
- int size = oacc_get_fn_dim_size (current_function_decl, axis);
-
- if (!size)
- /* If it's dynamic, the backend might know a hardware
- limitation. */
- size = targetm.goacc.dim_limit (axis);
-
- tree type = TREE_TYPE (gimple_call_lhs (stmt));
- set_value_range (vr, VR_RANGE,
- build_int_cst (type, is_pos ? 0 : 1),
- size ? build_int_cst (type, size - is_pos)
- : vrp_val_max (type), NULL);
- }
- return;
- case CFN_BUILT_IN_STRLEN:
- if (tree lhs = gimple_call_lhs (stmt))
- if (ptrdiff_type_node
- && (TYPE_PRECISION (ptrdiff_type_node)
- == TYPE_PRECISION (TREE_TYPE (lhs))))
- {
- tree type = TREE_TYPE (lhs);
- tree max = vrp_val_max (ptrdiff_type_node);
- wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
- tree range_min = build_zero_cst (type);
- tree range_max = wide_int_to_tree (type, wmax - 1);
- set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
- return;
- }
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, we'll complain, but will actually do
- wrapping operation. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode, type,
- gimple_call_arg (stmt, 0),
- gimple_call_arg (stmt, 1));
- flag_wrapv = saved_flag_wrapv;
-
- /* If for both arguments vrp_valueize returned non-NULL,
- this should have been already folded and if not, it
- wasn't folded because of overflow. Avoid removing the
- UBSAN_CHECK_* calls in that case. */
- if (vr->type == VR_RANGE
- && (vr->min == vr->max
- || operand_equal_p (vr->min, vr->max, 0)))
- set_value_range_to_varying (vr);
- return;
- }
- }
- /* Handle extraction of the two results (result of arithmetics and
- a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
- internal function. Similarly from ATOMIC_COMPARE_EXCHANGE. */
- else if (is_gimple_assign (stmt)
- && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
- || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
- && INTEGRAL_TYPE_P (type))
- {
- enum tree_code code = gimple_assign_rhs_code (stmt);
- tree op = gimple_assign_rhs1 (stmt);
- if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
- {
- gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
- if (is_gimple_call (g) && gimple_call_internal_p (g))
- {
- enum tree_code subcode = ERROR_MARK;
- switch (gimple_call_internal_fn (g))
- {
- case IFN_ADD_OVERFLOW:
- subcode = PLUS_EXPR;
- break;
- case IFN_SUB_OVERFLOW:
- subcode = MINUS_EXPR;
- break;
- case IFN_MUL_OVERFLOW:
- subcode = MULT_EXPR;
- break;
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- if (code == IMAGPART_EXPR)
- {
- /* This is the boolean return value whether compare and
- exchange changed anything or not. */
- set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- return;
- }
- break;
- default:
- break;
- }
- if (subcode != ERROR_MARK)
- {
- tree op0 = gimple_call_arg (g, 0);
- tree op1 = gimple_call_arg (g, 1);
- if (code == IMAGPART_EXPR)
- {
- bool ovf = false;
- if (check_for_binary_op_overflow (subcode, type,
- op0, op1, &ovf))
- set_value_range_to_value (vr,
- build_int_cst (type, ovf),
- NULL);
- else if (TYPE_PRECISION (type) == 1
- && !TYPE_UNSIGNED (type))
- set_value_range_to_varying (vr);
- else
- set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
- build_int_cst (type, 1), NULL);
- }
- else if (types_compatible_p (type, TREE_TYPE (op0))
- && types_compatible_p (type, TREE_TYPE (op1)))
- {
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_binary_expr (vr, subcode, type,
- op0, op1);
- flag_wrapv = saved_flag_wrapv;
- }
- else
- {
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- bool saved_flag_wrapv = flag_wrapv;
- /* Pretend the arithmetics is wrapping. If there is
- any overflow, IMAGPART_EXPR will be set. */
- flag_wrapv = 1;
- extract_range_from_unary_expr (&vr0, NOP_EXPR,
- type, op0);
- extract_range_from_unary_expr (&vr1, NOP_EXPR,
- type, op1);
- extract_range_from_binary_expr_1 (vr, subcode, type,
- &vr0, &vr1);
- flag_wrapv = saved_flag_wrapv;
- }
- return;
- }
- }
- }
- }
- if (INTEGRAL_TYPE_P (type)
- && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
- set_value_range_to_nonnegative (vr, type);
- else if (vrp_stmt_computes_nonzero (stmt))
- set_value_range_to_nonnull (vr, type);
- else
- set_value_range_to_varying (vr);
+DEBUG_FUNCTION void
+debug_value_range (value_range *vr)
+{
+ dump_value_range (stderr, vr);
+ fprintf (stderr, "\n");
}
-/* Try to compute a useful range out of assignment STMT and store it
- in *VR. */
+/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
+ create a new SSA name N and return the assertion assignment
+ 'N = ASSERT_EXPR <V, V OP W>'. */
-void
-vr_values::extract_range_from_assignment (value_range *vr, gassign *stmt)
+static gimple *
+build_assert_expr_for (tree cond, tree v)
{
- enum tree_code code = gimple_assign_rhs_code (stmt);
-
- if (code == ASSERT_EXPR)
- extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
- else if (code == SSA_NAME)
- extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_binary)
- extract_range_from_binary_expr (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (TREE_CODE_CLASS (code) == tcc_unary)
- extract_range_from_unary_expr (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt));
- else if (code == COND_EXPR)
- extract_range_from_cond_expr (vr, stmt);
- else if (TREE_CODE_CLASS (code) == tcc_comparison)
- extract_range_from_comparison (vr, gimple_assign_rhs_code (stmt),
- gimple_expr_type (stmt),
- gimple_assign_rhs1 (stmt),
- gimple_assign_rhs2 (stmt));
- else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
- && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
- set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
- else
- set_value_range_to_varying (vr);
+ tree a;
+ gassign *assertion;
- if (vr->type == VR_VARYING)
- extract_range_basic (vr, stmt);
-}
+ gcc_assert (TREE_CODE (v) == SSA_NAME
+ && COMPARISON_CLASS_P (cond));
-/* Given a range VR, a LOOP and a variable VAR, determine whether it
- would be profitable to adjust VR using scalar evolution information
- for VAR. If so, update VR with the new limits. */
+ a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
+ assertion = gimple_build_assign (NULL_TREE, a);
-void
-vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
- gimple *stmt, tree var)
-{
- tree init, step, chrec, tmin, tmax, min, max, type, tem;
- enum ev_direction dir;
+ /* The new ASSERT_EXPR, creates a new SSA name that replaces the
+ operand of the ASSERT_EXPR. Create it so the new name and the old one
+ are registered in the replacement table so that we can fix the SSA web
+ after adding all the ASSERT_EXPRs. */
+ tree new_def = create_new_def_for (v, assertion, NULL);
+ /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
+ given we have to be able to fully propagate those out to re-create
+ valid SSA when removing the asserts. */
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
- /* TODO. Don't adjust anti-ranges. An anti-range may provide
- better opportunities than a regular range, but I'm not sure. */
- if (vr->type == VR_ANTI_RANGE)
- return;
+ return assertion;
+}
- chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
- /* Like in PR19590, scev can return a constant function. */
- if (is_gimple_min_invariant (chrec))
- {
- set_value_range_to_value (vr, chrec, vr->equiv);
- return;
- }
+/* Return false if EXPR is a predicate expression involving floating
+ point values. */
- if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
- return;
+static inline bool
+fp_predicate (gimple *stmt)
+{
+ GIMPLE_CHECK (stmt, GIMPLE_COND);
- init = initial_condition_in_loop_num (chrec, loop->num);
- tem = op_with_constant_singleton_value_range (init);
- if (tem)
- init = tem;
- step = evolution_part_in_loop_num (chrec, loop->num);
- tem = op_with_constant_singleton_value_range (step);
- if (tem)
- step = tem;
-
- /* If STEP is symbolic, we can't know whether INIT will be the
- minimum or maximum value in the range. Also, unless INIT is
- a simple expression, compare_values and possibly other functions
- in tree-vrp won't be able to handle it. */
- if (step == NULL_TREE
- || !is_gimple_min_invariant (step)
- || !valid_value_p (init))
- return;
+ return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
+}
- dir = scev_direction (chrec);
- if (/* Do not adjust ranges if we do not know whether the iv increases
- or decreases, ... */
- dir == EV_DIR_UNKNOWN
- /* ... or if it may wrap. */
- || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
- get_chrec_loop (chrec), true))
- return;
+/* If the range of values taken by OP can be inferred after STMT executes,
+ return the comparison code (COMP_CODE_P) and value (VAL_P) that
+ describes the inferred range. Return true if a range could be
+ inferred. */
- type = TREE_TYPE (var);
- if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
- tmin = lower_bound_in_type (type, type);
- else
- tmin = TYPE_MIN_VALUE (type);
- if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
- tmax = upper_bound_in_type (type, type);
- else
- tmax = TYPE_MAX_VALUE (type);
+bool
+infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
+{
+ *val_p = NULL_TREE;
+ *comp_code_p = ERROR_MARK;
- /* Try to use estimated number of iterations for the loop to constrain the
- final value in the evolution. */
- if (TREE_CODE (step) == INTEGER_CST
- && is_gimple_val (init)
- && (TREE_CODE (init) != SSA_NAME
- || get_value_range (init)->type == VR_RANGE))
- {
- widest_int nit;
+ /* Do not attempt to infer anything in names that flow through
+ abnormal edges. */
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
+ return false;
- /* We are only entering here for loop header PHI nodes, so using
- the number of latch executions is the correct thing to use. */
- if (max_loop_iterations (loop, &nit))
- {
- value_range maxvr = VR_INITIALIZER;
- signop sgn = TYPE_SIGN (TREE_TYPE (step));
- bool overflow;
-
- widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
- &overflow);
- /* If the multiplication overflowed we can't do a meaningful
- adjustment. Likewise if the result doesn't fit in the type
- of the induction variable. For a signed type we have to
- check whether the result has the expected signedness which
- is that of the step as number of iterations is unsigned. */
- if (!overflow
- && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
- && (sgn == UNSIGNED
- || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
- {
- tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
- extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
- TREE_TYPE (init), init, tem);
- /* Likewise if the addition did. */
- if (maxvr.type == VR_RANGE)
- {
- value_range initvr = VR_INITIALIZER;
+ /* If STMT is the last statement of a basic block with no normal
+ successors, there is no point inferring anything about any of its
+ operands. We would not be able to find a proper insertion point
+ for the assertion, anyway. */
+ if (stmt_ends_bb_p (stmt))
+ {
+ edge_iterator ei;
+ edge e;
- if (TREE_CODE (init) == SSA_NAME)
- initvr = *(get_value_range (init));
- else if (is_gimple_min_invariant (init))
- set_value_range_to_value (&initvr, init, NULL);
- else
- return;
-
- /* Check if init + nit * step overflows. Though we checked
- scev {init, step}_loop doesn't wrap, it is not enough
- because the loop may exit immediately. Overflow could
- happen in the plus expression in this case. */
- if ((dir == EV_DIR_DECREASES
- && compare_values (maxvr.min, initvr.min) != -1)
- || (dir == EV_DIR_GROWS
- && compare_values (maxvr.max, initvr.max) != 1))
- return;
-
- tmin = maxvr.min;
- tmax = maxvr.max;
- }
- }
- }
+ FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
+ if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
+ break;
+ if (e == NULL)
+ return false;
}
- if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
+ if (infer_nonnull_range (stmt, op))
{
- min = tmin;
- max = tmax;
+ *val_p = build_int_cst (TREE_TYPE (op), 0);
+ *comp_code_p = NE_EXPR;
+ return true;
+ }
- /* For VARYING or UNDEFINED ranges, just about anything we get
- from scalar evolutions should be better. */
+ return false;
+}
- if (dir == EV_DIR_DECREASES)
- max = init;
- else
- min = init;
- }
- else if (vr->type == VR_RANGE)
- {
- min = vr->min;
- max = vr->max;
- if (dir == EV_DIR_DECREASES)
- {
- /* INIT is the maximum value. If INIT is lower than VR->MAX
- but no smaller than VR->MIN, set VR->MAX to INIT. */
- if (compare_values (init, max) == -1)
- max = init;
+void dump_asserts_for (FILE *, tree);
+void debug_asserts_for (tree);
+void dump_all_asserts (FILE *);
+void debug_all_asserts (void);
- /* According to the loop information, the variable does not
- overflow. */
- if (compare_values (min, tmin) == -1)
- min = tmin;
+/* Dump all the registered assertions for NAME to FILE. */
- }
- else
- {
- /* If INIT is bigger than VR->MIN, set VR->MIN to INIT. */
- if (compare_values (init, min) == 1)
- min = init;
+void
+dump_asserts_for (FILE *file, tree name)
+{
+ assert_locus *loc;
+
+ fprintf (file, "Assertions to be inserted for ");
+ print_generic_expr (file, name);
+ fprintf (file, "\n");
- if (compare_values (tmax, max) == -1)
- max = tmax;
+ loc = asserts_for[SSA_NAME_VERSION (name)];
+ while (loc)
+ {
+ fprintf (file, "\t");
+ print_gimple_stmt (file, gsi_stmt (loc->si), 0);
+ fprintf (file, "\n\tBB #%d", loc->bb->index);
+ if (loc->e)
+ {
+ fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
+ loc->e->dest->index);
+ dump_edge_info (file, loc->e, dump_flags, 0);
}
+ fprintf (file, "\n\tPREDICATE: ");
+ print_generic_expr (file, loc->expr);
+ fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
+ print_generic_expr (file, loc->val);
+ fprintf (file, "\n\n");
+ loc = loc->next;
}
- else
- return;
- /* If we just created an invalid range with the minimum
- greater than the maximum, we fail conservatively.
- This should happen only in unreachable
- parts of code, or for invalid programs. */
- if (compare_values (min, max) == 1)
- return;
-
- /* Even for valid range info, sometimes overflow flag will leak in.
- As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
- drop them. */
- if (TREE_OVERFLOW_P (min))
- min = drop_tree_overflow (min);
- if (TREE_OVERFLOW_P (max))
- max = drop_tree_overflow (max);
-
- set_value_range (vr, VR_RANGE, min, max, vr->equiv);
+ fprintf (file, "\n");
}
-/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
-
- - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
- all the values in the ranges.
-
- - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
+/* Dump all the registered assertions for NAME to stderr. */
- - Return NULL_TREE if it is not always possible to determine the
- value of the comparison.
+DEBUG_FUNCTION void
+debug_asserts_for (tree name)
+{
+ dump_asserts_for (stderr, name);
+}
- Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
- assumed signed overflow is undefined. */
+/* Dump all the registered assertions for all the names to FILE. */
-static tree
-compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
- bool *strict_overflow_p)
+void
+dump_all_asserts (FILE *file)
{
- /* VARYING or UNDEFINED ranges cannot be compared. */
- if (vr0->type == VR_VARYING
- || vr0->type == VR_UNDEFINED
- || vr1->type == VR_VARYING
- || vr1->type == VR_UNDEFINED)
- return NULL_TREE;
+ unsigned i;
+ bitmap_iterator bi;
- /* Anti-ranges need to be handled separately. */
- if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
- {
- /* If both are anti-ranges, then we cannot compute any
- comparison. */
- if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
- return NULL_TREE;
+ fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
+ EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
+ dump_asserts_for (file, ssa_name (i));
+ fprintf (file, "\n");
+}
- /* These comparisons are never statically computable. */
- if (comp == GT_EXPR
- || comp == GE_EXPR
- || comp == LT_EXPR
- || comp == LE_EXPR)
- return NULL_TREE;
- /* Equality can be computed only between a range and an
- anti-range. ~[VAL1, VAL2] == [VAL1, VAL2] is always false. */
- if (vr0->type == VR_RANGE)
- {
- /* To simplify processing, make VR0 the anti-range. */
- value_range *tmp = vr0;
- vr0 = vr1;
- vr1 = tmp;
- }
+/* Dump all the registered assertions for all the names to stderr. */
- gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
+DEBUG_FUNCTION void
+debug_all_asserts (void)
+{
+ dump_all_asserts (stderr);
+}
- if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
- && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
- return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
+/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
- return NULL_TREE;
- }
+static void
+add_assert_info (vec<assert_info> &asserts,
+ tree name, tree expr, enum tree_code comp_code, tree val)
+{
+ assert_info info;
+ info.comp_code = comp_code;
+ info.name = name;
+ info.val = val;
+ info.expr = expr;
+ asserts.safe_push (info);
+}
- /* Simplify processing. If COMP is GT_EXPR or GE_EXPR, switch the
- operands around and change the comparison code. */
- if (comp == GT_EXPR || comp == GE_EXPR)
- {
- comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
- std::swap (vr0, vr1);
- }
+/* If NAME doesn't have an ASSERT_EXPR registered for asserting
+ 'EXPR COMP_CODE VAL' at a location that dominates block BB or
+ E->DEST, then register this location as a possible insertion point
+ for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
+
+ BB, E and SI provide the exact insertion point for the new
+ ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
+ on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
+ BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
+ must not be NULL. */
+
+static void
+register_new_assert_for (tree name, tree expr,
+ enum tree_code comp_code,
+ tree val,
+ basic_block bb,
+ edge e,
+ gimple_stmt_iterator si)
+{
+ assert_locus *n, *loc, *last_loc;
+ basic_block dest_bb;
+
+ gcc_checking_assert (bb == NULL || e == NULL);
+
+ if (e == NULL)
+ gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
+ && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
+
+ /* Never build an assert comparing against an integer constant with
+ TREE_OVERFLOW set. This confuses our undefined overflow warning
+ machinery. */
+ if (TREE_OVERFLOW_P (val))
+ val = drop_tree_overflow (val);
- if (comp == EQ_EXPR)
+ /* The new assertion A will be inserted at BB or E. We need to
+ determine if the new location is dominated by a previously
+ registered location for A. If we are doing an edge insertion,
+ assume that A will be inserted at E->DEST. Note that this is not
+ necessarily true.
+
+ If E is a critical edge, it will be split. But even if E is
+ split, the new block will dominate the same set of blocks that
+ E->DEST dominates.
+
+ The reverse, however, is not true: blocks dominated by E->DEST
+ will not be dominated by the new block created to split E. So,
+ if the insertion location is on a critical edge, we will not use
+ the new location to move another assertion previously registered
+ at a block dominated by E->DEST. */
+ dest_bb = (bb) ? bb : e->dest;
+
+ /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
+ VAL at a block dominating DEST_BB, then we don't need to insert a new
+ one. Similarly, if the same assertion already exists at a block
+ dominated by DEST_BB and the new location is not on a critical
+ edge, then update the existing location for the assertion (i.e.,
+ move the assertion up in the dominance tree).
+
+ Note, this is implemented as a simple linked list because there
+ should not be more than a handful of assertions registered per
+ name. If this becomes a performance problem, a table hashed by
+ COMP_CODE and VAL could be implemented. */
+ loc = asserts_for[SSA_NAME_VERSION (name)];
+ last_loc = loc;
+ while (loc)
{
- /* Equality may only be computed if both ranges represent
- exactly one value. */
- if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
- && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
+ if (loc->comp_code == comp_code
+ && (loc->val == val
+ || operand_equal_p (loc->val, val, 0))
+ && (loc->expr == expr
+ || operand_equal_p (loc->expr, expr, 0)))
{
- int cmp_min = compare_values_warnv (vr0->min, vr1->min,
- strict_overflow_p);
- int cmp_max = compare_values_warnv (vr0->max, vr1->max,
- strict_overflow_p);
- if (cmp_min == 0 && cmp_max == 0)
- return boolean_true_node;
- else if (cmp_min != -2 && cmp_max != -2)
- return boolean_false_node;
+ /* If E is not a critical edge and DEST_BB
+ dominates the existing location for the assertion, move
+ the assertion up in the dominance tree by updating its
+ location information. */
+ if ((e == NULL || !EDGE_CRITICAL_P (e))
+ && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
+ {
+ loc->bb = dest_bb;
+ loc->e = e;
+ loc->si = si;
+ return;
+ }
}
- /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1. */
- else if (compare_values_warnv (vr0->min, vr1->max,
- strict_overflow_p) == 1
- || compare_values_warnv (vr1->min, vr0->max,
- strict_overflow_p) == 1)
- return boolean_false_node;
- return NULL_TREE;
- }
- else if (comp == NE_EXPR)
- {
- int cmp1, cmp2;
-
- /* If VR0 is completely to the left or completely to the right
- of VR1, they are always different. Notice that we need to
- make sure that both comparisons yield similar results to
- avoid comparing values that cannot be compared at
- compile-time. */
- cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
- cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
- if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
- return boolean_true_node;
-
- /* If VR0 and VR1 represent a single value and are identical,
- return false. */
- else if (compare_values_warnv (vr0->min, vr0->max,
- strict_overflow_p) == 0
- && compare_values_warnv (vr1->min, vr1->max,
- strict_overflow_p) == 0
- && compare_values_warnv (vr0->min, vr1->min,
- strict_overflow_p) == 0
- && compare_values_warnv (vr0->max, vr1->max,
- strict_overflow_p) == 0)
- return boolean_false_node;
-
- /* Otherwise, they may or may not be different. */
- else
- return NULL_TREE;
+ /* Update the last node of the list and move to the next one. */
+ last_loc = loc;
+ loc = loc->next;
}
- else if (comp == LT_EXPR || comp == LE_EXPR)
- {
- int tst;
- /* If VR0 is to the left of VR1, return true. */
- tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
- if ((comp == LT_EXPR && tst == -1)
- || (comp == LE_EXPR && (tst == -1 || tst == 0)))
- return boolean_true_node;
-
- /* If VR0 is to the right of VR1, return false. */
- tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
- if ((comp == LT_EXPR && (tst == 0 || tst == 1))
- || (comp == LE_EXPR && tst == 1))
- return boolean_false_node;
+ /* If we didn't find an assertion already registered for
+ NAME COMP_CODE VAL, add a new one at the end of the list of
+ assertions associated with NAME. */
+ n = XNEW (struct assert_locus);
+ n->bb = dest_bb;
+ n->e = e;
+ n->si = si;
+ n->comp_code = comp_code;
+ n->val = val;
+ n->expr = expr;
+ n->next = NULL;
- /* Otherwise, we don't know. */
- return NULL_TREE;
- }
+ if (last_loc)
+ last_loc->next = n;
+ else
+ asserts_for[SSA_NAME_VERSION (name)] = n;
- gcc_unreachable ();
+ bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
}
+/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
+ Extract a suitable test code and value and store them into *CODE_P and
+ *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
+
+ If no extraction was possible, return FALSE, otherwise return TRUE.
-/* Given a value range VR, a value VAL and a comparison code COMP, return
- BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
- values in VR. Return BOOLEAN_FALSE_NODE if the comparison
- always returns false. Return NULL_TREE if it is not always
- possible to determine the value of the comparison. Also set
- *STRICT_OVERFLOW_P to indicate whether comparision evaluation
- assumed signed overflow is undefined. */
+ If INVERT is true, then we invert the result stored into *CODE_P. */
-static tree
-compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
- bool *strict_overflow_p)
+static bool
+extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
+ tree cond_op0, tree cond_op1,
+ bool invert, enum tree_code *code_p,
+ tree *val_p)
{
- if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
- return NULL_TREE;
+ enum tree_code comp_code;
+ tree val;
- /* Anti-ranges need to be handled separately. */
- if (vr->type == VR_ANTI_RANGE)
+ /* Otherwise, we have a comparison of the form NAME COMP VAL
+ or VAL COMP NAME. */
+ if (name == cond_op1)
{
- /* For anti-ranges, the only predicates that we can compute at
- compile time are equality and inequality. */
- if (comp == GT_EXPR
- || comp == GE_EXPR
- || comp == LT_EXPR
- || comp == LE_EXPR)
- return NULL_TREE;
-
- /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2. */
- if (value_inside_range (val, vr->min, vr->max) == 1)
- return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
-
- return NULL_TREE;
+ /* If the predicate is of the form VAL COMP NAME, flip
+ COMP around because we need to register NAME as the
+ first operand in the predicate. */
+ comp_code = swap_tree_comparison (cond_code);
+ val = cond_op0;
}
-
- if (comp == EQ_EXPR)
+ else if (name == cond_op0)
{
- /* EQ_EXPR may only be computed if VR represents exactly
- one value. */
- if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
- {
- int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
- if (cmp == 0)
- return boolean_true_node;
- else if (cmp == -1 || cmp == 1 || cmp == 2)
- return boolean_false_node;
- }
- else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
- || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
- return boolean_false_node;
-
- return NULL_TREE;
+ /* The comparison is of the form NAME COMP VAL, so the
+ comparison code remains unchanged. */
+ comp_code = cond_code;
+ val = cond_op1;
}
- else if (comp == NE_EXPR)
- {
- /* If VAL is not inside VR, then they are always different. */
- if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
- || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
- return boolean_true_node;
+ else
+ gcc_unreachable ();
- /* If VR represents exactly one value equal to VAL, then return
- false. */
- if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
- && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
- return boolean_false_node;
+ /* Invert the comparison code as necessary. */
+ if (invert)
+ comp_code = invert_tree_comparison (comp_code, 0);
- /* Otherwise, they may or may not be different. */
- return NULL_TREE;
- }
- else if (comp == LT_EXPR || comp == LE_EXPR)
- {
- int tst;
+ /* VRP only handles integral and pointer types. */
+ if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
+ && ! POINTER_TYPE_P (TREE_TYPE (val)))
+ return false;
- /* If VR is to the left of VAL, return true. */
- tst = compare_values_warnv (vr->max, val, strict_overflow_p);
- if ((comp == LT_EXPR && tst == -1)
- || (comp == LE_EXPR && (tst == -1 || tst == 0)))
- return boolean_true_node;
+ /* Do not register always-false predicates.
+ FIXME: this works around a limitation in fold() when dealing with
+ enumerations. Given 'enum { N1, N2 } x;', fold will not
+ fold 'if (x > N2)' to 'if (0)'. */
+ if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ {
+ tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
+ tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
- /* If VR is to the right of VAL, return false. */
- tst = compare_values_warnv (vr->min, val, strict_overflow_p);
- if ((comp == LT_EXPR && (tst == 0 || tst == 1))
- || (comp == LE_EXPR && tst == 1))
- return boolean_false_node;
+ if (comp_code == GT_EXPR
+ && (!max
+ || compare_values (val, max) == 0))
+ return false;
- /* Otherwise, we don't know. */
- return NULL_TREE;
+ if (comp_code == LT_EXPR
+ && (!min
+ || compare_values (val, min) == 0))
+ return false;
}
- else if (comp == GT_EXPR || comp == GE_EXPR)
- {
- int tst;
+ *code_p = comp_code;
+ *val_p = val;
+ return true;
+}
- /* If VR is to the right of VAL, return true. */
- tst = compare_values_warnv (vr->min, val, strict_overflow_p);
- if ((comp == GT_EXPR && tst == 1)
- || (comp == GE_EXPR && (tst == 0 || tst == 1)))
- return boolean_true_node;
+/* Find the smallest RES such that RES > VAL && (RES & MASK) == RES,
+ if any (otherwise return VAL). VAL and MASK must be zero-extended for
+ precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
+ (to transform signed values into unsigned) and at the end xor
+ SGNBIT back. */
- /* If VR is to the left of VAL, return false. */
- tst = compare_values_warnv (vr->max, val, strict_overflow_p);
- if ((comp == GT_EXPR && (tst == -1 || tst == 0))
- || (comp == GE_EXPR && tst == -1))
- return boolean_false_node;
+static wide_int
+masked_increment (const wide_int &val_in, const wide_int &mask,
+ const wide_int &sgnbit, unsigned int prec)
+{
+ wide_int bit = wi::one (prec), res;
+ unsigned int i;
- /* Otherwise, we don't know. */
- return NULL_TREE;
+ wide_int val = val_in ^ sgnbit;
+ for (i = 0; i < prec; i++, bit += bit)
+ {
+ res = mask;
+ if ((res & bit) == 0)
+ continue;
+ res = bit - 1;
+ res = wi::bit_and_not (val + bit, res);
+ res &= mask;
+ if (wi::gtu_p (res, val))
+ return res ^ sgnbit;
}
-
- gcc_unreachable ();
+ return val ^ sgnbit;
}
+/* Helper for overflow_comparison_p
-/* Debugging dumps. */
+ OP0 CODE OP1 is a comparison. Examine the comparison and potentially
+ OP1's defining statement to see if it ultimately has the form
+ OP0 CODE (OP0 PLUS INTEGER_CST)
-void dump_value_range (FILE *, const value_range *);
-void debug_value_range (value_range *);
-void dump_all_value_ranges (FILE *);
-void dump_vr_equiv (FILE *, bitmap);
-void debug_vr_equiv (bitmap);
+ If so, return TRUE indicating this is an overflow test and store into
+ *NEW_CST an updated constant that can be used in a narrowed range test.
+ REVERSED indicates if the comparison was originally:
-/* Dump value range VR to FILE. */
+ OP1 CODE' OP0.
-void
-dump_value_range (FILE *file, const value_range *vr)
+ This affects how we build the updated constant. */
+
+static bool
+overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
+ bool follow_assert_exprs, bool reversed, tree *new_cst)
{
- if (vr == NULL)
- fprintf (file, "[]");
- else if (vr->type == VR_UNDEFINED)
- fprintf (file, "UNDEFINED");
- else if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
+ /* See if this is a relational operation between two SSA_NAMES with
+ unsigned, overflow wrapping values. If so, check it more deeply. */
+ if ((code == LT_EXPR || code == LE_EXPR
+ || code == GE_EXPR || code == GT_EXPR)
+ && TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && TYPE_UNSIGNED (TREE_TYPE (op0))
+ && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
{
- tree type = TREE_TYPE (vr->min);
-
- fprintf (file, "%s[", (vr->type == VR_ANTI_RANGE) ? "~" : "");
-
- if (INTEGRAL_TYPE_P (type)
- && !TYPE_UNSIGNED (type)
- && vrp_val_is_min (vr->min))
- fprintf (file, "-INF");
- else
- print_generic_expr (file, vr->min);
-
- fprintf (file, ", ");
-
- if (INTEGRAL_TYPE_P (type)
- && vrp_val_is_max (vr->max))
- fprintf (file, "+INF");
- else
- print_generic_expr (file, vr->max);
-
- fprintf (file, "]");
+ gimple *op1_def = SSA_NAME_DEF_STMT (op1);
- if (vr->equiv)
+ /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
+ if (follow_assert_exprs)
{
- bitmap_iterator bi;
- unsigned i, c = 0;
+ while (gimple_assign_single_p (op1_def)
+ && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
+ {
+ op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
+ if (TREE_CODE (op1) != SSA_NAME)
+ break;
+ op1_def = SSA_NAME_DEF_STMT (op1);
+ }
+ }
- fprintf (file, " EQUIVALENCES: { ");
+ /* Now look at the defining statement of OP1 to see if it adds
+ or subtracts a nonzero constant from another operand. */
+ if (op1_def
+ && is_gimple_assign (op1_def)
+ && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
+ && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
+ && !integer_zerop (gimple_assign_rhs2 (op1_def)))
+ {
+ tree target = gimple_assign_rhs1 (op1_def);
- EXECUTE_IF_SET_IN_BITMAP (vr->equiv, 0, i, bi)
+ /* If requested, follow ASSERT_EXPRs backwards for op0 looking
+ for one where TARGET appears on the RHS. */
+ if (follow_assert_exprs)
{
- print_generic_expr (file, ssa_name (i));
- fprintf (file, " ");
- c++;
+ /* Now see if that "other operand" is op0, following the chain
+ of ASSERT_EXPRs if necessary. */
+ gimple *op0_def = SSA_NAME_DEF_STMT (op0);
+ while (op0 != target
+ && gimple_assign_single_p (op0_def)
+ && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
+ {
+ op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
+ if (TREE_CODE (op0) != SSA_NAME)
+ break;
+ op0_def = SSA_NAME_DEF_STMT (op0);
+ }
}
- fprintf (file, "} (%u elements)", c);
+ /* If we did not find our target SSA_NAME, then this is not
+ an overflow test. */
+ if (op0 != target)
+ return false;
+
+ tree type = TREE_TYPE (op0);
+ wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
+ tree inc = gimple_assign_rhs2 (op1_def);
+ if (reversed)
+ *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
+ else
+ *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
+ return true;
}
}
- else if (vr->type == VR_VARYING)
- fprintf (file, "VARYING");
- else
- fprintf (file, "INVALID RANGE");
+ return false;
}
+/* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
+ OP1's defining statement to see if it ultimately has the form
+ OP0 CODE (OP0 PLUS INTEGER_CST)
-/* Dump value range VR to stderr. */
+ If so, return TRUE indicating this is an overflow test and store into
+ *NEW_CST an updated constant that can be used in a narrowed range test.
-DEBUG_FUNCTION void
-debug_value_range (value_range *vr)
+ These statements are left as-is in the IL to facilitate discovery of
+ {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
+ the alternate range representation is often useful within VRP. */
+
+bool
+overflow_comparison_p (tree_code code, tree name, tree val,
+ bool use_equiv_p, tree *new_cst)
{
- dump_value_range (stderr, vr);
- fprintf (stderr, "\n");
+ if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
+ return true;
+ return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
+ use_equiv_p, true, new_cst);
}
-/* Dump value ranges of all SSA_NAMEs to FILE. */
+/* Try to register an edge assertion for SSA name NAME on edge E for
+ the condition COND contributing to the conditional jump pointed to by BSI.
+ Invert the condition COND if INVERT is true. */
-void
-vr_values::dump_all_value_ranges (FILE *file)
+static void
+register_edge_assert_for_2 (tree name, edge e,
+ enum tree_code cond_code,
+ tree cond_op0, tree cond_op1, bool invert,
+ vec<assert_info> &asserts)
{
- size_t i;
+ tree val;
+ enum tree_code comp_code;
+
+ if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
+ cond_op0,
+ cond_op1,
+ invert, &comp_code, &val))
+ return;
- for (i = 0; i < num_vr_values; i++)
+ /* Queue the assert. */
+ tree x;
+ if (overflow_comparison_p (comp_code, name, val, false, &x))
{
- if (vr_value[i])
- {
- print_generic_expr (file, ssa_name (i));
- fprintf (file, ": ");
- dump_value_range (file, vr_value[i]);
- fprintf (file, "\n");
- }
+ enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
+ ? GT_EXPR : LE_EXPR);
+ add_assert_info (asserts, name, name, new_code, x);
}
+ add_assert_info (asserts, name, name, comp_code, val);
- fprintf (file, "\n");
-}
+ /* In the case of NAME <= CST and NAME being defined as
+ NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
+ and NAME2 <= CST - CST2. We can do the same for NAME > CST.
+ This catches range and anti-range tests. */
+ if ((comp_code == LE_EXPR
+ || comp_code == GT_EXPR)
+ && TREE_CODE (val) == INTEGER_CST
+ && TYPE_UNSIGNED (TREE_TYPE (val)))
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
-/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
- create a new SSA name N and return the assertion assignment
- 'N = ASSERT_EXPR <V, V OP W>'. */
+ /* Extract CST2 from the (optional) addition. */
+ if (is_gimple_assign (def_stmt)
+ && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
+ {
+ name2 = gimple_assign_rhs1 (def_stmt);
+ cst2 = gimple_assign_rhs2 (def_stmt);
+ if (TREE_CODE (name2) == SSA_NAME
+ && TREE_CODE (cst2) == INTEGER_CST)
+ def_stmt = SSA_NAME_DEF_STMT (name2);
+ }
-static gimple *
-build_assert_expr_for (tree cond, tree v)
-{
- tree a;
- gassign *assertion;
+ /* Extract NAME2 from the (optional) sign-changing cast. */
+ if (gimple_assign_cast_p (def_stmt))
+ {
+ if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
+ && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
+ && (TYPE_PRECISION (gimple_expr_type (def_stmt))
+ == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
+ name3 = gimple_assign_rhs1 (def_stmt);
+ }
- gcc_assert (TREE_CODE (v) == SSA_NAME
- && COMPARISON_CLASS_P (cond));
+ /* If name3 is used later, create an ASSERT_EXPR for it. */
+ if (name3 != NULL_TREE
+ && TREE_CODE (name3) == SSA_NAME
+ && (cst2 == NULL_TREE
+ || TREE_CODE (cst2) == INTEGER_CST)
+ && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
+ {
+ tree tmp;
- a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
- assertion = gimple_build_assign (NULL_TREE, a);
+ /* Build an expression for the range test. */
+ tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
+ if (cst2 != NULL_TREE)
+ tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
- /* The new ASSERT_EXPR, creates a new SSA name that replaces the
- operand of the ASSERT_EXPR. Create it so the new name and the old one
- are registered in the replacement table so that we can fix the SSA web
- after adding all the ASSERT_EXPRs. */
- tree new_def = create_new_def_for (v, assertion, NULL);
- /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
- given we have to be able to fully propagate those out to re-create
- valid SSA when removing the asserts. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
- SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
+ if (dump_file)
+ {
+ fprintf (dump_file, "Adding assert for ");
+ print_generic_expr (dump_file, name3);
+ fprintf (dump_file, " from ");
+ print_generic_expr (dump_file, tmp);
+ fprintf (dump_file, "\n");
+ }
- return assertion;
-}
+ add_assert_info (asserts, name3, tmp, comp_code, val);
+ }
+ /* If name2 is used later, create an ASSERT_EXPR for it. */
+ if (name2 != NULL_TREE
+ && TREE_CODE (name2) == SSA_NAME
+ && TREE_CODE (cst2) == INTEGER_CST
+ && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
+ {
+ tree tmp;
-/* Return false if EXPR is a predicate expression involving floating
- point values. */
+ /* Build an expression for the range test. */
+ tmp = name2;
+ if (TREE_TYPE (name) != TREE_TYPE (name2))
+ tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
+ if (cst2 != NULL_TREE)
+ tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
-static inline bool
-fp_predicate (gimple *stmt)
-{
- GIMPLE_CHECK (stmt, GIMPLE_COND);
+ if (dump_file)
+ {
+ fprintf (dump_file, "Adding assert for ");
+ print_generic_expr (dump_file, name2);
+ fprintf (dump_file, " from ");
+ print_generic_expr (dump_file, tmp);
+ fprintf (dump_file, "\n");
+ }
- return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
-}
+ add_assert_info (asserts, name2, tmp, comp_code, val);
+ }
+ }
-/* If the range of values taken by OP can be inferred after STMT executes,
- return the comparison code (COMP_CODE_P) and value (VAL_P) that
- describes the inferred range. Return true if a range could be
- inferred. */
+ /* In the case of post-in/decrement tests like if (i++) ... and uses
+ of the in/decremented value on the edge the extra name we want to
+ assert for is not on the def chain of the name compared. Instead
+ it is in the set of use stmts.
+ Similar cases happen for conversions that were simplified through
+ fold_{sign_changed,widened}_comparison. */
+ if ((comp_code == NE_EXPR
+ || comp_code == EQ_EXPR)
+ && TREE_CODE (val) == INTEGER_CST)
+ {
+ imm_use_iterator ui;
+ gimple *use_stmt;
+ FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
+ {
+ if (!is_gimple_assign (use_stmt))
+ continue;
-bool
-infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
-{
- *val_p = NULL_TREE;
- *comp_code_p = ERROR_MARK;
+ /* Cut off to use-stmts that are dominating the predecessor. */
+ if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
+ continue;
- /* Do not attempt to infer anything in names that flow through
- abnormal edges. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
- return false;
+ tree name2 = gimple_assign_lhs (use_stmt);
+ if (TREE_CODE (name2) != SSA_NAME)
+ continue;
- /* If STMT is the last statement of a basic block with no normal
- successors, there is no point inferring anything about any of its
- operands. We would not be able to find a proper insertion point
- for the assertion, anyway. */
- if (stmt_ends_bb_p (stmt))
- {
- edge_iterator ei;
- edge e;
+ enum tree_code code = gimple_assign_rhs_code (use_stmt);
+ tree cst;
+ if (code == PLUS_EXPR
+ || code == MINUS_EXPR)
+ {
+ cst = gimple_assign_rhs2 (use_stmt);
+ if (TREE_CODE (cst) != INTEGER_CST)
+ continue;
+ cst = int_const_binop (code, val, cst);
+ }
+ else if (CONVERT_EXPR_CODE_P (code))
+ {
+ /* For truncating conversions we cannot record
+ an inequality. */
+ if (comp_code == NE_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (name2))
+ < TYPE_PRECISION (TREE_TYPE (name))))
+ continue;
+ cst = fold_convert (TREE_TYPE (name2), val);
+ }
+ else
+ continue;
- FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
- if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
- break;
- if (e == NULL)
- return false;
+ if (TREE_OVERFLOW_P (cst))
+ cst = drop_tree_overflow (cst);
+ add_assert_info (asserts, name2, name2, comp_code, cst);
+ }
}
-
- if (infer_nonnull_range (stmt, op))
+
+ if (TREE_CODE_CLASS (comp_code) == tcc_comparison
+ && TREE_CODE (val) == INTEGER_CST)
{
- *val_p = build_int_cst (TREE_TYPE (op), 0);
- *comp_code_p = NE_EXPR;
- return true;
- }
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
+ tree val2 = NULL_TREE;
+ unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
+ wide_int mask = wi::zero (prec);
+ unsigned int nprec = prec;
+ enum tree_code rhs_code = ERROR_MARK;
- return false;
-}
+ if (is_gimple_assign (def_stmt))
+ rhs_code = gimple_assign_rhs_code (def_stmt);
+ /* In the case of NAME != CST1 where NAME = A +- CST2 we can
+ assert that A != CST1 -+ CST2. */
+ if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
+ && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
+ {
+ tree op0 = gimple_assign_rhs1 (def_stmt);
+ tree op1 = gimple_assign_rhs2 (def_stmt);
+ if (TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == INTEGER_CST)
+ {
+ enum tree_code reverse_op = (rhs_code == PLUS_EXPR
+ ? MINUS_EXPR : PLUS_EXPR);
+ op1 = int_const_binop (reverse_op, val, op1);
+ if (TREE_OVERFLOW (op1))
+ op1 = drop_tree_overflow (op1);
+ add_assert_info (asserts, op0, op0, comp_code, op1);
+ }
+ }
-void dump_asserts_for (FILE *, tree);
-void debug_asserts_for (tree);
-void dump_all_asserts (FILE *);
-void debug_all_asserts (void);
+ /* Add asserts for NAME cmp CST and NAME being defined
+ as NAME = (int) NAME2. */
+ if (!TYPE_UNSIGNED (TREE_TYPE (val))
+ && (comp_code == LE_EXPR || comp_code == LT_EXPR
+ || comp_code == GT_EXPR || comp_code == GE_EXPR)
+ && gimple_assign_cast_p (def_stmt))
+ {
+ name2 = gimple_assign_rhs1 (def_stmt);
+ if (CONVERT_EXPR_CODE_P (rhs_code)
+ && INTEGRAL_TYPE_P (TREE_TYPE (name2))
+ && TYPE_UNSIGNED (TREE_TYPE (name2))
+ && prec == TYPE_PRECISION (TREE_TYPE (name2))
+ && (comp_code == LE_EXPR || comp_code == GT_EXPR
+ || !tree_int_cst_equal (val,
+ TYPE_MIN_VALUE (TREE_TYPE (val)))))
+ {
+ tree tmp, cst;
+ enum tree_code new_comp_code = comp_code;
-/* Dump all the registered assertions for NAME to FILE. */
+ cst = fold_convert (TREE_TYPE (name2),
+ TYPE_MIN_VALUE (TREE_TYPE (val)));
+ /* Build an expression for the range test. */
+ tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
+ cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
+ fold_convert (TREE_TYPE (name2), val));
+ if (comp_code == LT_EXPR || comp_code == GE_EXPR)
+ {
+ new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
+ cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
+ build_int_cst (TREE_TYPE (name2), 1));
+ }
-void
-dump_asserts_for (FILE *file, tree name)
-{
- assert_locus *loc;
+ if (dump_file)
+ {
+ fprintf (dump_file, "Adding assert for ");
+ print_generic_expr (dump_file, name2);
+ fprintf (dump_file, " from ");
+ print_generic_expr (dump_file, tmp);
+ fprintf (dump_file, "\n");
+ }
- fprintf (file, "Assertions to be inserted for ");
- print_generic_expr (file, name);
- fprintf (file, "\n");
+ add_assert_info (asserts, name2, tmp, new_comp_code, cst);
+ }
+ }
- loc = asserts_for[SSA_NAME_VERSION (name)];
- while (loc)
- {
- fprintf (file, "\t");
- print_gimple_stmt (file, gsi_stmt (loc->si), 0);
- fprintf (file, "\n\tBB #%d", loc->bb->index);
- if (loc->e)
+ /* Add asserts for NAME cmp CST and NAME being defined as
+ NAME = NAME2 >> CST2.
+
+ Extract CST2 from the right shift. */
+ if (rhs_code == RSHIFT_EXPR)
{
- fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
- loc->e->dest->index);
- dump_edge_info (file, loc->e, dump_flags, 0);
+ name2 = gimple_assign_rhs1 (def_stmt);
+ cst2 = gimple_assign_rhs2 (def_stmt);
+ if (TREE_CODE (name2) == SSA_NAME
+ && tree_fits_uhwi_p (cst2)
+ && INTEGRAL_TYPE_P (TREE_TYPE (name2))
+ && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
+ && type_has_mode_precision_p (TREE_TYPE (val)))
+ {
+ mask = wi::mask (tree_to_uhwi (cst2), false, prec);
+ val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
+ }
}
- fprintf (file, "\n\tPREDICATE: ");
- print_generic_expr (file, loc->expr);
- fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
- print_generic_expr (file, loc->val);
- fprintf (file, "\n\n");
- loc = loc->next;
- }
-
- fprintf (file, "\n");
-}
+ if (val2 != NULL_TREE
+ && TREE_CODE (val2) == INTEGER_CST
+ && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
+ TREE_TYPE (val),
+ val2, cst2), val))
+ {
+ enum tree_code new_comp_code = comp_code;
+ tree tmp, new_val;
+ tmp = name2;
+ if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
+ {
+ if (!TYPE_UNSIGNED (TREE_TYPE (val)))
+ {
+ tree type = build_nonstandard_integer_type (prec, 1);
+ tmp = build1 (NOP_EXPR, type, name2);
+ val2 = fold_convert (type, val2);
+ }
+ tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
+ new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
+ new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
+ }
+ else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
+ {
+ wide_int minval
+ = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
+ new_val = val2;
+ if (minval == wi::to_wide (new_val))
+ new_val = NULL_TREE;
+ }
+ else
+ {
+ wide_int maxval
+ = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
+ mask |= wi::to_wide (val2);
+ if (wi::eq_p (mask, maxval))
+ new_val = NULL_TREE;
+ else
+ new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
+ }
-/* Dump all the registered assertions for NAME to stderr. */
+ if (new_val)
+ {
+ if (dump_file)
+ {
+ fprintf (dump_file, "Adding assert for ");
+ print_generic_expr (dump_file, name2);
+ fprintf (dump_file, " from ");
+ print_generic_expr (dump_file, tmp);
+ fprintf (dump_file, "\n");
+ }
-DEBUG_FUNCTION void
-debug_asserts_for (tree name)
-{
- dump_asserts_for (stderr, name);
-}
-
-
-/* Dump all the registered assertions for all the names to FILE. */
-
-void
-dump_all_asserts (FILE *file)
-{
- unsigned i;
- bitmap_iterator bi;
+ add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
+ }
+ }
- fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
- EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
- dump_asserts_for (file, ssa_name (i));
- fprintf (file, "\n");
-}
+ /* Add asserts for NAME cmp CST and NAME being defined as
+ NAME = NAME2 & CST2.
+ Extract CST2 from the and.
-/* Dump all the registered assertions for all the names to stderr. */
+ Also handle
+ NAME = (unsigned) NAME2;
+ casts where NAME's type is unsigned and has smaller precision
+ than NAME2's type as if it was NAME = NAME2 & MASK. */
+ names[0] = NULL_TREE;
+ names[1] = NULL_TREE;
+ cst2 = NULL_TREE;
+ if (rhs_code == BIT_AND_EXPR
+ || (CONVERT_EXPR_CODE_P (rhs_code)
+ && INTEGRAL_TYPE_P (TREE_TYPE (val))
+ && TYPE_UNSIGNED (TREE_TYPE (val))
+ && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
+ > prec))
+ {
+ name2 = gimple_assign_rhs1 (def_stmt);
+ if (rhs_code == BIT_AND_EXPR)
+ cst2 = gimple_assign_rhs2 (def_stmt);
+ else
+ {
+ cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
+ nprec = TYPE_PRECISION (TREE_TYPE (name2));
+ }
+ if (TREE_CODE (name2) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (name2))
+ && TREE_CODE (cst2) == INTEGER_CST
+ && !integer_zerop (cst2)
+ && (nprec > 1
+ || TYPE_UNSIGNED (TREE_TYPE (val))))
+ {
+ gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
+ if (gimple_assign_cast_p (def_stmt2))
+ {
+ names[1] = gimple_assign_rhs1 (def_stmt2);
+ if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
+ || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
+ || (TYPE_PRECISION (TREE_TYPE (name2))
+ != TYPE_PRECISION (TREE_TYPE (names[1]))))
+ names[1] = NULL_TREE;
+ }
+ names[0] = name2;
+ }
+ }
+ if (names[0] || names[1])
+ {
+ wide_int minv, maxv, valv, cst2v;
+ wide_int tem, sgnbit;
+ bool valid_p = false, valn, cst2n;
+ enum tree_code ccode = comp_code;
-DEBUG_FUNCTION void
-debug_all_asserts (void)
-{
- dump_all_asserts (stderr);
-}
+ valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
+ cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
+ valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
+ cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
+ /* If CST2 doesn't have most significant bit set,
+ but VAL is negative, we have comparison like
+ if ((x & 0x123) > -4) (always true). Just give up. */
+ if (!cst2n && valn)
+ ccode = ERROR_MARK;
+ if (cst2n)
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
+ else
+ sgnbit = wi::zero (nprec);
+ minv = valv & cst2v;
+ switch (ccode)
+ {
+ case EQ_EXPR:
+ /* Minimum unsigned value for equality is VAL & CST2
+ (should be equal to VAL, otherwise we probably should
+ have folded the comparison into false) and
+ maximum unsigned value is VAL | ~CST2. */
+ maxv = valv | ~cst2v;
+ valid_p = true;
+ break;
-/* Push the assert info for NAME, EXPR, COMP_CODE and VAL to ASSERTS. */
+ case NE_EXPR:
+ tem = valv | ~cst2v;
+ /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
+ if (valv == 0)
+ {
+ cst2n = false;
+ sgnbit = wi::zero (nprec);
+ goto gt_expr;
+ }
+ /* If (VAL | ~CST2) is all ones, handle it as
+ (X & CST2) < VAL. */
+ if (tem == -1)
+ {
+ cst2n = false;
+ valn = false;
+ sgnbit = wi::zero (nprec);
+ goto lt_expr;
+ }
+ if (!cst2n && wi::neg_p (cst2v))
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
+ if (sgnbit != 0)
+ {
+ if (valv == sgnbit)
+ {
+ cst2n = true;
+ valn = true;
+ goto gt_expr;
+ }
+ if (tem == wi::mask (nprec - 1, false, nprec))
+ {
+ cst2n = true;
+ goto lt_expr;
+ }
+ if (!cst2n)
+ sgnbit = wi::zero (nprec);
+ }
+ break;
-static void
-add_assert_info (vec<assert_info> &asserts,
- tree name, tree expr, enum tree_code comp_code, tree val)
-{
- assert_info info;
- info.comp_code = comp_code;
- info.name = name;
- info.val = val;
- info.expr = expr;
- asserts.safe_push (info);
-}
+ case GE_EXPR:
+ /* Minimum unsigned value for >= if (VAL & CST2) == VAL
+ is VAL and maximum unsigned value is ~0. For signed
+ comparison, if CST2 doesn't have most significant bit
+ set, handle it similarly. If CST2 has MSB set,
+ the minimum is the same, and maximum is ~0U/2. */
+ if (minv != valv)
+ {
+ /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
+ VAL. */
+ minv = masked_increment (valv, cst2v, sgnbit, nprec);
+ if (minv == valv)
+ break;
+ }
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
+ valid_p = true;
+ break;
-/* If NAME doesn't have an ASSERT_EXPR registered for asserting
- 'EXPR COMP_CODE VAL' at a location that dominates block BB or
- E->DEST, then register this location as a possible insertion point
- for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
+ case GT_EXPR:
+ gt_expr:
+ /* Find out smallest MINV where MINV > VAL
+ && (MINV & CST2) == MINV, if any. If VAL is signed and
+ CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
+ minv = masked_increment (valv, cst2v, sgnbit, nprec);
+ if (minv == valv)
+ break;
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
+ valid_p = true;
+ break;
- BB, E and SI provide the exact insertion point for the new
- ASSERT_EXPR. If BB is NULL, then the ASSERT_EXPR is to be inserted
- on edge E. Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
- BB. If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
- must not be NULL. */
+ case LE_EXPR:
+ /* Minimum unsigned value for <= is 0 and maximum
+ unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
+ Otherwise, find smallest VAL2 where VAL2 > VAL
+ && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
+ as maximum.
+ For signed comparison, if CST2 doesn't have most
+ significant bit set, handle it similarly. If CST2 has
+ MSB set, the maximum is the same and minimum is INT_MIN. */
+ if (minv == valv)
+ maxv = valv;
+ else
+ {
+ maxv = masked_increment (valv, cst2v, sgnbit, nprec);
+ if (maxv == valv)
+ break;
+ maxv -= 1;
+ }
+ maxv |= ~cst2v;
+ minv = sgnbit;
+ valid_p = true;
+ break;
-static void
-register_new_assert_for (tree name, tree expr,
- enum tree_code comp_code,
- tree val,
- basic_block bb,
- edge e,
- gimple_stmt_iterator si)
-{
- assert_locus *n, *loc, *last_loc;
- basic_block dest_bb;
+ case LT_EXPR:
+ lt_expr:
+ /* Minimum unsigned value for < is 0 and maximum
+ unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
+ Otherwise, find smallest VAL2 where VAL2 > VAL
+ && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
+ as maximum.
+ For signed comparison, if CST2 doesn't have most
+ significant bit set, handle it similarly. If CST2 has
+ MSB set, the maximum is the same and minimum is INT_MIN. */
+ if (minv == valv)
+ {
+ if (valv == sgnbit)
+ break;
+ maxv = valv;
+ }
+ else
+ {
+ maxv = masked_increment (valv, cst2v, sgnbit, nprec);
+ if (maxv == valv)
+ break;
+ }
+ maxv -= 1;
+ maxv |= ~cst2v;
+ minv = sgnbit;
+ valid_p = true;
+ break;
- gcc_checking_assert (bb == NULL || e == NULL);
+ default:
+ break;
+ }
+ if (valid_p
+ && (maxv - minv) != -1)
+ {
+ tree tmp, new_val, type;
+ int i;
- if (e == NULL)
- gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
- && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
-
- /* Never build an assert comparing against an integer constant with
- TREE_OVERFLOW set. This confuses our undefined overflow warning
- machinery. */
- if (TREE_OVERFLOW_P (val))
- val = drop_tree_overflow (val);
-
- /* The new assertion A will be inserted at BB or E. We need to
- determine if the new location is dominated by a previously
- registered location for A. If we are doing an edge insertion,
- assume that A will be inserted at E->DEST. Note that this is not
- necessarily true.
-
- If E is a critical edge, it will be split. But even if E is
- split, the new block will dominate the same set of blocks that
- E->DEST dominates.
-
- The reverse, however, is not true, blocks dominated by E->DEST
- will not be dominated by the new block created to split E. So,
- if the insertion location is on a critical edge, we will not use
- the new location to move another assertion previously registered
- at a block dominated by E->DEST. */
- dest_bb = (bb) ? bb : e->dest;
+ for (i = 0; i < 2; i++)
+ if (names[i])
+ {
+ wide_int maxv2 = maxv;
+ tmp = names[i];
+ type = TREE_TYPE (names[i]);
+ if (!TYPE_UNSIGNED (type))
+ {
+ type = build_nonstandard_integer_type (nprec, 1);
+ tmp = build1 (NOP_EXPR, type, names[i]);
+ }
+ if (minv != 0)
+ {
+ tmp = build2 (PLUS_EXPR, type, tmp,
+ wide_int_to_tree (type, -minv));
+ maxv2 = maxv - minv;
+ }
+ new_val = wide_int_to_tree (type, maxv2);
- /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
- VAL at a block dominating DEST_BB, then we don't need to insert a new
- one. Similarly, if the same assertion already exists at a block
- dominated by DEST_BB and the new location is not on a critical
- edge, then update the existing location for the assertion (i.e.,
- move the assertion up in the dominance tree).
+ if (dump_file)
+ {
+ fprintf (dump_file, "Adding assert for ");
+ print_generic_expr (dump_file, names[i]);
+ fprintf (dump_file, " from ");
+ print_generic_expr (dump_file, tmp);
+ fprintf (dump_file, "\n");
+ }
- Note, this is implemented as a simple linked list because there
- should not be more than a handful of assertions registered per
- name. If this becomes a performance problem, a table hashed by
- COMP_CODE and VAL could be implemented. */
- loc = asserts_for[SSA_NAME_VERSION (name)];
- last_loc = loc;
- while (loc)
- {
- if (loc->comp_code == comp_code
- && (loc->val == val
- || operand_equal_p (loc->val, val, 0))
- && (loc->expr == expr
- || operand_equal_p (loc->expr, expr, 0)))
- {
- /* If E is not a critical edge and DEST_BB
- dominates the existing location for the assertion, move
- the assertion up in the dominance tree by updating its
- location information. */
- if ((e == NULL || !EDGE_CRITICAL_P (e))
- && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
- {
- loc->bb = dest_bb;
- loc->e = e;
- loc->si = si;
- return;
+ add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
+ }
}
}
-
- /* Update the last node of the list and move to the next one. */
- last_loc = loc;
- loc = loc->next;
}
+}
- /* If we didn't find an assertion already registered for
- NAME COMP_CODE VAL, add a new one at the end of the list of
- assertions associated with NAME. */
- n = XNEW (struct assert_locus);
- n->bb = dest_bb;
- n->e = e;
- n->si = si;
- n->comp_code = comp_code;
- n->val = val;
- n->expr = expr;
- n->next = NULL;
+/* OP is an operand of a truth value expression which is known to have
+ a particular value. Register any asserts for OP and for any
+ operands in OP's defining statement.
- if (last_loc)
- last_loc->next = n;
- else
- asserts_for[SSA_NAME_VERSION (name)] = n;
+ If CODE is EQ_EXPR, then we want to register OP is zero (false),
+ if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
- bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
-}
+static void
+register_edge_assert_for_1 (tree op, enum tree_code code,
+ edge e, vec<assert_info> &asserts)
+{
+ gimple *op_def;
+ tree val;
+ enum tree_code rhs_code;
-/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
- Extract a suitable test code and value and store them into *CODE_P and
- *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
+ /* We only care about SSA_NAMEs. */
+ if (TREE_CODE (op) != SSA_NAME)
+ return;
- If no extraction was possible, return FALSE, otherwise return TRUE.
+ /* We know that OP will have a zero or nonzero value. */
+ val = build_int_cst (TREE_TYPE (op), 0);
+ add_assert_info (asserts, op, op, code, val);
- If INVERT is true, then we invert the result stored into *CODE_P. */
+ /* Now look at how OP is set. If it's set from a comparison,
+ a truth operation or some bit operations, then we may be able
+ to register information about the operands of that assignment. */
+ op_def = SSA_NAME_DEF_STMT (op);
+ if (gimple_code (op_def) != GIMPLE_ASSIGN)
+ return;
-static bool
-extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
- tree cond_op0, tree cond_op1,
- bool invert, enum tree_code *code_p,
- tree *val_p)
-{
- enum tree_code comp_code;
- tree val;
+ rhs_code = gimple_assign_rhs_code (op_def);
- /* Otherwise, we have a comparison of the form NAME COMP VAL
- or VAL COMP NAME. */
- if (name == cond_op1)
+ if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
{
- /* If the predicate is of the form VAL COMP NAME, flip
- COMP around because we need to register NAME as the
- first operand in the predicate. */
- comp_code = swap_tree_comparison (cond_code);
- val = cond_op0;
+ bool invert = (code == EQ_EXPR ? true : false);
+ tree op0 = gimple_assign_rhs1 (op_def);
+ tree op1 = gimple_assign_rhs2 (op_def);
+
+ if (TREE_CODE (op0) == SSA_NAME)
+ register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
+ if (TREE_CODE (op1) == SSA_NAME)
+ register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
}
- else if (name == cond_op0)
+ else if ((code == NE_EXPR
+ && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
+ || (code == EQ_EXPR
+ && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
{
- /* The comparison is of the form NAME COMP VAL, so the
- comparison code remains unchanged. */
- comp_code = cond_code;
- val = cond_op1;
+ /* Recurse on each operand. */
+ tree op0 = gimple_assign_rhs1 (op_def);
+ tree op1 = gimple_assign_rhs2 (op_def);
+ if (TREE_CODE (op0) == SSA_NAME
+ && has_single_use (op0))
+ register_edge_assert_for_1 (op0, code, e, asserts);
+ if (TREE_CODE (op1) == SSA_NAME
+ && has_single_use (op1))
+ register_edge_assert_for_1 (op1, code, e, asserts);
}
- else
- gcc_unreachable ();
-
- /* Invert the comparison code as necessary. */
- if (invert)
- comp_code = invert_tree_comparison (comp_code, 0);
-
- /* VRP only handles integral and pointer types. */
- if (! INTEGRAL_TYPE_P (TREE_TYPE (val))
- && ! POINTER_TYPE_P (TREE_TYPE (val)))
- return false;
-
- /* Do not register always-false predicates.
- FIXME: this works around a limitation in fold() when dealing with
- enumerations. Given 'enum { N1, N2 } x;', fold will not
- fold 'if (x > N2)' to 'if (0)'. */
- if ((comp_code == GT_EXPR || comp_code == LT_EXPR)
- && INTEGRAL_TYPE_P (TREE_TYPE (val)))
+ else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
+ && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
{
- tree min = TYPE_MIN_VALUE (TREE_TYPE (val));
- tree max = TYPE_MAX_VALUE (TREE_TYPE (val));
-
- if (comp_code == GT_EXPR
- && (!max
- || compare_values (val, max) == 0))
- return false;
-
- if (comp_code == LT_EXPR
- && (!min
- || compare_values (val, min) == 0))
- return false;
+ /* Recurse, flipping CODE. */
+ code = invert_tree_comparison (code, false);
+ register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
+ }
+ else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
+ {
+ /* Recurse through the copy. */
+ register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
+ }
+ else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
+ {
+ /* Recurse through the type conversion, unless it is a narrowing
+ conversion or conversion from non-integral type. */
+ tree rhs = gimple_assign_rhs1 (op_def);
+ if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
+ && (TYPE_PRECISION (TREE_TYPE (rhs))
+ <= TYPE_PRECISION (TREE_TYPE (op))))
+ register_edge_assert_for_1 (rhs, code, e, asserts);
}
- *code_p = comp_code;
- *val_p = val;
- return true;
}
-/* Find out smallest RES where RES > VAL && (RES & MASK) == RES, if any
- (otherwise return VAL). VAL and MASK must be zero-extended for
- precision PREC. If SGNBIT is non-zero, first xor VAL with SGNBIT
- (to transform signed values into unsigned) and at the end xor
- SGNBIT back. */
+/* Check if comparison
+ NAME COND_OP INTEGER_CST
+ has a form of
+ (X & 11...100..0) COND_OP XX...X00...0
+ Such comparison can yield assertions like
+ X >= XX...X00...0
+ X <= XX...X11...1
+ in case of COND_OP being NE_EXPR or
+ X < XX...X00...0
+ X > XX...X11...1
+ in case of EQ_EXPR. */
-static wide_int
-masked_increment (const wide_int &val_in, const wide_int &mask,
- const wide_int &sgnbit, unsigned int prec)
+static bool
+is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
+ tree *new_name, tree *low, enum tree_code *low_code,
+ tree *high, enum tree_code *high_code)
{
- wide_int bit = wi::one (prec), res;
- unsigned int i;
-
- wide_int val = val_in ^ sgnbit;
- for (i = 0; i < prec; i++, bit += bit)
- {
- res = mask;
- if ((res & bit) == 0)
- continue;
- res = bit - 1;
- res = wi::bit_and_not (val + bit, res);
- res &= mask;
- if (wi::gtu_p (res, val))
- return res ^ sgnbit;
- }
- return val ^ sgnbit;
-}
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-/* Helper for overflow_comparison_p
+ if (!is_gimple_assign (def_stmt)
+ || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
+ return false;
- OP0 CODE OP1 is a comparison. Examine the comparison and potentially
- OP1's defining statement to see if it ultimately has the form
- OP0 CODE (OP0 PLUS INTEGER_CST)
+ tree t = gimple_assign_rhs1 (def_stmt);
+ tree maskt = gimple_assign_rhs2 (def_stmt);
+ if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
+ return false;
- If so, return TRUE indicating this is an overflow test and store into
- *NEW_CST an updated constant that can be used in a narrowed range test.
+ wi::tree_to_wide_ref mask = wi::to_wide (maskt);
+ wide_int inv_mask = ~mask;
+ /* Assume VALT is INTEGER_CST. */
+ wi::tree_to_wide_ref val = wi::to_wide (valt);
- REVERSED indicates if the comparison was originally:
+ if ((inv_mask & (inv_mask + 1)) != 0
+ || (val & mask) != val)
+ return false;
- OP1 CODE' OP0.
+ bool is_range = cond_code == EQ_EXPR;
- This affects how we build the updated constant. */
+ tree type = TREE_TYPE (t);
+ wide_int min = wi::min_value (type),
+ max = wi::max_value (type);
-static bool
-overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
- bool follow_assert_exprs, bool reversed, tree *new_cst)
-{
- /* See if this is a relational operation between two SSA_NAMES with
- unsigned, overflow wrapping values. If so, check it more deeply. */
- if ((code == LT_EXPR || code == LE_EXPR
- || code == GE_EXPR || code == GT_EXPR)
- && TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && TYPE_UNSIGNED (TREE_TYPE (op0))
- && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
+ if (is_range)
{
- gimple *op1_def = SSA_NAME_DEF_STMT (op1);
-
- /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
- if (follow_assert_exprs)
+ *low_code = val == min ? ERROR_MARK : GE_EXPR;
+ *high_code = val == max ? ERROR_MARK : LE_EXPR;
+ }
+ else
+ {
+ /* We can still generate assertion if one of alternatives
+ is known to always be false. */
+ if (val == min)
{
- while (gimple_assign_single_p (op1_def)
- && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
- {
- op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
- if (TREE_CODE (op1) != SSA_NAME)
- break;
- op1_def = SSA_NAME_DEF_STMT (op1);
- }
+ *low_code = (enum tree_code) 0;
+ *high_code = GT_EXPR;
}
-
- /* Now look at the defining statement of OP1 to see if it adds
- or subtracts a nonzero constant from another operand. */
- if (op1_def
- && is_gimple_assign (op1_def)
- && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
- && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
- && !integer_zerop (gimple_assign_rhs2 (op1_def)))
+ else if ((val | inv_mask) == max)
{
- tree target = gimple_assign_rhs1 (op1_def);
-
- /* If requested, follow ASSERT_EXPRs backwards for op0 looking
- for one where TARGET appears on the RHS. */
- if (follow_assert_exprs)
- {
- /* Now see if that "other operand" is op0, following the chain
- of ASSERT_EXPRs if necessary. */
- gimple *op0_def = SSA_NAME_DEF_STMT (op0);
- while (op0 != target
- && gimple_assign_single_p (op0_def)
- && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
- {
- op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
- if (TREE_CODE (op0) != SSA_NAME)
- break;
- op0_def = SSA_NAME_DEF_STMT (op0);
- }
- }
-
- /* If we did not find our target SSA_NAME, then this is not
- an overflow test. */
- if (op0 != target)
- return false;
-
- tree type = TREE_TYPE (op0);
- wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
- tree inc = gimple_assign_rhs2 (op1_def);
- if (reversed)
- *new_cst = wide_int_to_tree (type, max + wi::to_wide (inc));
- else
- *new_cst = wide_int_to_tree (type, max - wi::to_wide (inc));
- return true;
+ *low_code = LT_EXPR;
+ *high_code = (enum tree_code) 0;
}
+ else
+ return false;
}
- return false;
-}
-
-/* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
- OP1's defining statement to see if it ultimately has the form
- OP0 CODE (OP0 PLUS INTEGER_CST)
- If so, return TRUE indicating this is an overflow test and store into
- *NEW_CST an updated constant that can be used in a narrowed range test.
+ *new_name = t;
+ *low = wide_int_to_tree (type, val);
+ *high = wide_int_to_tree (type, val | inv_mask);
- These statements are left as-is in the IL to facilitate discovery of
- {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
- the alternate range representation is often useful within VRP. */
+ if (wi::neg_p (val, TYPE_SIGN (type)))
+ std::swap (*low, *high);
-static bool
-overflow_comparison_p (tree_code code, tree name, tree val,
- bool use_equiv_p, tree *new_cst)
-{
- if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
- return true;
- return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
- use_equiv_p, true, new_cst);
+ return true;
}
-
/* Try to register an edge assertion for SSA name NAME on edge E for
- the condition COND contributing to the conditional jump pointed to by BSI.
- Invert the condition COND if INVERT is true. */
+ the condition COND contributing to the conditional jump pointed to by
+ SI. */
-static void
-register_edge_assert_for_2 (tree name, edge e,
- enum tree_code cond_code,
- tree cond_op0, tree cond_op1, bool invert,
- vec<assert_info> &asserts)
+void
+register_edge_assert_for (tree name, edge e,
+ enum tree_code cond_code, tree cond_op0,
+ tree cond_op1, vec<assert_info> &asserts)
{
tree val;
enum tree_code comp_code;
+ bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
+
+ /* Do not attempt to infer anything in names that flow through
+ abnormal edges. */
+ if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
+ return;
if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
- cond_op0,
- cond_op1,
- invert, &comp_code, &val))
+ cond_op0, cond_op1,
+ is_else_edge,
+ &comp_code, &val))
return;
- /* Queue the assert. */
- tree x;
- if (overflow_comparison_p (comp_code, name, val, false, &x))
- {
- enum tree_code new_code = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
- ? GT_EXPR : LE_EXPR);
- add_assert_info (asserts, name, name, new_code, x);
- }
- add_assert_info (asserts, name, name, comp_code, val);
+ /* Register ASSERT_EXPRs for name. */
+ register_edge_assert_for_2 (name, e, cond_code, cond_op0,
+ cond_op1, is_else_edge, asserts);
- /* In the case of NAME <= CST and NAME being defined as
- NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
- and NAME2 <= CST - CST2. We can do the same for NAME > CST.
- This catches range and anti-range tests. */
- if ((comp_code == LE_EXPR
- || comp_code == GT_EXPR)
- && TREE_CODE (val) == INTEGER_CST
- && TYPE_UNSIGNED (TREE_TYPE (val)))
+
+ /* If COND is effectively an equality test of an SSA_NAME against
+ the value zero or one, then we may be able to assert values
+ for SSA_NAMEs which flow into COND. */
+
+ /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
+ statement of NAME we can assert both operands of the BIT_AND_EXPR
+ have nonzero value. */
+ if (((comp_code == EQ_EXPR && integer_onep (val))
+ || (comp_code == NE_EXPR && integer_zerop (val))))
{
gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- tree cst2 = NULL_TREE, name2 = NULL_TREE, name3 = NULL_TREE;
- /* Extract CST2 from the (optional) addition. */
if (is_gimple_assign (def_stmt)
- && gimple_assign_rhs_code (def_stmt) == PLUS_EXPR)
+ && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
{
- name2 = gimple_assign_rhs1 (def_stmt);
- cst2 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (name2) == SSA_NAME
- && TREE_CODE (cst2) == INTEGER_CST)
- def_stmt = SSA_NAME_DEF_STMT (name2);
+ tree op0 = gimple_assign_rhs1 (def_stmt);
+ tree op1 = gimple_assign_rhs2 (def_stmt);
+ register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
+ register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
}
+ }
- /* Extract NAME2 from the (optional) sign-changing cast. */
- if (gimple_assign_cast_p (def_stmt))
+ /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
+ statement of NAME we can assert both operands of the BIT_IOR_EXPR
+ have zero value. */
+ if (((comp_code == EQ_EXPR && integer_zerop (val))
+ || (comp_code == NE_EXPR && integer_onep (val))))
+ {
+ gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+
+ /* For BIT_IOR_EXPR only if NAME == 0 both operands have
+ necessarily zero value, or if type-precision is one. */
+ if (is_gimple_assign (def_stmt)
+ && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
+ && (TYPE_PRECISION (TREE_TYPE (name)) == 1
+ || comp_code == EQ_EXPR)))
{
- if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))
- && ! TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
- && (TYPE_PRECISION (gimple_expr_type (def_stmt))
- == TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))))
- name3 = gimple_assign_rhs1 (def_stmt);
+ tree op0 = gimple_assign_rhs1 (def_stmt);
+ tree op1 = gimple_assign_rhs2 (def_stmt);
+ register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
+ register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
}
+ }
- /* If name3 is used later, create an ASSERT_EXPR for it. */
- if (name3 != NULL_TREE
- && TREE_CODE (name3) == SSA_NAME
- && (cst2 == NULL_TREE
- || TREE_CODE (cst2) == INTEGER_CST)
- && INTEGRAL_TYPE_P (TREE_TYPE (name3)))
- {
- tree tmp;
+ /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
+ if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
+ && TREE_CODE (val) == INTEGER_CST)
+ {
+ enum tree_code low_code, high_code;
+ tree low, high;
+ if (is_masked_range_test (name, val, comp_code, &name, &low,
+ &low_code, &high, &high_code))
+ {
+ if (low_code != ERROR_MARK)
+ register_edge_assert_for_2 (name, e, low_code, name,
+ low, /*invert*/false, asserts);
+ if (high_code != ERROR_MARK)
+ register_edge_assert_for_2 (name, e, high_code, name,
+ high, /*invert*/false, asserts);
+ }
+ }
+}
- /* Build an expression for the range test. */
- tmp = build1 (NOP_EXPR, TREE_TYPE (name), name3);
- if (cst2 != NULL_TREE)
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
+/* Finish found ASSERTS for E and register them at GSI. */
- if (dump_file)
- {
- fprintf (dump_file, "Adding assert for ");
- print_generic_expr (dump_file, name3);
- fprintf (dump_file, " from ");
- print_generic_expr (dump_file, tmp);
- fprintf (dump_file, "\n");
- }
+static void
+finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
+ vec<assert_info> &asserts)
+{
+ for (unsigned i = 0; i < asserts.length (); ++i)
+ /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
+ reachable from E. */
+ if (live_on_edge (e, asserts[i].name))
+ register_new_assert_for (asserts[i].name, asserts[i].expr,
+ asserts[i].comp_code, asserts[i].val,
+ NULL, e, gsi);
+}
- add_assert_info (asserts, name3, tmp, comp_code, val);
- }
- /* If name2 is used later, create an ASSERT_EXPR for it. */
- if (name2 != NULL_TREE
- && TREE_CODE (name2) == SSA_NAME
- && TREE_CODE (cst2) == INTEGER_CST
- && INTEGRAL_TYPE_P (TREE_TYPE (name2)))
- {
- tree tmp;
- /* Build an expression for the range test. */
- tmp = name2;
- if (TREE_TYPE (name) != TREE_TYPE (name2))
- tmp = build1 (NOP_EXPR, TREE_TYPE (name), tmp);
- if (cst2 != NULL_TREE)
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name), tmp, cst2);
+/* Determine whether the outgoing edges of BB should receive an
+ ASSERT_EXPR for each of the operands of BB's LAST statement.
+ The last statement of BB must be a COND_EXPR.
- if (dump_file)
- {
- fprintf (dump_file, "Adding assert for ");
- print_generic_expr (dump_file, name2);
- fprintf (dump_file, " from ");
- print_generic_expr (dump_file, tmp);
- fprintf (dump_file, "\n");
- }
+ If any of the sub-graphs rooted at BB have an interesting use of
+ the predicate operands, an assert location node is added to the
+ list of assertions for the corresponding operands. */
- add_assert_info (asserts, name2, tmp, comp_code, val);
- }
+static void
+find_conditional_asserts (basic_block bb, gcond *last)
+{
+ gimple_stmt_iterator bsi;
+ tree op;
+ edge_iterator ei;
+ edge e;
+ ssa_op_iter iter;
+
+ bsi = gsi_for_stmt (last);
+
+ /* Look for uses of the operands in each of the sub-graphs
+ rooted at BB. We need to check each of the outgoing edges
+ separately, so that we know what kind of ASSERT_EXPR to
+ insert. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (e->dest == bb)
+ continue;
+
+ /* Register the necessary assertions for each operand in the
+ conditional predicate. */
+ auto_vec<assert_info, 8> asserts;
+ FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
+ register_edge_assert_for (op, e,
+ gimple_cond_code (last),
+ gimple_cond_lhs (last),
+ gimple_cond_rhs (last), asserts);
+ finish_register_edge_assert_for (e, bsi, asserts);
}
+}
- /* In the case of post-in/decrement tests like if (i++) ... and uses
- of the in/decremented value on the edge the extra name we want to
- assert for is not on the def chain of the name compared. Instead
- it is in the set of use stmts.
- Similar cases happen for conversions that were simplified through
- fold_{sign_changed,widened}_comparison. */
- if ((comp_code == NE_EXPR
- || comp_code == EQ_EXPR)
- && TREE_CODE (val) == INTEGER_CST)
+struct case_info
+{
+ tree expr;
+ basic_block bb;
+};
+
+/* Compare two case labels sorting first by the destination bb index
+ and then by the case value. */
+
+static int
+compare_case_labels (const void *p1, const void *p2)
+{
+ const struct case_info *ci1 = (const struct case_info *) p1;
+ const struct case_info *ci2 = (const struct case_info *) p2;
+ int idx1 = ci1->bb->index;
+ int idx2 = ci2->bb->index;
+
+ if (idx1 < idx2)
+ return -1;
+ else if (idx1 == idx2)
{
- imm_use_iterator ui;
- gimple *use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, ui, name)
- {
- if (!is_gimple_assign (use_stmt))
- continue;
+ /* Make sure the default label is first in a group. */
+ if (!CASE_LOW (ci1->expr))
+ return -1;
+ else if (!CASE_LOW (ci2->expr))
+ return 1;
+ else
+ return tree_int_cst_compare (CASE_LOW (ci1->expr),
+ CASE_LOW (ci2->expr));
+ }
+ else
+ return 1;
+}
- /* Cut off to use-stmts that are dominating the predecessor. */
- if (!dominated_by_p (CDI_DOMINATORS, e->src, gimple_bb (use_stmt)))
- continue;
+/* Determine whether the outgoing edges of BB should receive an
+ ASSERT_EXPR for each of the operands of BB's LAST statement.
+ The last statement of BB must be a SWITCH_EXPR.
- tree name2 = gimple_assign_lhs (use_stmt);
- if (TREE_CODE (name2) != SSA_NAME)
- continue;
+ If any of the sub-graphs rooted at BB have an interesting use of
+ the predicate operands, an assert location node is added to the
+ list of assertions for the corresponding operands. */
- enum tree_code code = gimple_assign_rhs_code (use_stmt);
- tree cst;
- if (code == PLUS_EXPR
- || code == MINUS_EXPR)
- {
- cst = gimple_assign_rhs2 (use_stmt);
- if (TREE_CODE (cst) != INTEGER_CST)
- continue;
- cst = int_const_binop (code, val, cst);
- }
- else if (CONVERT_EXPR_CODE_P (code))
- {
- /* For truncating conversions we cannot record
- an inequality. */
- if (comp_code == NE_EXPR
- && (TYPE_PRECISION (TREE_TYPE (name2))
- < TYPE_PRECISION (TREE_TYPE (name))))
- continue;
- cst = fold_convert (TREE_TYPE (name2), val);
- }
- else
- continue;
+static void
+find_switch_asserts (basic_block bb, gswitch *last)
+{
+ gimple_stmt_iterator bsi;
+ tree op;
+ edge e;
+ struct case_info *ci;
+ size_t n = gimple_switch_num_labels (last);
+#if GCC_VERSION >= 4000
+ unsigned int idx;
+#else
+ /* Work around GCC 3.4 bug (PR 37086). */
+ volatile unsigned int idx;
+#endif
- if (TREE_OVERFLOW_P (cst))
- cst = drop_tree_overflow (cst);
- add_assert_info (asserts, name2, name2, comp_code, cst);
- }
+ bsi = gsi_for_stmt (last);
+ op = gimple_switch_index (last);
+ if (TREE_CODE (op) != SSA_NAME)
+ return;
+
+ /* Build a vector of case labels sorted by destination label. */
+ ci = XNEWVEC (struct case_info, n);
+ for (idx = 0; idx < n; ++idx)
+ {
+ ci[idx].expr = gimple_switch_label (last, idx);
+ ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
}
-
- if (TREE_CODE_CLASS (comp_code) == tcc_comparison
- && TREE_CODE (val) == INTEGER_CST)
+ edge default_edge = find_edge (bb, ci[0].bb);
+ qsort (ci, n, sizeof (struct case_info), compare_case_labels);
+
+ for (idx = 0; idx < n; ++idx)
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
- tree val2 = NULL_TREE;
- unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
- wide_int mask = wi::zero (prec);
- unsigned int nprec = prec;
- enum tree_code rhs_code = ERROR_MARK;
+ tree min, max;
+ tree cl = ci[idx].expr;
+ basic_block cbb = ci[idx].bb;
- if (is_gimple_assign (def_stmt))
- rhs_code = gimple_assign_rhs_code (def_stmt);
+ min = CASE_LOW (cl);
+ max = CASE_HIGH (cl);
- /* In the case of NAME != CST1 where NAME = A +- CST2 we can
- assert that A != CST1 -+ CST2. */
- if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
- && (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR))
+ /* If there are multiple case labels with the same destination
+ we need to combine them to a single value range for the edge. */
+ if (idx + 1 < n && cbb == ci[idx + 1].bb)
{
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == INTEGER_CST)
- {
- enum tree_code reverse_op = (rhs_code == PLUS_EXPR
- ? MINUS_EXPR : PLUS_EXPR);
- op1 = int_const_binop (reverse_op, val, op1);
- if (TREE_OVERFLOW (op1))
- op1 = drop_tree_overflow (op1);
- add_assert_info (asserts, op0, op0, comp_code, op1);
- }
- }
-
- /* Add asserts for NAME cmp CST and NAME being defined
- as NAME = (int) NAME2. */
- if (!TYPE_UNSIGNED (TREE_TYPE (val))
- && (comp_code == LE_EXPR || comp_code == LT_EXPR
- || comp_code == GT_EXPR || comp_code == GE_EXPR)
- && gimple_assign_cast_p (def_stmt))
- {
- name2 = gimple_assign_rhs1 (def_stmt);
- if (CONVERT_EXPR_CODE_P (rhs_code)
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && TYPE_UNSIGNED (TREE_TYPE (name2))
- && prec == TYPE_PRECISION (TREE_TYPE (name2))
- && (comp_code == LE_EXPR || comp_code == GT_EXPR
- || !tree_int_cst_equal (val,
- TYPE_MIN_VALUE (TREE_TYPE (val)))))
- {
- tree tmp, cst;
- enum tree_code new_comp_code = comp_code;
-
- cst = fold_convert (TREE_TYPE (name2),
- TYPE_MIN_VALUE (TREE_TYPE (val)));
- /* Build an expression for the range test. */
- tmp = build2 (PLUS_EXPR, TREE_TYPE (name2), name2, cst);
- cst = fold_build2 (PLUS_EXPR, TREE_TYPE (name2), cst,
- fold_convert (TREE_TYPE (name2), val));
- if (comp_code == LT_EXPR || comp_code == GE_EXPR)
- {
- new_comp_code = comp_code == LT_EXPR ? LE_EXPR : GT_EXPR;
- cst = fold_build2 (MINUS_EXPR, TREE_TYPE (name2), cst,
- build_int_cst (TREE_TYPE (name2), 1));
- }
-
- if (dump_file)
- {
- fprintf (dump_file, "Adding assert for ");
- print_generic_expr (dump_file, name2);
- fprintf (dump_file, " from ");
- print_generic_expr (dump_file, tmp);
- fprintf (dump_file, "\n");
- }
+ /* Skip labels until the last of the group. */
+ do {
+ ++idx;
+ } while (idx < n && cbb == ci[idx].bb);
+ --idx;
- add_assert_info (asserts, name2, tmp, new_comp_code, cst);
- }
+ /* Pick up the maximum of the case label range. */
+ if (CASE_HIGH (ci[idx].expr))
+ max = CASE_HIGH (ci[idx].expr);
+ else
+ max = CASE_LOW (ci[idx].expr);
}
- /* Add asserts for NAME cmp CST and NAME being defined as
- NAME = NAME2 >> CST2.
+ /* Can't extract a useful assertion out of a range that includes the
+ default label. */
+ if (min == NULL_TREE)
+ continue;
- Extract CST2 from the right shift. */
- if (rhs_code == RSHIFT_EXPR)
- {
- name2 = gimple_assign_rhs1 (def_stmt);
- cst2 = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (name2) == SSA_NAME
- && tree_fits_uhwi_p (cst2)
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
- && type_has_mode_precision_p (TREE_TYPE (val)))
- {
- mask = wi::mask (tree_to_uhwi (cst2), false, prec);
- val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
- }
- }
- if (val2 != NULL_TREE
- && TREE_CODE (val2) == INTEGER_CST
- && simple_cst_equal (fold_build2 (RSHIFT_EXPR,
- TREE_TYPE (val),
- val2, cst2), val))
- {
- enum tree_code new_comp_code = comp_code;
- tree tmp, new_val;
+ /* Find the edge to register the assert expr on. */
+ e = find_edge (bb, cbb);
- tmp = name2;
- if (comp_code == EQ_EXPR || comp_code == NE_EXPR)
- {
- if (!TYPE_UNSIGNED (TREE_TYPE (val)))
- {
- tree type = build_nonstandard_integer_type (prec, 1);
- tmp = build1 (NOP_EXPR, type, name2);
- val2 = fold_convert (type, val2);
- }
- tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
- new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
- new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
- }
- else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
- {
- wide_int minval
- = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- new_val = val2;
- if (minval == wi::to_wide (new_val))
- new_val = NULL_TREE;
- }
- else
- {
- wide_int maxval
- = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
- mask |= wi::to_wide (val2);
- if (wi::eq_p (mask, maxval))
- new_val = NULL_TREE;
- else
- new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
- }
+ /* Register the necessary assertions for the operand in the
+ SWITCH_EXPR. */
+ auto_vec<assert_info, 8> asserts;
+ register_edge_assert_for (op, e,
+ max ? GE_EXPR : EQ_EXPR,
+ op, fold_convert (TREE_TYPE (op), min),
+ asserts);
+ if (max)
+ register_edge_assert_for (op, e, LE_EXPR, op,
+ fold_convert (TREE_TYPE (op), max),
+ asserts);
+ finish_register_edge_assert_for (e, bsi, asserts);
+ }
- if (new_val)
- {
- if (dump_file)
- {
- fprintf (dump_file, "Adding assert for ");
- print_generic_expr (dump_file, name2);
- fprintf (dump_file, " from ");
- print_generic_expr (dump_file, tmp);
- fprintf (dump_file, "\n");
- }
+ XDELETEVEC (ci);
- add_assert_info (asserts, name2, tmp, new_comp_code, new_val);
- }
- }
+ if (!live_on_edge (default_edge, op))
+ return;
- /* Add asserts for NAME cmp CST and NAME being defined as
- NAME = NAME2 & CST2.
+ /* Now register along the default label assertions that correspond to the
+ anti-range of each label. */
+ int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
+ if (insertion_limit == 0)
+ return;
- Extract CST2 from the and.
+ /* We can't do this if the default case shares a label with another case. */
+ tree default_cl = gimple_switch_default_label (last);
+ for (idx = 1; idx < n; idx++)
+ {
+ tree min, max;
+ tree cl = gimple_switch_label (last, idx);
+ if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
+ continue;
- Also handle
- NAME = (unsigned) NAME2;
- casts where NAME's type is unsigned and has smaller precision
- than NAME2's type as if it was NAME = NAME2 & MASK. */
- names[0] = NULL_TREE;
- names[1] = NULL_TREE;
- cst2 = NULL_TREE;
- if (rhs_code == BIT_AND_EXPR
- || (CONVERT_EXPR_CODE_P (rhs_code)
- && INTEGRAL_TYPE_P (TREE_TYPE (val))
- && TYPE_UNSIGNED (TREE_TYPE (val))
- && TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt)))
- > prec))
+ min = CASE_LOW (cl);
+ max = CASE_HIGH (cl);
+
+ /* Combine contiguous case ranges to reduce the number of assertions
+ to insert. */
+ for (idx = idx + 1; idx < n; idx++)
{
- name2 = gimple_assign_rhs1 (def_stmt);
- if (rhs_code == BIT_AND_EXPR)
- cst2 = gimple_assign_rhs2 (def_stmt);
+ tree next_min, next_max;
+ tree next_cl = gimple_switch_label (last, idx);
+ if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
+ break;
+
+ next_min = CASE_LOW (next_cl);
+ next_max = CASE_HIGH (next_cl);
+
+ wide_int difference = (wi::to_wide (next_min)
+ - wi::to_wide (max ? max : min));
+ if (wi::eq_p (difference, 1))
+ max = next_max ? next_max : next_min;
else
- {
- cst2 = TYPE_MAX_VALUE (TREE_TYPE (val));
- nprec = TYPE_PRECISION (TREE_TYPE (name2));
- }
- if (TREE_CODE (name2) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (name2))
- && TREE_CODE (cst2) == INTEGER_CST
- && !integer_zerop (cst2)
- && (nprec > 1
- || TYPE_UNSIGNED (TREE_TYPE (val))))
- {
- gimple *def_stmt2 = SSA_NAME_DEF_STMT (name2);
- if (gimple_assign_cast_p (def_stmt2))
- {
- names[1] = gimple_assign_rhs1 (def_stmt2);
- if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt2))
- || !INTEGRAL_TYPE_P (TREE_TYPE (names[1]))
- || (TYPE_PRECISION (TREE_TYPE (name2))
- != TYPE_PRECISION (TREE_TYPE (names[1]))))
- names[1] = NULL_TREE;
- }
- names[0] = name2;
- }
+ break;
}
- if (names[0] || names[1])
+ idx--;
+
+ if (max == NULL_TREE)
{
- wide_int minv, maxv, valv, cst2v;
- wide_int tem, sgnbit;
- bool valid_p = false, valn, cst2n;
- enum tree_code ccode = comp_code;
+ /* Register the assertion OP != MIN. */
+ auto_vec<assert_info, 8> asserts;
+ min = fold_convert (TREE_TYPE (op), min);
+ register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
+ asserts);
+ finish_register_edge_assert_for (default_edge, bsi, asserts);
+ }
+ else
+ {
+ /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
+ which will give OP the anti-range ~[MIN,MAX]. */
+ tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
+ min = fold_convert (TREE_TYPE (uop), min);
+ max = fold_convert (TREE_TYPE (uop), max);
- valv = wide_int::from (wi::to_wide (val), nprec, UNSIGNED);
- cst2v = wide_int::from (wi::to_wide (cst2), nprec, UNSIGNED);
- valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
- cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
- /* If CST2 doesn't have most significant bit set,
- but VAL is negative, we have comparison like
- if ((x & 0x123) > -4) (always true). Just give up. */
- if (!cst2n && valn)
- ccode = ERROR_MARK;
- if (cst2n)
- sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
- else
- sgnbit = wi::zero (nprec);
- minv = valv & cst2v;
- switch (ccode)
- {
- case EQ_EXPR:
- /* Minimum unsigned value for equality is VAL & CST2
- (should be equal to VAL, otherwise we probably should
- have folded the comparison into false) and
- maximum unsigned value is VAL | ~CST2. */
- maxv = valv | ~cst2v;
- valid_p = true;
- break;
+ tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
+ tree rhs = int_const_binop (MINUS_EXPR, max, min);
+ register_new_assert_for (op, lhs, GT_EXPR, rhs,
+ NULL, default_edge, bsi);
+ }
- case NE_EXPR:
- tem = valv | ~cst2v;
- /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
- if (valv == 0)
- {
- cst2n = false;
- sgnbit = wi::zero (nprec);
- goto gt_expr;
- }
- /* If (VAL | ~CST2) is all ones, handle it as
- (X & CST2) < VAL. */
- if (tem == -1)
- {
- cst2n = false;
- valn = false;
- sgnbit = wi::zero (nprec);
- goto lt_expr;
- }
- if (!cst2n && wi::neg_p (cst2v))
- sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
- if (sgnbit != 0)
- {
- if (valv == sgnbit)
- {
- cst2n = true;
- valn = true;
- goto gt_expr;
- }
- if (tem == wi::mask (nprec - 1, false, nprec))
- {
- cst2n = true;
- goto lt_expr;
- }
- if (!cst2n)
- sgnbit = wi::zero (nprec);
- }
- break;
+ if (--insertion_limit == 0)
+ break;
+ }
+}
- case GE_EXPR:
- /* Minimum unsigned value for >= if (VAL & CST2) == VAL
- is VAL and maximum unsigned value is ~0. For signed
- comparison, if CST2 doesn't have most significant bit
- set, handle it similarly. If CST2 has MSB set,
- the minimum is the same, and maximum is ~0U/2. */
- if (minv != valv)
- {
- /* If (VAL & CST2) != VAL, X & CST2 can't be equal to
- VAL. */
- minv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (minv == valv)
- break;
- }
- maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
- valid_p = true;
- break;
- case GT_EXPR:
- gt_expr:
- /* Find out smallest MINV where MINV > VAL
- && (MINV & CST2) == MINV, if any. If VAL is signed and
- CST2 has MSB set, compute it biased by 1 << (nprec - 1). */
- minv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (minv == valv)
- break;
- maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
- valid_p = true;
- break;
+/* Traverse all the statements in block BB looking for statements that
+   may generate useful assertions for the SSA names in their operands.
+ If a statement produces a useful assertion A for name N_i, then the
+ list of assertions already generated for N_i is scanned to
+ determine if A is actually needed.
- case LE_EXPR:
- /* Minimum unsigned value for <= is 0 and maximum
- unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
- Otherwise, find smallest VAL2 where VAL2 > VAL
- && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
- as maximum.
- For signed comparison, if CST2 doesn't have most
- significant bit set, handle it similarly. If CST2 has
- MSB set, the maximum is the same and minimum is INT_MIN. */
- if (minv == valv)
- maxv = valv;
- else
- {
- maxv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (maxv == valv)
- break;
- maxv -= 1;
- }
- maxv |= ~cst2v;
- minv = sgnbit;
- valid_p = true;
- break;
+ If N_i already had the assertion A at a location dominating the
+ current location, then nothing needs to be done. Otherwise, the
+ new location for A is recorded instead.
- case LT_EXPR:
- lt_expr:
- /* Minimum unsigned value for < is 0 and maximum
- unsigned value is (VAL-1) | ~CST2 if (VAL & CST2) == VAL.
- Otherwise, find smallest VAL2 where VAL2 > VAL
- && (VAL2 & CST2) == VAL2 and use (VAL2 - 1) | ~CST2
- as maximum.
- For signed comparison, if CST2 doesn't have most
- significant bit set, handle it similarly. If CST2 has
- MSB set, the maximum is the same and minimum is INT_MIN. */
- if (minv == valv)
- {
- if (valv == sgnbit)
- break;
- maxv = valv;
- }
- else
- {
- maxv = masked_increment (valv, cst2v, sgnbit, nprec);
- if (maxv == valv)
- break;
- }
- maxv -= 1;
- maxv |= ~cst2v;
- minv = sgnbit;
- valid_p = true;
- break;
+ 1- For every statement S in BB, all the variables used by S are
+ added to bitmap FOUND_IN_SUBGRAPH.
- default:
- break;
- }
- if (valid_p
- && (maxv - minv) != -1)
- {
- tree tmp, new_val, type;
- int i;
+ 2- If statement S uses an operand N in a way that exposes a known
+ value range for N, then if N was not already generated by an
+ ASSERT_EXPR, create a new assert location for N. For instance,
+ if N is a pointer and the statement dereferences it, we can
+ assume that N is not NULL.
- for (i = 0; i < 2; i++)
- if (names[i])
- {
- wide_int maxv2 = maxv;
- tmp = names[i];
- type = TREE_TYPE (names[i]);
- if (!TYPE_UNSIGNED (type))
- {
- type = build_nonstandard_integer_type (nprec, 1);
- tmp = build1 (NOP_EXPR, type, names[i]);
- }
- if (minv != 0)
- {
- tmp = build2 (PLUS_EXPR, type, tmp,
- wide_int_to_tree (type, -minv));
- maxv2 = maxv - minv;
- }
- new_val = wide_int_to_tree (type, maxv2);
+ 3- COND_EXPRs are a special case of #2. We can derive range
+ information from the predicate but need to insert different
+ ASSERT_EXPRs for each of the sub-graphs rooted at the
+ conditional block. If the last statement of BB is a conditional
+ expression of the form 'X op Y', then
- if (dump_file)
- {
- fprintf (dump_file, "Adding assert for ");
- print_generic_expr (dump_file, names[i]);
- fprintf (dump_file, " from ");
- print_generic_expr (dump_file, tmp);
- fprintf (dump_file, "\n");
- }
+ a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
- add_assert_info (asserts, names[i], tmp, LE_EXPR, new_val);
- }
- }
- }
- }
-}
+ b) If the conditional is the only entry point to the sub-graph
+ corresponding to the THEN_CLAUSE, recurse into it. On
+ return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
+ an ASSERT_EXPR is added for the corresponding variable.
-/* OP is an operand of a truth value expression which is known to have
- a particular value. Register any asserts for OP and for any
- operands in OP's defining statement.
+ c) Repeat step (b) on the ELSE_CLAUSE.
- If CODE is EQ_EXPR, then we want to register OP is zero (false),
- if CODE is NE_EXPR, then we want to register OP is nonzero (true). */
+ d) Mark X and Y in FOUND_IN_SUBGRAPH.
-static void
-register_edge_assert_for_1 (tree op, enum tree_code code,
- edge e, vec<assert_info> &asserts)
-{
- gimple *op_def;
- tree val;
- enum tree_code rhs_code;
+ For instance,
- /* We only care about SSA_NAMEs. */
- if (TREE_CODE (op) != SSA_NAME)
- return;
+ if (a == 9)
+ b = a;
+ else
+ b = c + 1;
- /* We know that OP will have a zero or nonzero value. */
- val = build_int_cst (TREE_TYPE (op), 0);
- add_assert_info (asserts, op, op, code, val);
+ In this case, an assertion on the THEN clause is useful to
+ determine that 'a' is always 9 on that edge. However, an assertion
+ on the ELSE clause would be unnecessary.
- /* Now look at how OP is set. If it's set from a comparison,
- a truth operation or some bit operations, then we may be able
- to register information about the operands of that assignment. */
- op_def = SSA_NAME_DEF_STMT (op);
- if (gimple_code (op_def) != GIMPLE_ASSIGN)
- return;
+ 4- If BB does not end in a conditional expression, then we recurse
+ into BB's dominator children.
- rhs_code = gimple_assign_rhs_code (op_def);
+ At the end of the recursive traversal, every SSA name will have a
+ list of locations where ASSERT_EXPRs should be added. When a new
+ location for name N is found, it is registered by calling
+ register_new_assert_for. That function keeps track of all the
+ registered assertions to prevent adding unnecessary assertions.
+ For instance, if a pointer P_4 is dereferenced more than once in a
+   dominator tree, only the location dominating all the dereferences of
+ P_4 will receive an ASSERT_EXPR. */
- if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
- {
- bool invert = (code == EQ_EXPR ? true : false);
- tree op0 = gimple_assign_rhs1 (op_def);
- tree op1 = gimple_assign_rhs2 (op_def);
+static void
+find_assert_locations_1 (basic_block bb, sbitmap live)
+{
+ gimple *last;
- if (TREE_CODE (op0) == SSA_NAME)
- register_edge_assert_for_2 (op0, e, rhs_code, op0, op1, invert, asserts);
- if (TREE_CODE (op1) == SSA_NAME)
- register_edge_assert_for_2 (op1, e, rhs_code, op0, op1, invert, asserts);
- }
- else if ((code == NE_EXPR
- && gimple_assign_rhs_code (op_def) == BIT_AND_EXPR)
- || (code == EQ_EXPR
- && gimple_assign_rhs_code (op_def) == BIT_IOR_EXPR))
- {
- /* Recurse on each operand. */
- tree op0 = gimple_assign_rhs1 (op_def);
- tree op1 = gimple_assign_rhs2 (op_def);
- if (TREE_CODE (op0) == SSA_NAME
- && has_single_use (op0))
- register_edge_assert_for_1 (op0, code, e, asserts);
- if (TREE_CODE (op1) == SSA_NAME
- && has_single_use (op1))
- register_edge_assert_for_1 (op1, code, e, asserts);
- }
- else if (gimple_assign_rhs_code (op_def) == BIT_NOT_EXPR
- && TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (op_def))) == 1)
- {
- /* Recurse, flipping CODE. */
- code = invert_tree_comparison (code, false);
- register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
- }
- else if (gimple_assign_rhs_code (op_def) == SSA_NAME)
- {
- /* Recurse through the copy. */
- register_edge_assert_for_1 (gimple_assign_rhs1 (op_def), code, e, asserts);
- }
- else if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (op_def)))
+ last = last_stmt (bb);
+
+ /* If BB's last statement is a conditional statement involving integer
+ operands, determine if we need to add ASSERT_EXPRs. */
+ if (last
+ && gimple_code (last) == GIMPLE_COND
+ && !fp_predicate (last)
+ && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
+ find_conditional_asserts (bb, as_a <gcond *> (last));
+
+ /* If BB's last statement is a switch statement involving integer
+ operands, determine if we need to add ASSERT_EXPRs. */
+ if (last
+ && gimple_code (last) == GIMPLE_SWITCH
+ && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
+ find_switch_asserts (bb, as_a <gswitch *> (last));
+
+ /* Traverse all the statements in BB marking used names and looking
+ for statements that may infer assertions for their used operands. */
+ for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
+ gsi_prev (&si))
{
- /* Recurse through the type conversion, unless it is a narrowing
- conversion or conversion from non-integral type. */
- tree rhs = gimple_assign_rhs1 (op_def);
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs))
- && (TYPE_PRECISION (TREE_TYPE (rhs))
- <= TYPE_PRECISION (TREE_TYPE (op))))
- register_edge_assert_for_1 (rhs, code, e, asserts);
- }
-}
+ gimple *stmt;
+ tree op;
+ ssa_op_iter i;
-/* Check if comparison
- NAME COND_OP INTEGER_CST
- has a form of
- (X & 11...100..0) COND_OP XX...X00...0
- Such comparison can yield assertions like
- X >= XX...X00...0
- X <= XX...X11...1
- in case of COND_OP being NE_EXPR or
- X < XX...X00...0
- X > XX...X11...1
- in case of EQ_EXPR. */
+ stmt = gsi_stmt (si);
-static bool
-is_masked_range_test (tree name, tree valt, enum tree_code cond_code,
- tree *new_name, tree *low, enum tree_code *low_code,
- tree *high, enum tree_code *high_code)
-{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ if (is_gimple_debug (stmt))
+ continue;
- if (!is_gimple_assign (def_stmt)
- || gimple_assign_rhs_code (def_stmt) != BIT_AND_EXPR)
- return false;
+ /* See if we can derive an assertion for any of STMT's operands. */
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
+ {
+ tree value;
+ enum tree_code comp_code;
- tree t = gimple_assign_rhs1 (def_stmt);
- tree maskt = gimple_assign_rhs2 (def_stmt);
- if (TREE_CODE (t) != SSA_NAME || TREE_CODE (maskt) != INTEGER_CST)
- return false;
+ /* If op is not live beyond this stmt, do not bother to insert
+ asserts for it. */
+ if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
+ continue;
- wi::tree_to_wide_ref mask = wi::to_wide (maskt);
- wide_int inv_mask = ~mask;
- /* Assume VALT is INTEGER_CST. */
- wi::tree_to_wide_ref val = wi::to_wide (valt);
+ /* If OP is used in such a way that we can infer a value
+ range for it, and we don't find a previous assertion for
+ it, create a new assertion location node for OP. */
+ if (infer_value_range (stmt, op, &comp_code, &value))
+ {
+ /* If we are able to infer a nonzero value range for OP,
+ then walk backwards through the use-def chain to see if OP
+ was set via a typecast.
- if ((inv_mask & (inv_mask + 1)) != 0
- || (val & mask) != val)
- return false;
+ If so, then we can also infer a nonzero value range
+ for the operand of the NOP_EXPR. */
+ if (comp_code == NE_EXPR && integer_zerop (value))
+ {
+ tree t = op;
+ gimple *def_stmt = SSA_NAME_DEF_STMT (t);
- bool is_range = cond_code == EQ_EXPR;
+ while (is_gimple_assign (def_stmt)
+ && CONVERT_EXPR_CODE_P
+ (gimple_assign_rhs_code (def_stmt))
+ && TREE_CODE
+ (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
+ && POINTER_TYPE_P
+ (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
+ {
+ t = gimple_assign_rhs1 (def_stmt);
+ def_stmt = SSA_NAME_DEF_STMT (t);
- tree type = TREE_TYPE (t);
- wide_int min = wi::min_value (type),
- max = wi::max_value (type);
+ /* Note we want to register the assert for the
+ operand of the NOP_EXPR after SI, not after the
+ conversion. */
+ if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
+ register_new_assert_for (t, t, comp_code, value,
+ bb, NULL, si);
+ }
+ }
- if (is_range)
- {
- *low_code = val == min ? ERROR_MARK : GE_EXPR;
- *high_code = val == max ? ERROR_MARK : LE_EXPR;
- }
- else
- {
- /* We can still generate assertion if one of alternatives
- is known to always be false. */
- if (val == min)
- {
- *low_code = (enum tree_code) 0;
- *high_code = GT_EXPR;
- }
- else if ((val | inv_mask) == max)
- {
- *low_code = LT_EXPR;
- *high_code = (enum tree_code) 0;
+ register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
+ }
}
- else
- return false;
+
+ /* Update live. */
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
+ bitmap_set_bit (live, SSA_NAME_VERSION (op));
+ FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
+ bitmap_clear_bit (live, SSA_NAME_VERSION (op));
}
- *new_name = t;
- *low = wide_int_to_tree (type, val);
- *high = wide_int_to_tree (type, val | inv_mask);
+ /* Traverse all PHI nodes in BB, updating live. */
+ for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+ gsi_next (&si))
+ {
+ use_operand_p arg_p;
+ ssa_op_iter i;
+ gphi *phi = si.phi ();
+ tree res = gimple_phi_result (phi);
- if (wi::neg_p (val, TYPE_SIGN (type)))
- std::swap (*low, *high);
+ if (virtual_operand_p (res))
+ continue;
- return true;
+ FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
+ {
+ tree arg = USE_FROM_PTR (arg_p);
+ if (TREE_CODE (arg) == SSA_NAME)
+ bitmap_set_bit (live, SSA_NAME_VERSION (arg));
+ }
+
+ bitmap_clear_bit (live, SSA_NAME_VERSION (res));
+ }
}
-/* Try to register an edge assertion for SSA name NAME on edge E for
- the condition COND contributing to the conditional jump pointed to by
- SI. */
+/* Do an RPO walk over the function computing SSA name liveness
+ on-the-fly and deciding on assert expressions to insert. */
-void
-register_edge_assert_for (tree name, edge e,
- enum tree_code cond_code, tree cond_op0,
- tree cond_op1, vec<assert_info> &asserts)
+static void
+find_assert_locations (void)
{
- tree val;
- enum tree_code comp_code;
- bool is_else_edge = (e->flags & EDGE_FALSE_VALUE) != 0;
-
- /* Do not attempt to infer anything in names that flow through
- abnormal edges. */
- if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
- return;
-
- if (!extract_code_and_val_from_cond_with_ops (name, cond_code,
- cond_op0, cond_op1,
- is_else_edge,
- &comp_code, &val))
- return;
-
- /* Register ASSERT_EXPRs for name. */
- register_edge_assert_for_2 (name, e, cond_code, cond_op0,
- cond_op1, is_else_edge, asserts);
-
+ int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
+ int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
+ int rpo_cnt, i;
- /* If COND is effectively an equality test of an SSA_NAME against
- the value zero or one, then we may be able to assert values
- for SSA_NAMEs which flow into COND. */
+ live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
+ rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
+ for (i = 0; i < rpo_cnt; ++i)
+ bb_rpo[rpo[i]] = i;
- /* In the case of NAME == 1 or NAME != 0, for BIT_AND_EXPR defining
- statement of NAME we can assert both operands of the BIT_AND_EXPR
- have nonzero value. */
- if (((comp_code == EQ_EXPR && integer_onep (val))
- || (comp_code == NE_EXPR && integer_zerop (val))))
+ /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
+ the order we compute liveness and insert asserts we otherwise
+ fail to insert asserts into the loop latch. */
+ loop_p loop;
+ FOR_EACH_LOOP (loop, 0)
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
-
- if (is_gimple_assign (def_stmt)
- && gimple_assign_rhs_code (def_stmt) == BIT_AND_EXPR)
+ i = loop->latch->index;
+ unsigned int j = single_succ_edge (loop->latch)->dest_idx;
+ for (gphi_iterator gsi = gsi_start_phis (loop->header);
+ !gsi_end_p (gsi); gsi_next (&gsi))
{
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- register_edge_assert_for_1 (op0, NE_EXPR, e, asserts);
- register_edge_assert_for_1 (op1, NE_EXPR, e, asserts);
- }
- }
+ gphi *phi = gsi.phi ();
+ if (virtual_operand_p (gimple_phi_result (phi)))
+ continue;
+ tree arg = gimple_phi_arg_def (phi, j);
+ if (TREE_CODE (arg) == SSA_NAME)
+ {
+ if (live[i] == NULL)
+ {
+ live[i] = sbitmap_alloc (num_ssa_names);
+ bitmap_clear (live[i]);
+ }
+ bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
+ }
+ }
+ }
- /* In the case of NAME == 0 or NAME != 1, for BIT_IOR_EXPR defining
- statement of NAME we can assert both operands of the BIT_IOR_EXPR
- have zero value. */
- if (((comp_code == EQ_EXPR && integer_zerop (val))
- || (comp_code == NE_EXPR && integer_onep (val))))
+ for (i = rpo_cnt - 1; i >= 0; --i)
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
+ edge e;
+ edge_iterator ei;
- /* For BIT_IOR_EXPR only if NAME == 0 both operands have
- necessarily zero value, or if type-precision is one. */
- if (is_gimple_assign (def_stmt)
- && (gimple_assign_rhs_code (def_stmt) == BIT_IOR_EXPR
- && (TYPE_PRECISION (TREE_TYPE (name)) == 1
- || comp_code == EQ_EXPR)))
+ if (!live[rpo[i]])
{
- tree op0 = gimple_assign_rhs1 (def_stmt);
- tree op1 = gimple_assign_rhs2 (def_stmt);
- register_edge_assert_for_1 (op0, EQ_EXPR, e, asserts);
- register_edge_assert_for_1 (op1, EQ_EXPR, e, asserts);
+ live[rpo[i]] = sbitmap_alloc (num_ssa_names);
+ bitmap_clear (live[rpo[i]]);
}
- }
- /* Sometimes we can infer ranges from (NAME & MASK) == VALUE. */
- if ((comp_code == EQ_EXPR || comp_code == NE_EXPR)
- && TREE_CODE (val) == INTEGER_CST)
- {
- enum tree_code low_code, high_code;
- tree low, high;
- if (is_masked_range_test (name, val, comp_code, &name, &low,
- &low_code, &high, &high_code))
+ /* Process BB and update the live information with uses in
+ this block. */
+ find_assert_locations_1 (bb, live[rpo[i]]);
+
+ /* Merge liveness into the predecessor blocks and free it. */
+ if (!bitmap_empty_p (live[rpo[i]]))
{
- if (low_code != ERROR_MARK)
- register_edge_assert_for_2 (name, e, low_code, name,
- low, /*invert*/false, asserts);
- if (high_code != ERROR_MARK)
- register_edge_assert_for_2 (name, e, high_code, name,
- high, /*invert*/false, asserts);
- }
- }
-}
+ int pred_rpo = i;
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ int pred = e->src->index;
+ if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
+ continue;
-/* Finish found ASSERTS for E and register them at GSI. */
+ if (!live[pred])
+ {
+ live[pred] = sbitmap_alloc (num_ssa_names);
+ bitmap_clear (live[pred]);
+ }
+ bitmap_ior (live[pred], live[pred], live[rpo[i]]);
-static void
-finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
- vec<assert_info> &asserts)
-{
- for (unsigned i = 0; i < asserts.length (); ++i)
- /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
- reachable from E. */
- if (live_on_edge (e, asserts[i].name))
- register_new_assert_for (asserts[i].name, asserts[i].expr,
- asserts[i].comp_code, asserts[i].val,
- NULL, e, gsi);
-}
+ if (bb_rpo[pred] < pred_rpo)
+ pred_rpo = bb_rpo[pred];
+ }
+ /* Record the RPO number of the last visited block that needs
+ live information from this block. */
+ last_rpo[rpo[i]] = pred_rpo;
+ }
+ else
+ {
+ sbitmap_free (live[rpo[i]]);
+ live[rpo[i]] = NULL;
+ }
+ /* We can free all successors live bitmaps if all their
+ predecessors have been visited already. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (last_rpo[e->dest->index] == i
+ && live[e->dest->index])
+ {
+ sbitmap_free (live[e->dest->index]);
+ live[e->dest->index] = NULL;
+ }
+ }
-/* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement.
- The last statement of BB must be a COND_EXPR.
+ XDELETEVEC (rpo);
+ XDELETEVEC (bb_rpo);
+ XDELETEVEC (last_rpo);
+ for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
+ if (live[i])
+ sbitmap_free (live[i]);
+ XDELETEVEC (live);
+}
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
+/* Create an ASSERT_EXPR for NAME and insert it in the location
+ indicated by LOC. Return true if we made any edge insertions. */
-static void
-find_conditional_asserts (basic_block bb, gcond *last)
+static bool
+process_assert_insertions_for (tree name, assert_locus *loc)
{
- gimple_stmt_iterator bsi;
- tree op;
+ /* Build the comparison expression NAME_i COMP_CODE VAL. */
+ gimple *stmt;
+ tree cond;
+ gimple *assert_stmt;
edge_iterator ei;
edge e;
- ssa_op_iter iter;
- bsi = gsi_for_stmt (last);
+ /* If we have X <=> X do not insert an assert expr for that. */
+ if (loc->expr == loc->val)
+ return false;
- /* Look for uses of the operands in each of the sub-graphs
- rooted at BB. We need to check each of the outgoing edges
- separately, so that we know what kind of ASSERT_EXPR to
- insert. */
- FOR_EACH_EDGE (e, ei, bb->succs)
+ cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
+ assert_stmt = build_assert_expr_for (cond, name);
+ if (loc->e)
{
- if (e->dest == bb)
- continue;
+ /* We have been asked to insert the assertion on an edge. This
+ is used only by COND_EXPR and SWITCH_EXPR assertions. */
+ gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
+ || (gimple_code (gsi_stmt (loc->si))
+ == GIMPLE_SWITCH));
- /* Register the necessary assertions for each operand in the
- conditional predicate. */
- auto_vec<assert_info, 8> asserts;
- FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
- register_edge_assert_for (op, e,
- gimple_cond_code (last),
- gimple_cond_lhs (last),
- gimple_cond_rhs (last), asserts);
- finish_register_edge_assert_for (e, bsi, asserts);
+ gsi_insert_on_edge (loc->e, assert_stmt);
+ return true;
}
-}
-struct case_info
-{
- tree expr;
- basic_block bb;
-};
+ /* If the stmt iterator points at the end then this is an insertion
+ at the beginning of a block. */
+ if (gsi_end_p (loc->si))
+ {
+ gimple_stmt_iterator si = gsi_after_labels (loc->bb);
+ gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
+ return false;
-/* Compare two case labels sorting first by the destination bb index
- and then by the case value. */
+ }
+ /* Otherwise, we can insert right after LOC->SI iff the
+ statement must not be the last statement in the block. */
+ stmt = gsi_stmt (loc->si);
+ if (!stmt_ends_bb_p (stmt))
+ {
+ gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
+ return false;
+ }
+
+ /* If STMT must be the last statement in BB, we can only insert new
+ assertions on the non-abnormal edge out of BB. Note that since
+ STMT is not control flow, there may only be one non-abnormal/eh edge
+ out of BB. */
+ FOR_EACH_EDGE (e, ei, loc->bb->succs)
+ if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
+ {
+ gsi_insert_on_edge (e, assert_stmt);
+ return true;
+ }
+
+ gcc_unreachable ();
+}
+
+/* Qsort helper for sorting assert locations. If stable is true, don't
+ use iterative_hash_expr because it can be unstable for -fcompare-debug,
+   on the other hand, some pointers might be NULL.  */
+template <bool stable>
static int
-compare_case_labels (const void *p1, const void *p2)
+compare_assert_loc (const void *pa, const void *pb)
{
- const struct case_info *ci1 = (const struct case_info *) p1;
- const struct case_info *ci2 = (const struct case_info *) p2;
- int idx1 = ci1->bb->index;
- int idx2 = ci2->bb->index;
+ assert_locus * const a = *(assert_locus * const *)pa;
+ assert_locus * const b = *(assert_locus * const *)pb;
- if (idx1 < idx2)
- return -1;
- else if (idx1 == idx2)
+ /* If stable, some asserts might be optimized away already, sort
+ them last. */
+ if (stable)
{
- /* Make sure the default label is first in a group. */
- if (!CASE_LOW (ci1->expr))
+ if (a == NULL)
+ return b != NULL;
+ else if (b == NULL)
return -1;
- else if (!CASE_LOW (ci2->expr))
- return 1;
- else
- return tree_int_cst_compare (CASE_LOW (ci1->expr),
- CASE_LOW (ci2->expr));
}
- else
+
+ if (a->e == NULL && b->e != NULL)
return 1;
-}
+ else if (a->e != NULL && b->e == NULL)
+ return -1;
-/* Determine whether the outgoing edges of BB should receive an
- ASSERT_EXPR for each of the operands of BB's LAST statement.
- The last statement of BB must be a SWITCH_EXPR.
+ /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
+ no need to test both a->e and b->e. */
- If any of the sub-graphs rooted at BB have an interesting use of
- the predicate operands, an assert location node is added to the
- list of assertions for the corresponding operands. */
+  /* Sort by destination index.  */
+ if (a->e == NULL)
+ ;
+ else if (a->e->dest->index > b->e->dest->index)
+ return 1;
+ else if (a->e->dest->index < b->e->dest->index)
+ return -1;
-static void
-find_switch_asserts (basic_block bb, gswitch *last)
-{
- gimple_stmt_iterator bsi;
- tree op;
- edge e;
- struct case_info *ci;
- size_t n = gimple_switch_num_labels (last);
-#if GCC_VERSION >= 4000
- unsigned int idx;
-#else
- /* Work around GCC 3.4 bug (PR 37086). */
- volatile unsigned int idx;
-#endif
+  /* Sort by comp_code.  */
+ if (a->comp_code > b->comp_code)
+ return 1;
+ else if (a->comp_code < b->comp_code)
+ return -1;
- bsi = gsi_for_stmt (last);
- op = gimple_switch_index (last);
- if (TREE_CODE (op) != SSA_NAME)
- return;
+ hashval_t ha, hb;
- /* Build a vector of case labels sorted by destination label. */
- ci = XNEWVEC (struct case_info, n);
- for (idx = 0; idx < n; ++idx)
+ /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
+ uses DECL_UID of the VAR_DECL, so sorting might differ between
+ -g and -g0. When doing the removal of redundant assert exprs
+     and commonization to successors, this does not matter, but
+     the final sort needs to be stable.  */
+ if (stable)
{
- ci[idx].expr = gimple_switch_label (last, idx);
- ci[idx].bb = label_to_block (CASE_LABEL (ci[idx].expr));
+ ha = 0;
+ hb = 0;
}
- edge default_edge = find_edge (bb, ci[0].bb);
- qsort (ci, n, sizeof (struct case_info), compare_case_labels);
-
- for (idx = 0; idx < n; ++idx)
+ else
{
- tree min, max;
- tree cl = ci[idx].expr;
- basic_block cbb = ci[idx].bb;
-
- min = CASE_LOW (cl);
- max = CASE_HIGH (cl);
-
- /* If there are multiple case labels with the same destination
- we need to combine them to a single value range for the edge. */
- if (idx + 1 < n && cbb == ci[idx + 1].bb)
- {
- /* Skip labels until the last of the group. */
- do {
- ++idx;
- } while (idx < n && cbb == ci[idx].bb);
- --idx;
-
- /* Pick up the maximum of the case label range. */
- if (CASE_HIGH (ci[idx].expr))
- max = CASE_HIGH (ci[idx].expr);
- else
- max = CASE_LOW (ci[idx].expr);
- }
-
- /* Can't extract a useful assertion out of a range that includes the
- default label. */
- if (min == NULL_TREE)
- continue;
-
- /* Find the edge to register the assert expr on. */
- e = find_edge (bb, cbb);
-
- /* Register the necessary assertions for the operand in the
- SWITCH_EXPR. */
- auto_vec<assert_info, 8> asserts;
- register_edge_assert_for (op, e,
- max ? GE_EXPR : EQ_EXPR,
- op, fold_convert (TREE_TYPE (op), min),
- asserts);
- if (max)
- register_edge_assert_for (op, e, LE_EXPR, op,
- fold_convert (TREE_TYPE (op), max),
- asserts);
- finish_register_edge_assert_for (e, bsi, asserts);
+ ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
+ hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
}
- XDELETEVEC (ci);
+ /* Break the tie using hashing and source/bb index. */
+ if (ha == hb)
+ return (a->e != NULL
+ ? a->e->src->index - b->e->src->index
+ : a->bb->index - b->bb->index);
+ return ha > hb ? 1 : -1;
+}
- if (!live_on_edge (default_edge, op))
- return;
+/* Process all the insertions registered for every name N_i registered
+ in NEED_ASSERT_FOR. The list of assertions to be inserted are
+ found in ASSERTS_FOR[i]. */
- /* Now register along the default label assertions that correspond to the
- anti-range of each label. */
- int insertion_limit = PARAM_VALUE (PARAM_MAX_VRP_SWITCH_ASSERTIONS);
- if (insertion_limit == 0)
- return;
+static void
+process_assert_insertions (void)
+{
+ unsigned i;
+ bitmap_iterator bi;
+ bool update_edges_p = false;
+ int num_asserts = 0;
- /* We can't do this if the default case shares a label with another case. */
- tree default_cl = gimple_switch_default_label (last);
- for (idx = 1; idx < n; idx++)
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ dump_all_asserts (dump_file);
+
+ EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
{
- tree min, max;
- tree cl = gimple_switch_label (last, idx);
- if (CASE_LABEL (cl) == CASE_LABEL (default_cl))
- continue;
+ assert_locus *loc = asserts_for[i];
+ gcc_assert (loc);
- min = CASE_LOW (cl);
- max = CASE_HIGH (cl);
+ auto_vec<assert_locus *, 16> asserts;
+ for (; loc; loc = loc->next)
+ asserts.safe_push (loc);
+ asserts.qsort (compare_assert_loc<false>);
- /* Combine contiguous case ranges to reduce the number of assertions
- to insert. */
- for (idx = idx + 1; idx < n; idx++)
+ /* Push down common asserts to successors and remove redundant ones. */
+ unsigned ecnt = 0;
+ assert_locus *common = NULL;
+ unsigned commonj = 0;
+ for (unsigned j = 0; j < asserts.length (); ++j)
{
- tree next_min, next_max;
- tree next_cl = gimple_switch_label (last, idx);
- if (CASE_LABEL (next_cl) == CASE_LABEL (default_cl))
- break;
-
- next_min = CASE_LOW (next_cl);
- next_max = CASE_HIGH (next_cl);
-
- wide_int difference = (wi::to_wide (next_min)
- - wi::to_wide (max ? max : min));
- if (wi::eq_p (difference, 1))
- max = next_max ? next_max : next_min;
+ loc = asserts[j];
+ if (! loc->e)
+ common = NULL;
+ else if (! common
+ || loc->e->dest != common->e->dest
+ || loc->comp_code != common->comp_code
+ || ! operand_equal_p (loc->val, common->val, 0)
+ || ! operand_equal_p (loc->expr, common->expr, 0))
+ {
+ commonj = j;
+ common = loc;
+ ecnt = 1;
+ }
+ else if (loc->e == asserts[j-1]->e)
+ {
+ /* Remove duplicate asserts. */
+ if (commonj == j - 1)
+ {
+ commonj = j;
+ common = loc;
+ }
+ free (asserts[j-1]);
+ asserts[j-1] = NULL;
+ }
else
- break;
+ {
+ ecnt++;
+ if (EDGE_COUNT (common->e->dest->preds) == ecnt)
+ {
+ /* We have the same assertion on all incoming edges of a BB.
+ Insert it at the beginning of that block. */
+ loc->bb = loc->e->dest;
+ loc->e = NULL;
+ loc->si = gsi_none ();
+ common = NULL;
+ /* Clear asserts commoned. */
+ for (; commonj != j; ++commonj)
+ if (asserts[commonj])
+ {
+ free (asserts[commonj]);
+ asserts[commonj] = NULL;
+ }
+ }
+ }
}
- idx--;
- if (max == NULL_TREE)
+ /* The asserts vector sorting above might be unstable for
+ -fcompare-debug, sort again to ensure a stable sort. */
+ asserts.qsort (compare_assert_loc<true>);
+ for (unsigned j = 0; j < asserts.length (); ++j)
{
- /* Register the assertion OP != MIN. */
- auto_vec<assert_info, 8> asserts;
- min = fold_convert (TREE_TYPE (op), min);
- register_edge_assert_for (op, default_edge, NE_EXPR, op, min,
- asserts);
- finish_register_edge_assert_for (default_edge, bsi, asserts);
+ loc = asserts[j];
+ if (! loc)
+ break;
+ update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
+ num_asserts++;
+ free (loc);
}
- else
- {
- /* Register the assertion (unsigned)OP - MIN > (MAX - MIN),
- which will give OP the anti-range ~[MIN,MAX]. */
- tree uop = fold_convert (unsigned_type_for (TREE_TYPE (op)), op);
- min = fold_convert (TREE_TYPE (uop), min);
- max = fold_convert (TREE_TYPE (uop), max);
+ }
- tree lhs = fold_build2 (MINUS_EXPR, TREE_TYPE (uop), uop, min);
- tree rhs = int_const_binop (MINUS_EXPR, max, min);
- register_new_assert_for (op, lhs, GT_EXPR, rhs,
- NULL, default_edge, bsi);
- }
+ if (update_edges_p)
+ gsi_commit_edge_inserts ();
- if (--insertion_limit == 0)
- break;
- }
+ statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
+ num_asserts);
}
-/* Traverse all the statements in block BB looking for statements that
- may generate useful assertions for the SSA names in their operand.
- If a statement produces a useful assertion A for name N_i, then the
- list of assertions already generated for N_i is scanned to
- determine if A is actually needed.
+/* Traverse the flowgraph looking for conditional jumps to insert range
+ expressions. These range expressions are meant to provide information
+ to optimizations that need to reason in terms of value ranges. They
+ will not be expanded into RTL. For instance, given:
- If N_i already had the assertion A at a location dominating the
- current location, then nothing needs to be done. Otherwise, the
- new location for A is recorded instead.
+ x = ...
+ y = ...
+ if (x < y)
+ y = x - 2;
+ else
+ x = y + 3;
- 1- For every statement S in BB, all the variables used by S are
- added to bitmap FOUND_IN_SUBGRAPH.
+ this pass will transform the code into:
- 2- If statement S uses an operand N in a way that exposes a known
- value range for N, then if N was not already generated by an
- ASSERT_EXPR, create a new assert location for N. For instance,
- if N is a pointer and the statement dereferences it, we can
- assume that N is not NULL.
-
- 3- COND_EXPRs are a special case of #2. We can derive range
- information from the predicate but need to insert different
- ASSERT_EXPRs for each of the sub-graphs rooted at the
- conditional block. If the last statement of BB is a conditional
- expression of the form 'X op Y', then
-
- a) Remove X and Y from the set FOUND_IN_SUBGRAPH.
-
- b) If the conditional is the only entry point to the sub-graph
- corresponding to the THEN_CLAUSE, recurse into it. On
- return, if X and/or Y are marked in FOUND_IN_SUBGRAPH, then
- an ASSERT_EXPR is added for the corresponding variable.
-
- c) Repeat step (b) on the ELSE_CLAUSE.
-
- d) Mark X and Y in FOUND_IN_SUBGRAPH.
-
- For instance,
-
- if (a == 9)
- b = a;
- else
- b = c + 1;
-
- In this case, an assertion on the THEN clause is useful to
- determine that 'a' is always 9 on that edge. However, an assertion
- on the ELSE clause would be unnecessary.
-
- 4- If BB does not end in a conditional expression, then we recurse
- into BB's dominator children.
+ x = ...
+ y = ...
+ if (x < y)
+ {
+ x = ASSERT_EXPR <x, x < y>
+ y = x - 2
+ }
+ else
+ {
+ y = ASSERT_EXPR <y, x >= y>
+ x = y + 3
+ }
- At the end of the recursive traversal, every SSA name will have a
- list of locations where ASSERT_EXPRs should be added. When a new
- location for name N is found, it is registered by calling
- register_new_assert_for. That function keeps track of all the
- registered assertions to prevent adding unnecessary assertions.
- For instance, if a pointer P_4 is dereferenced more than once in a
- dominator tree, only the location dominating all the dereference of
- P_4 will receive an ASSERT_EXPR. */
+ The idea is that once copy and constant propagation have run, other
+ optimizations will be able to determine what ranges of values can 'x'
+ take in different paths of the code, simply by checking the reaching
+ definition of 'x'. */
static void
-find_assert_locations_1 (basic_block bb, sbitmap live)
+insert_range_assertions (void)
{
- gimple *last;
-
- last = last_stmt (bb);
+ need_assert_for = BITMAP_ALLOC (NULL);
+ asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
- /* If BB's last statement is a conditional statement involving integer
- operands, determine if we need to add ASSERT_EXPRs. */
- if (last
- && gimple_code (last) == GIMPLE_COND
- && !fp_predicate (last)
- && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
- find_conditional_asserts (bb, as_a <gcond *> (last));
+ calculate_dominance_info (CDI_DOMINATORS);
- /* If BB's last statement is a switch statement involving integer
- operands, determine if we need to add ASSERT_EXPRs. */
- if (last
- && gimple_code (last) == GIMPLE_SWITCH
- && !ZERO_SSA_OPERANDS (last, SSA_OP_USE))
- find_switch_asserts (bb, as_a <gswitch *> (last));
+ find_assert_locations ();
+ if (!bitmap_empty_p (need_assert_for))
+ {
+ process_assert_insertions ();
+ update_ssa (TODO_update_ssa_no_phi);
+ }
- /* Traverse all the statements in BB marking used names and looking
- for statements that may infer assertions for their used operands. */
- for (gimple_stmt_iterator si = gsi_last_bb (bb); !gsi_end_p (si);
- gsi_prev (&si))
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- gimple *stmt;
- tree op;
- ssa_op_iter i;
+ fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
+ dump_function_to_file (current_function_decl, dump_file, dump_flags);
+ }
- stmt = gsi_stmt (si);
+ free (asserts_for);
+ BITMAP_FREE (need_assert_for);
+}
- if (is_gimple_debug (stmt))
- continue;
+class vrp_prop : public ssa_propagation_engine
+{
+ public:
+ enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
+ enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
- /* See if we can derive an assertion for any of STMT's operands. */
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- {
- tree value;
- enum tree_code comp_code;
+ void vrp_initialize (void);
+ void vrp_finalize (bool);
+ void check_all_array_refs (void);
+ void check_array_ref (location_t, tree, bool);
+ void search_for_addr_array (tree, location_t);
- /* If op is not live beyond this stmt, do not bother to insert
- asserts for it. */
- if (!bitmap_bit_p (live, SSA_NAME_VERSION (op)))
- continue;
+ class vr_values vr_values;
+ /* Temporary delegator to minimize code churn. */
+ value_range *get_value_range (const_tree op)
+ { return vr_values.get_value_range (op); }
+ void set_defs_to_varying (gimple *stmt)
+ { return vr_values.set_defs_to_varying (stmt); }
+ void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+ tree *output_p, value_range *vr)
+ { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
+ bool update_value_range (const_tree op, value_range *vr)
+ { return vr_values.update_value_range (op, vr); }
+ void extract_range_basic (value_range *vr, gimple *stmt)
+ { vr_values.extract_range_basic (vr, stmt); }
+ void extract_range_from_phi_node (gphi *phi, value_range *vr)
+ { vr_values.extract_range_from_phi_node (phi, vr); }
+};
+/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
+ and "struct" hacks. If VRP can determine that the
+ array subscript is a constant, check if it is outside valid
+ range. If the array subscript is a RANGE, warn if it is
+ non-overlapping with valid range.
+ IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
- /* If OP is used in such a way that we can infer a value
- range for it, and we don't find a previous assertion for
- it, create a new assertion location node for OP. */
- if (infer_value_range (stmt, op, &comp_code, &value))
- {
- /* If we are able to infer a nonzero value range for OP,
- then walk backwards through the use-def chain to see if OP
- was set via a typecast.
+void
+vrp_prop::check_array_ref (location_t location, tree ref,
+ bool ignore_off_by_one)
+{
+ value_range *vr = NULL;
+ tree low_sub, up_sub;
+ tree low_bound, up_bound, up_bound_p1;
- If so, then we can also infer a nonzero value range
- for the operand of the NOP_EXPR. */
- if (comp_code == NE_EXPR && integer_zerop (value))
- {
- tree t = op;
- gimple *def_stmt = SSA_NAME_DEF_STMT (t);
+ if (TREE_NO_WARNING (ref))
+ return;
- while (is_gimple_assign (def_stmt)
- && CONVERT_EXPR_CODE_P
- (gimple_assign_rhs_code (def_stmt))
- && TREE_CODE
- (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
- && POINTER_TYPE_P
- (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
- {
- t = gimple_assign_rhs1 (def_stmt);
- def_stmt = SSA_NAME_DEF_STMT (t);
+ low_sub = up_sub = TREE_OPERAND (ref, 1);
+ up_bound = array_ref_up_bound (ref);
- /* Note we want to register the assert for the
- operand of the NOP_EXPR after SI, not after the
- conversion. */
- if (bitmap_bit_p (live, SSA_NAME_VERSION (t)))
- register_new_assert_for (t, t, comp_code, value,
- bb, NULL, si);
- }
- }
+  /* Cannot check flexible arrays.  */
+ if (!up_bound
+ || TREE_CODE (up_bound) != INTEGER_CST)
+ return;
- register_new_assert_for (op, op, comp_code, value, bb, NULL, si);
- }
- }
+ /* Accesses to trailing arrays via pointers may access storage
+     beyond the type's array bounds.  */
+ if (warn_array_bounds < 2
+ && array_at_struct_end_p (ref))
+ return;
- /* Update live. */
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
- bitmap_set_bit (live, SSA_NAME_VERSION (op));
- FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
- bitmap_clear_bit (live, SSA_NAME_VERSION (op));
- }
+ low_bound = array_ref_low_bound (ref);
+ up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
+ build_int_cst (TREE_TYPE (up_bound), 1));
- /* Traverse all PHI nodes in BB, updating live. */
- for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
- gsi_next (&si))
+ /* Empty array. */
+ if (tree_int_cst_equal (low_bound, up_bound_p1))
{
- use_operand_p arg_p;
- ssa_op_iter i;
- gphi *phi = si.phi ();
- tree res = gimple_phi_result (phi);
-
- if (virtual_operand_p (res))
- continue;
-
- FOR_EACH_PHI_ARG (arg_p, phi, i, SSA_OP_USE)
- {
- tree arg = USE_FROM_PTR (arg_p);
- if (TREE_CODE (arg) == SSA_NAME)
- bitmap_set_bit (live, SSA_NAME_VERSION (arg));
- }
-
- bitmap_clear_bit (live, SSA_NAME_VERSION (res));
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is above array bounds");
+ TREE_NO_WARNING (ref) = 1;
}
-}
-
-/* Do an RPO walk over the function computing SSA name liveness
- on-the-fly and deciding on assert expressions to insert. */
-
-static void
-find_assert_locations (void)
-{
- int *rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
- int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun));
- int *last_rpo = XCNEWVEC (int, last_basic_block_for_fn (cfun));
- int rpo_cnt, i;
-
- live = XCNEWVEC (sbitmap, last_basic_block_for_fn (cfun));
- rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
- for (i = 0; i < rpo_cnt; ++i)
- bb_rpo[rpo[i]] = i;
- /* Pre-seed loop latch liveness from loop header PHI nodes. Due to
- the order we compute liveness and insert asserts we otherwise
- fail to insert asserts into the loop latch. */
- loop_p loop;
- FOR_EACH_LOOP (loop, 0)
+ if (TREE_CODE (low_sub) == SSA_NAME)
{
- i = loop->latch->index;
- unsigned int j = single_succ_edge (loop->latch)->dest_idx;
- for (gphi_iterator gsi = gsi_start_phis (loop->header);
- !gsi_end_p (gsi); gsi_next (&gsi))
+ vr = get_value_range (low_sub);
+ if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
+ {
+ low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
+ up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
+ }
+ }
+
+ if (vr && vr->type == VR_ANTI_RANGE)
+ {
+ if (TREE_CODE (up_sub) == INTEGER_CST
+ && (ignore_off_by_one
+ ? tree_int_cst_lt (up_bound, up_sub)
+ : tree_int_cst_le (up_bound, up_sub))
+ && TREE_CODE (low_sub) == INTEGER_CST
+ && tree_int_cst_le (low_sub, low_bound))
+ {
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is outside array bounds");
+ TREE_NO_WARNING (ref) = 1;
+ }
+ }
+ else if (TREE_CODE (up_sub) == INTEGER_CST
+ && (ignore_off_by_one
+ ? !tree_int_cst_le (up_sub, up_bound_p1)
+ : !tree_int_cst_le (up_sub, up_bound)))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- gphi *phi = gsi.phi ();
- if (virtual_operand_p (gimple_phi_result (phi)))
- continue;
- tree arg = gimple_phi_arg_def (phi, j);
- if (TREE_CODE (arg) == SSA_NAME)
- {
- if (live[i] == NULL)
- {
- live[i] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[i]);
- }
- bitmap_set_bit (live[i], SSA_NAME_VERSION (arg));
- }
+ fprintf (dump_file, "Array bound warning for ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
+ fprintf (dump_file, "\n");
}
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is above array bounds");
+ TREE_NO_WARNING (ref) = 1;
}
-
- for (i = rpo_cnt - 1; i >= 0; --i)
+ else if (TREE_CODE (low_sub) == INTEGER_CST
+ && tree_int_cst_lt (low_sub, low_bound))
{
- basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
- edge e;
- edge_iterator ei;
-
- if (!live[rpo[i]])
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- live[rpo[i]] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[rpo[i]]);
+ fprintf (dump_file, "Array bound warning for ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
+ fprintf (dump_file, "\n");
}
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is below array bounds");
+ TREE_NO_WARNING (ref) = 1;
+ }
+}
- /* Process BB and update the live information with uses in
- this block. */
- find_assert_locations_1 (bb, live[rpo[i]]);
+/* Searches if the expr T, located at LOCATION computes
+ address of an ARRAY_REF, and call check_array_ref on it. */
- /* Merge liveness into the predecessor blocks and free it. */
- if (!bitmap_empty_p (live[rpo[i]]))
- {
- int pred_rpo = i;
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- int pred = e->src->index;
- if ((e->flags & EDGE_DFS_BACK) || pred == ENTRY_BLOCK)
- continue;
+void
+vrp_prop::search_for_addr_array (tree t, location_t location)
+{
+ /* Check each ARRAY_REFs in the reference chain. */
+ do
+ {
+ if (TREE_CODE (t) == ARRAY_REF)
+ check_array_ref (location, t, true /*ignore_off_by_one*/);
- if (!live[pred])
- {
- live[pred] = sbitmap_alloc (num_ssa_names);
- bitmap_clear (live[pred]);
- }
- bitmap_ior (live[pred], live[pred], live[rpo[i]]);
+ t = TREE_OPERAND (t, 0);
+ }
+ while (handled_component_p (t));
- if (bb_rpo[pred] < pred_rpo)
- pred_rpo = bb_rpo[pred];
- }
+ if (TREE_CODE (t) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
+ && !TREE_NO_WARNING (t))
+ {
+ tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
+ tree low_bound, up_bound, el_sz;
+ offset_int idx;
+ if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
+ || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
+ || !TYPE_DOMAIN (TREE_TYPE (tem)))
+ return;
- /* Record the RPO number of the last visited block that needs
- live information from this block. */
- last_rpo[rpo[i]] = pred_rpo;
+ low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
+ up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
+ el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
+ if (!low_bound
+ || TREE_CODE (low_bound) != INTEGER_CST
+ || !up_bound
+ || TREE_CODE (up_bound) != INTEGER_CST
+ || !el_sz
+ || TREE_CODE (el_sz) != INTEGER_CST)
+ return;
+
+ idx = mem_ref_offset (t);
+ idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
+ if (idx < 0)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Array bound warning for ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
+ fprintf (dump_file, "\n");
+ }
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is below array bounds");
+ TREE_NO_WARNING (t) = 1;
}
- else
+ else if (idx > (wi::to_offset (up_bound)
+ - wi::to_offset (low_bound) + 1))
{
- sbitmap_free (live[rpo[i]]);
- live[rpo[i]] = NULL;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Array bound warning for ");
+ dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
+ fprintf (dump_file, "\n");
+ }
+ warning_at (location, OPT_Warray_bounds,
+ "array subscript is above array bounds");
+ TREE_NO_WARNING (t) = 1;
}
-
- /* We can free all successors live bitmaps if all their
- predecessors have been visited already. */
- FOR_EACH_EDGE (e, ei, bb->succs)
- if (last_rpo[e->dest->index] == i
- && live[e->dest->index])
- {
- sbitmap_free (live[e->dest->index]);
- live[e->dest->index] = NULL;
- }
}
-
- XDELETEVEC (rpo);
- XDELETEVEC (bb_rpo);
- XDELETEVEC (last_rpo);
- for (i = 0; i < last_basic_block_for_fn (cfun); ++i)
- if (live[i])
- sbitmap_free (live[i]);
- XDELETEVEC (live);
}
-/* Create an ASSERT_EXPR for NAME and insert it in the location
- indicated by LOC. Return true if we made any edge insertions. */
+/* walk_tree() callback that checks if *TP is
+ an ARRAY_REF inside an ADDR_EXPR (in which an array
+ subscript one outside the valid range is allowed). Call
+ check_array_ref for each ARRAY_REF found. The location is
+ passed in DATA. */
-static bool
-process_assert_insertions_for (tree name, assert_locus *loc)
+static tree
+check_array_bounds (tree *tp, int *walk_subtree, void *data)
{
- /* Build the comparison expression NAME_i COMP_CODE VAL. */
- gimple *stmt;
- tree cond;
- gimple *assert_stmt;
- edge_iterator ei;
- edge e;
-
- /* If we have X <=> X do not insert an assert expr for that. */
- if (loc->expr == loc->val)
- return false;
+ tree t = *tp;
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ location_t location;
- cond = build2 (loc->comp_code, boolean_type_node, loc->expr, loc->val);
- assert_stmt = build_assert_expr_for (cond, name);
- if (loc->e)
- {
- /* We have been asked to insert the assertion on an edge. This
- is used only by COND_EXPR and SWITCH_EXPR assertions. */
- gcc_checking_assert (gimple_code (gsi_stmt (loc->si)) == GIMPLE_COND
- || (gimple_code (gsi_stmt (loc->si))
- == GIMPLE_SWITCH));
+ if (EXPR_HAS_LOCATION (t))
+ location = EXPR_LOCATION (t);
+ else
+ location = gimple_location (wi->stmt);
- gsi_insert_on_edge (loc->e, assert_stmt);
- return true;
- }
+ *walk_subtree = TRUE;
- /* If the stmt iterator points at the end then this is an insertion
- at the beginning of a block. */
- if (gsi_end_p (loc->si))
- {
- gimple_stmt_iterator si = gsi_after_labels (loc->bb);
- gsi_insert_before (&si, assert_stmt, GSI_SAME_STMT);
- return false;
+ vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
+ if (TREE_CODE (t) == ARRAY_REF)
+ vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
- }
- /* Otherwise, we can insert right after LOC->SI iff the
- statement must not be the last statement in the block. */
- stmt = gsi_stmt (loc->si);
- if (!stmt_ends_bb_p (stmt))
+ else if (TREE_CODE (t) == ADDR_EXPR)
{
- gsi_insert_after (&loc->si, assert_stmt, GSI_SAME_STMT);
- return false;
+ vrp_prop->search_for_addr_array (t, location);
+ *walk_subtree = FALSE;
}
- /* If STMT must be the last statement in BB, we can only insert new
- assertions on the non-abnormal edge out of BB. Note that since
- STMT is not control flow, there may only be one non-abnormal/eh edge
- out of BB. */
- FOR_EACH_EDGE (e, ei, loc->bb->succs)
- if (!(e->flags & (EDGE_ABNORMAL|EDGE_EH)))
- {
- gsi_insert_on_edge (e, assert_stmt);
- return true;
- }
-
- gcc_unreachable ();
+ return NULL_TREE;
}
-/* Qsort helper for sorting assert locations. If stable is true, don't
- use iterative_hash_expr because it can be unstable for -fcompare-debug,
- on the other side some pointers might be NULL. */
+/* Walk over all statements of all reachable BBs and call check_array_bounds
+ on them. */
-template <bool stable>
-static int
-compare_assert_loc (const void *pa, const void *pb)
+void
+vrp_prop::check_all_array_refs ()
{
- assert_locus * const a = *(assert_locus * const *)pa;
- assert_locus * const b = *(assert_locus * const *)pb;
+ basic_block bb;
+ gimple_stmt_iterator si;
- /* If stable, some asserts might be optimized away already, sort
- them last. */
- if (stable)
+ FOR_EACH_BB_FN (bb, cfun)
{
- if (a == NULL)
- return b != NULL;
- else if (b == NULL)
- return -1;
- }
-
- if (a->e == NULL && b->e != NULL)
- return 1;
- else if (a->e != NULL && b->e == NULL)
- return -1;
+ edge_iterator ei;
+ edge e;
+ bool executable = false;
- /* After the above checks, we know that (a->e == NULL) == (b->e == NULL),
- no need to test both a->e and b->e. */
+ /* Skip blocks that were found to be unreachable. */
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ executable |= !!(e->flags & EDGE_EXECUTABLE);
+ if (!executable)
+ continue;
- /* Sort after destination index. */
- if (a->e == NULL)
- ;
- else if (a->e->dest->index > b->e->dest->index)
- return 1;
- else if (a->e->dest->index < b->e->dest->index)
- return -1;
+ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
+ {
+ gimple *stmt = gsi_stmt (si);
+ struct walk_stmt_info wi;
+ if (!gimple_has_location (stmt)
+ || is_gimple_debug (stmt))
+ continue;
- /* Sort after comp_code. */
- if (a->comp_code > b->comp_code)
- return 1;
- else if (a->comp_code < b->comp_code)
- return -1;
+ memset (&wi, 0, sizeof (wi));
- hashval_t ha, hb;
+ wi.info = this;
- /* E.g. if a->val is ADDR_EXPR of a VAR_DECL, iterative_hash_expr
- uses DECL_UID of the VAR_DECL, so sorting might differ between
- -g and -g0. When doing the removal of redundant assert exprs
- and commonization to successors, this does not matter, but for
- the final sort needs to be stable. */
- if (stable)
- {
- ha = 0;
- hb = 0;
- }
- else
- {
- ha = iterative_hash_expr (a->expr, iterative_hash_expr (a->val, 0));
- hb = iterative_hash_expr (b->expr, iterative_hash_expr (b->val, 0));
+ walk_gimple_op (gsi_stmt (si),
+ check_array_bounds,
+ &wi);
+ }
}
+}
- /* Break the tie using hashing and source/bb index. */
- if (ha == hb)
- return (a->e != NULL
- ? a->e->src->index - b->e->src->index
- : a->bb->index - b->bb->index);
- return ha > hb ? 1 : -1;
+/* Return true if all imm uses of VAR are either in STMT, or
+ feed (optionally through a chain of single imm uses) GIMPLE_COND
+ in basic block COND_BB. */
+
+static bool
+all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
+{
+ use_operand_p use_p, use2_p;
+ imm_use_iterator iter;
+
+ FOR_EACH_IMM_USE_FAST (use_p, iter, var)
+ if (USE_STMT (use_p) != stmt)
+ {
+ gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
+ if (is_gimple_debug (use_stmt))
+ continue;
+ while (is_gimple_assign (use_stmt)
+ && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
+ && single_imm_use (gimple_assign_lhs (use_stmt),
+ &use2_p, &use_stmt2))
+ use_stmt = use_stmt2;
+ if (gimple_code (use_stmt) != GIMPLE_COND
+ || gimple_bb (use_stmt) != cond_bb)
+ return false;
+ }
+ return true;
}
-/* Process all the insertions registered for every name N_i registered
- in NEED_ASSERT_FOR. The list of assertions to be inserted are
- found in ASSERTS_FOR[i]. */
+/* Handle
+ _4 = x_3 & 31;
+ if (_4 != 0)
+ goto <bb 6>;
+ else
+ goto <bb 7>;
+ <bb 6>:
+ __builtin_unreachable ();
+ <bb 7>:
+ x_5 = ASSERT_EXPR <x_3, ...>;
+ If x_3 has no other immediate uses (checked by caller),
+ var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
+ from the non-zero bitmask. */
static void
-process_assert_insertions (void)
+maybe_set_nonzero_bits (basic_block bb, tree var)
{
- unsigned i;
- bitmap_iterator bi;
- bool update_edges_p = false;
- int num_asserts = 0;
+ edge e = single_pred_edge (bb);
+ basic_block cond_bb = e->src;
+ gimple *stmt = last_stmt (cond_bb);
+ tree cst;
- if (dump_file && (dump_flags & TDF_DETAILS))
- dump_all_asserts (dump_file);
+ if (stmt == NULL
+ || gimple_code (stmt) != GIMPLE_COND
+ || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
+ ? EQ_EXPR : NE_EXPR)
+ || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
+ || !integer_zerop (gimple_cond_rhs (stmt)))
+ return;
- EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
+ stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
+ if (!is_gimple_assign (stmt)
+ || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+ || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
+ return;
+ if (gimple_assign_rhs1 (stmt) != var)
{
- assert_locus *loc = asserts_for[i];
- gcc_assert (loc);
-
- auto_vec<assert_locus *, 16> asserts;
- for (; loc; loc = loc->next)
- asserts.safe_push (loc);
- asserts.qsort (compare_assert_loc<false>);
-
- /* Push down common asserts to successors and remove redundant ones. */
- unsigned ecnt = 0;
- assert_locus *common = NULL;
- unsigned commonj = 0;
- for (unsigned j = 0; j < asserts.length (); ++j)
- {
- loc = asserts[j];
- if (! loc->e)
- common = NULL;
- else if (! common
- || loc->e->dest != common->e->dest
- || loc->comp_code != common->comp_code
- || ! operand_equal_p (loc->val, common->val, 0)
- || ! operand_equal_p (loc->expr, common->expr, 0))
- {
- commonj = j;
- common = loc;
- ecnt = 1;
- }
- else if (loc->e == asserts[j-1]->e)
- {
- /* Remove duplicate asserts. */
- if (commonj == j - 1)
- {
- commonj = j;
- common = loc;
- }
- free (asserts[j-1]);
- asserts[j-1] = NULL;
- }
- else
- {
- ecnt++;
- if (EDGE_COUNT (common->e->dest->preds) == ecnt)
- {
- /* We have the same assertion on all incoming edges of a BB.
- Insert it at the beginning of that block. */
- loc->bb = loc->e->dest;
- loc->e = NULL;
- loc->si = gsi_none ();
- common = NULL;
- /* Clear asserts commoned. */
- for (; commonj != j; ++commonj)
- if (asserts[commonj])
- {
- free (asserts[commonj]);
- asserts[commonj] = NULL;
- }
- }
- }
- }
+ gimple *stmt2;
- /* The asserts vector sorting above might be unstable for
- -fcompare-debug, sort again to ensure a stable sort. */
- asserts.qsort (compare_assert_loc<true>);
- for (unsigned j = 0; j < asserts.length (); ++j)
- {
- loc = asserts[j];
- if (! loc)
- break;
- update_edges_p |= process_assert_insertions_for (ssa_name (i), loc);
- num_asserts++;
- free (loc);
- }
+ if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+ return;
+ stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+ if (!gimple_assign_cast_p (stmt2)
+ || gimple_assign_rhs1 (stmt2) != var
+ || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
+ || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
+ != TYPE_PRECISION (TREE_TYPE (var))))
+ return;
}
-
- if (update_edges_p)
- gsi_commit_edge_inserts ();
-
- statistics_counter_event (cfun, "Number of ASSERT_EXPR expressions inserted",
- num_asserts);
+ cst = gimple_assign_rhs2 (stmt);
+ set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+ wi::to_wide (cst)));
}
+/* Convert range assertion expressions into the implied copies and
+ copy propagate away the copies. Doing the trivial copy propagation
+ here avoids the need to run the full copy propagation pass after
+ VRP.
-/* Traverse the flowgraph looking for conditional jumps to insert range
- expressions. These range expressions are meant to provide information
- to optimizations that need to reason in terms of value ranges. They
- will not be expanded into RTL. For instance, given:
+ FIXME, this will eventually lead to copy propagation removing the
+ names that had useful range information attached to them. For
+ instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
+ then N_i will have the range [3, +INF].
- x = ...
- y = ...
- if (x < y)
- y = x - 2;
- else
- x = y + 3;
+ However, by converting the assertion into the implied copy
+ operation N_i = N_j, we will then copy-propagate N_j into the uses
+ of N_i and lose the range information. We may want to hold on to
+ ASSERT_EXPRs a little while longer as the ranges could be used in
+ things like jump threading.
- this pass will transform the code into:
-
- x = ...
- y = ...
- if (x < y)
- {
- x = ASSERT_EXPR <x, x < y>
- y = x - 2
- }
- else
- {
- y = ASSERT_EXPR <y, x >= y>
- x = y + 3
- }
+ The problem with keeping ASSERT_EXPRs around is that passes after
+ VRP need to handle them appropriately.
- The idea is that once copy and constant propagation have run, other
- optimizations will be able to determine what ranges of values can 'x'
- take in different paths of the code, simply by checking the reaching
- definition of 'x'. */
+ Another approach would be to make the range information a first
+ class property of the SSA_NAME so that it can be queried from
+ any pass. This is made somewhat more complex by the need for
+ multiple ranges to be associated with one SSA_NAME. */
static void
-insert_range_assertions (void)
-{
- need_assert_for = BITMAP_ALLOC (NULL);
- asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
-
- calculate_dominance_info (CDI_DOMINATORS);
-
- find_assert_locations ();
- if (!bitmap_empty_p (need_assert_for))
- {
- process_assert_insertions ();
- update_ssa (TODO_update_ssa_no_phi);
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nSSA form after inserting ASSERT_EXPRs\n");
- dump_function_to_file (current_function_decl, dump_file, dump_flags);
- }
-
- free (asserts_for);
- BITMAP_FREE (need_assert_for);
-}
-
-class vrp_prop : public ssa_propagation_engine
-{
- public:
- enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
- enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
-
- void vrp_initialize (void);
- void vrp_finalize (bool);
- void check_all_array_refs (void);
- void check_array_ref (location_t, tree, bool);
- void search_for_addr_array (tree, location_t);
-
- class vr_values vr_values;
- /* Temporary delegator to minimize code churn. */
- value_range *get_value_range (const_tree op)
- { return vr_values.get_value_range (op); }
- void set_defs_to_varying (gimple *stmt)
- { return vr_values.set_defs_to_varying (stmt); }
- void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
- tree *output_p, value_range *vr)
- { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
- bool update_value_range (const_tree op, value_range *vr)
- { return vr_values.update_value_range (op, vr); }
- void extract_range_basic (value_range *vr, gimple *stmt)
- { vr_values.extract_range_basic (vr, stmt); }
- void extract_range_from_phi_node (gphi *phi, value_range *vr)
- { vr_values.extract_range_from_phi_node (phi, vr); }
-};
-
-/* Checks one ARRAY_REF in REF, located at LOCUS. Ignores flexible arrays
- and "struct" hacks. If VRP can determine that the
- array subscript is a constant, check if it is outside valid
- range. If the array subscript is a RANGE, warn if it is
- non-overlapping with valid range.
- IGNORE_OFF_BY_ONE is true if the ARRAY_REF is inside a ADDR_EXPR. */
-
-void
-vrp_prop::check_array_ref (location_t location, tree ref,
- bool ignore_off_by_one)
+remove_range_assertions (void)
{
- value_range *vr = NULL;
- tree low_sub, up_sub;
- tree low_bound, up_bound, up_bound_p1;
-
- if (TREE_NO_WARNING (ref))
- return;
-
- low_sub = up_sub = TREE_OPERAND (ref, 1);
- up_bound = array_ref_up_bound (ref);
+ basic_block bb;
+ gimple_stmt_iterator si;
+ /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
+ a basic block preceeded by GIMPLE_COND branching to it and
+ __builtin_trap, -1 if not yet checked, 0 otherwise. */
+ int is_unreachable;
- /* Can not check flexible arrays. */
- if (!up_bound
- || TREE_CODE (up_bound) != INTEGER_CST)
- return;
+ /* Note that the BSI iterator bump happens at the bottom of the
+ loop and no bump is necessary if we're removing the statement
+ referenced by the current BSI. */
+ FOR_EACH_BB_FN (bb, cfun)
+ for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
+ {
+ gimple *stmt = gsi_stmt (si);
- /* Accesses to trailing arrays via pointers may access storage
- beyond the types array bounds. */
- if (warn_array_bounds < 2
- && array_at_struct_end_p (ref))
- return;
+ if (is_gimple_assign (stmt)
+ && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
+ {
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs = gimple_assign_rhs1 (stmt);
+ tree var;
- low_bound = array_ref_low_bound (ref);
- up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
- build_int_cst (TREE_TYPE (up_bound), 1));
+ var = ASSERT_EXPR_VAR (rhs);
- /* Empty array. */
- if (tree_int_cst_equal (low_bound, up_bound_p1))
- {
- warning_at (location, OPT_Warray_bounds,
- "array subscript is above array bounds");
- TREE_NO_WARNING (ref) = 1;
- }
+ if (TREE_CODE (var) == SSA_NAME
+ && !POINTER_TYPE_P (TREE_TYPE (lhs))
+ && SSA_NAME_RANGE_INFO (lhs))
+ {
+ if (is_unreachable == -1)
+ {
+ is_unreachable = 0;
+ if (single_pred_p (bb)
+ && assert_unreachable_fallthru_edge_p
+ (single_pred_edge (bb)))
+ is_unreachable = 1;
+ }
+ /* Handle
+ if (x_7 >= 10 && x_7 < 20)
+ __builtin_unreachable ();
+ x_8 = ASSERT_EXPR <x_7, ...>;
+ if the only uses of x_7 are in the ASSERT_EXPR and
+ in the condition. In that case, we can copy the
+ range info from x_8 computed in this pass also
+ for x_7. */
+ if (is_unreachable
+ && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
+ single_pred (bb)))
+ {
+ set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
+ SSA_NAME_RANGE_INFO (lhs)->get_min (),
+ SSA_NAME_RANGE_INFO (lhs)->get_max ());
+ maybe_set_nonzero_bits (bb, var);
+ }
+ }
- if (TREE_CODE (low_sub) == SSA_NAME)
- {
- vr = get_value_range (low_sub);
- if (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE)
- {
- low_sub = vr->type == VR_RANGE ? vr->max : vr->min;
- up_sub = vr->type == VR_RANGE ? vr->min : vr->max;
- }
- }
+ /* Propagate the RHS into every use of the LHS. For SSA names
+ also propagate abnormals as it merely restores the original
+	     IL in this case (a replace_uses_by would assert).  */
+ if (TREE_CODE (var) == SSA_NAME)
+ {
+ imm_use_iterator iter;
+ use_operand_p use_p;
+ gimple *use_stmt;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, var);
+ }
+ else
+ replace_uses_by (lhs, var);
- if (vr && vr->type == VR_ANTI_RANGE)
- {
- if (TREE_CODE (up_sub) == INTEGER_CST
- && (ignore_off_by_one
- ? tree_int_cst_lt (up_bound, up_sub)
- : tree_int_cst_le (up_bound, up_sub))
- && TREE_CODE (low_sub) == INTEGER_CST
- && tree_int_cst_le (low_sub, low_bound))
- {
- warning_at (location, OPT_Warray_bounds,
- "array subscript is outside array bounds");
- TREE_NO_WARNING (ref) = 1;
- }
- }
- else if (TREE_CODE (up_sub) == INTEGER_CST
- && (ignore_off_by_one
- ? !tree_int_cst_le (up_sub, up_bound_p1)
- : !tree_int_cst_le (up_sub, up_bound)))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Array bound warning for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
- fprintf (dump_file, "\n");
- }
- warning_at (location, OPT_Warray_bounds,
- "array subscript is above array bounds");
- TREE_NO_WARNING (ref) = 1;
- }
- else if (TREE_CODE (low_sub) == INTEGER_CST
- && tree_int_cst_lt (low_sub, low_bound))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Array bound warning for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
- fprintf (dump_file, "\n");
- }
- warning_at (location, OPT_Warray_bounds,
- "array subscript is below array bounds");
- TREE_NO_WARNING (ref) = 1;
- }
+ /* And finally, remove the copy, it is not needed. */
+ gsi_remove (&si, true);
+ release_defs (stmt);
+ }
+ else
+ {
+ if (!is_gimple_debug (gsi_stmt (si)))
+ is_unreachable = 0;
+ gsi_next (&si);
+ }
+ }
}
-/* Searches if the expr T, located at LOCATION computes
- address of an ARRAY_REF, and call check_array_ref on it. */
-void
-vrp_prop::search_for_addr_array (tree t, location_t location)
+/* Return true if STMT is interesting for VRP. */
+
+bool
+stmt_interesting_for_vrp (gimple *stmt)
{
- /* Check each ARRAY_REFs in the reference chain. */
- do
+ if (gimple_code (stmt) == GIMPLE_PHI)
{
- if (TREE_CODE (t) == ARRAY_REF)
- check_array_ref (location, t, true /*ignore_off_by_one*/);
-
- t = TREE_OPERAND (t, 0);
- }
- while (handled_component_p (t));
-
- if (TREE_CODE (t) == MEM_REF
- && TREE_CODE (TREE_OPERAND (t, 0)) == ADDR_EXPR
- && !TREE_NO_WARNING (t))
- {
- tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
- tree low_bound, up_bound, el_sz;
- offset_int idx;
- if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
- || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
- || !TYPE_DOMAIN (TREE_TYPE (tem)))
- return;
-
- low_bound = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
- up_bound = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (tem)));
- el_sz = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (tem)));
- if (!low_bound
- || TREE_CODE (low_bound) != INTEGER_CST
- || !up_bound
- || TREE_CODE (up_bound) != INTEGER_CST
- || !el_sz
- || TREE_CODE (el_sz) != INTEGER_CST)
- return;
-
- idx = mem_ref_offset (t);
- idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
- if (idx < 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Array bound warning for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
- fprintf (dump_file, "\n");
- }
- warning_at (location, OPT_Warray_bounds,
- "array subscript is below array bounds");
- TREE_NO_WARNING (t) = 1;
- }
- else if (idx > (wi::to_offset (up_bound)
- - wi::to_offset (low_bound) + 1))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Array bound warning for ");
- dump_generic_expr (MSG_NOTE, TDF_SLIM, t);
- fprintf (dump_file, "\n");
- }
- warning_at (location, OPT_Warray_bounds,
- "array subscript is above array bounds");
- TREE_NO_WARNING (t) = 1;
- }
- }
-}
-
-/* walk_tree() callback that checks if *TP is
- an ARRAY_REF inside an ADDR_EXPR (in which an array
- subscript one outside the valid range is allowed). Call
- check_array_ref for each ARRAY_REF found. The location is
- passed in DATA. */
-
-static tree
-check_array_bounds (tree *tp, int *walk_subtree, void *data)
-{
- tree t = *tp;
- struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
- location_t location;
-
- if (EXPR_HAS_LOCATION (t))
- location = EXPR_LOCATION (t);
- else
- location = gimple_location (wi->stmt);
-
- *walk_subtree = TRUE;
-
- vrp_prop *vrp_prop = (class vrp_prop *)wi->info;
- if (TREE_CODE (t) == ARRAY_REF)
- vrp_prop->check_array_ref (location, t, false /*ignore_off_by_one*/);
-
- else if (TREE_CODE (t) == ADDR_EXPR)
- {
- vrp_prop->search_for_addr_array (t, location);
- *walk_subtree = FALSE;
- }
-
- return NULL_TREE;
-}
-
-/* Walk over all statements of all reachable BBs and call check_array_bounds
- on them. */
-
-void
-vrp_prop::check_all_array_refs ()
-{
- basic_block bb;
- gimple_stmt_iterator si;
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- edge_iterator ei;
- edge e;
- bool executable = false;
-
- /* Skip blocks that were found to be unreachable. */
- FOR_EACH_EDGE (e, ei, bb->preds)
- executable |= !!(e->flags & EDGE_EXECUTABLE);
- if (!executable)
- continue;
-
- for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
- {
- gimple *stmt = gsi_stmt (si);
- struct walk_stmt_info wi;
- if (!gimple_has_location (stmt)
- || is_gimple_debug (stmt))
- continue;
-
- memset (&wi, 0, sizeof (wi));
-
- wi.info = this;
-
- walk_gimple_op (gsi_stmt (si),
- check_array_bounds,
- &wi);
- }
- }
-}
-
-/* Return true if all imm uses of VAR are either in STMT, or
- feed (optionally through a chain of single imm uses) GIMPLE_COND
- in basic block COND_BB. */
-
-static bool
-all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
-{
- use_operand_p use_p, use2_p;
- imm_use_iterator iter;
-
- FOR_EACH_IMM_USE_FAST (use_p, iter, var)
- if (USE_STMT (use_p) != stmt)
- {
- gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
- if (is_gimple_debug (use_stmt))
- continue;
- while (is_gimple_assign (use_stmt)
- && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
- && single_imm_use (gimple_assign_lhs (use_stmt),
- &use2_p, &use_stmt2))
- use_stmt = use_stmt2;
- if (gimple_code (use_stmt) != GIMPLE_COND
- || gimple_bb (use_stmt) != cond_bb)
- return false;
- }
- return true;
-}
-
-/* Handle
- _4 = x_3 & 31;
- if (_4 != 0)
- goto <bb 6>;
- else
- goto <bb 7>;
- <bb 6>:
- __builtin_unreachable ();
- <bb 7>:
- x_5 = ASSERT_EXPR <x_3, ...>;
- If x_3 has no other immediate uses (checked by caller),
- var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
- from the non-zero bitmask. */
-
-static void
-maybe_set_nonzero_bits (basic_block bb, tree var)
-{
- edge e = single_pred_edge (bb);
- basic_block cond_bb = e->src;
- gimple *stmt = last_stmt (cond_bb);
- tree cst;
-
- if (stmt == NULL
- || gimple_code (stmt) != GIMPLE_COND
- || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
- ? EQ_EXPR : NE_EXPR)
- || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
- || !integer_zerop (gimple_cond_rhs (stmt)))
- return;
-
- stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
- if (!is_gimple_assign (stmt)
- || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
- || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
- return;
- if (gimple_assign_rhs1 (stmt) != var)
- {
- gimple *stmt2;
-
- if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
- return;
- stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
- if (!gimple_assign_cast_p (stmt2)
- || gimple_assign_rhs1 (stmt2) != var
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
- || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
- != TYPE_PRECISION (TREE_TYPE (var))))
- return;
- }
- cst = gimple_assign_rhs2 (stmt);
- set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
- wi::to_wide (cst)));
-}
-
-/* Convert range assertion expressions into the implied copies and
- copy propagate away the copies. Doing the trivial copy propagation
- here avoids the need to run the full copy propagation pass after
- VRP.
-
- FIXME, this will eventually lead to copy propagation removing the
- names that had useful range information attached to them. For
- instance, if we had the assertion N_i = ASSERT_EXPR <N_j, N_j > 3>,
- then N_i will have the range [3, +INF].
-
- However, by converting the assertion into the implied copy
- operation N_i = N_j, we will then copy-propagate N_j into the uses
- of N_i and lose the range information. We may want to hold on to
- ASSERT_EXPRs a little while longer as the ranges could be used in
- things like jump threading.
-
- The problem with keeping ASSERT_EXPRs around is that passes after
- VRP need to handle them appropriately.
-
- Another approach would be to make the range information a first
- class property of the SSA_NAME so that it can be queried from
- any pass. This is made somewhat more complex by the need for
- multiple ranges to be associated with one SSA_NAME. */
-
-static void
-remove_range_assertions (void)
-{
- basic_block bb;
- gimple_stmt_iterator si;
- /* 1 if looking at ASSERT_EXPRs immediately at the beginning of
- a basic block preceeded by GIMPLE_COND branching to it and
- __builtin_trap, -1 if not yet checked, 0 otherwise. */
- int is_unreachable;
-
- /* Note that the BSI iterator bump happens at the bottom of the
- loop and no bump is necessary if we're removing the statement
- referenced by the current BSI. */
- FOR_EACH_BB_FN (bb, cfun)
- for (si = gsi_after_labels (bb), is_unreachable = -1; !gsi_end_p (si);)
- {
- gimple *stmt = gsi_stmt (si);
-
- if (is_gimple_assign (stmt)
- && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
- {
- tree lhs = gimple_assign_lhs (stmt);
- tree rhs = gimple_assign_rhs1 (stmt);
- tree var;
-
- var = ASSERT_EXPR_VAR (rhs);
-
- if (TREE_CODE (var) == SSA_NAME
- && !POINTER_TYPE_P (TREE_TYPE (lhs))
- && SSA_NAME_RANGE_INFO (lhs))
- {
- if (is_unreachable == -1)
- {
- is_unreachable = 0;
- if (single_pred_p (bb)
- && assert_unreachable_fallthru_edge_p
- (single_pred_edge (bb)))
- is_unreachable = 1;
- }
- /* Handle
- if (x_7 >= 10 && x_7 < 20)
- __builtin_unreachable ();
- x_8 = ASSERT_EXPR <x_7, ...>;
- if the only uses of x_7 are in the ASSERT_EXPR and
- in the condition. In that case, we can copy the
- range info from x_8 computed in this pass also
- for x_7. */
- if (is_unreachable
- && all_imm_uses_in_stmt_or_feed_cond (var, stmt,
- single_pred (bb)))
- {
- set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
- SSA_NAME_RANGE_INFO (lhs)->get_min (),
- SSA_NAME_RANGE_INFO (lhs)->get_max ());
- maybe_set_nonzero_bits (bb, var);
- }
- }
-
- /* Propagate the RHS into every use of the LHS. For SSA names
- also propagate abnormals as it merely restores the original
- IL in this case (an replace_uses_by would assert). */
- if (TREE_CODE (var) == SSA_NAME)
- {
- imm_use_iterator iter;
- use_operand_p use_p;
- gimple *use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
- FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
- SET_USE (use_p, var);
- }
- else
- replace_uses_by (lhs, var);
-
- /* And finally, remove the copy, it is not needed. */
- gsi_remove (&si, true);
- release_defs (stmt);
- }
- else
- {
- if (!is_gimple_debug (gsi_stmt (si)))
- is_unreachable = 0;
- gsi_next (&si);
- }
- }
-}
-
-
-/* Return true if STMT is interesting for VRP. */
-
-bool
-stmt_interesting_for_vrp (gimple *stmt)
-{
- if (gimple_code (stmt) == GIMPLE_PHI)
- {
- tree res = gimple_phi_result (stmt);
- return (!virtual_operand_p (res)
- && (INTEGRAL_TYPE_P (TREE_TYPE (res))
- || POINTER_TYPE_P (TREE_TYPE (res))));
- }
- else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
- {
- tree lhs = gimple_get_lhs (stmt);
-
- /* In general, assignments with virtual operands are not useful
- for deriving ranges, with the obvious exception of calls to
- builtin functions. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME
- && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && (is_gimple_call (stmt)
- || !gimple_vuse (stmt)))
- return true;
- else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_ADD_OVERFLOW:
- case IFN_SUB_OVERFLOW:
- case IFN_MUL_OVERFLOW:
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- /* These internal calls return _Complex integer type,
- but are interesting to VRP nevertheless. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME)
- return true;
- break;
- default:
- break;
- }
- }
- else if (gimple_code (stmt) == GIMPLE_COND
- || gimple_code (stmt) == GIMPLE_SWITCH)
- return true;
-
- return false;
-}
-
-/* Initialize VRP lattice. */
-
-vr_values::vr_values () : vrp_value_range_pool ("Tree VRP value ranges")
-{
- values_propagated = false;
- num_vr_values = num_ssa_names;
- vr_value = XCNEWVEC (value_range *, num_vr_values);
- vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
- bitmap_obstack_initialize (&vrp_equiv_obstack);
-}
-
-/* Initialization required by ssa_propagate engine. */
-
-void
-vrp_prop::vrp_initialize ()
-{
- basic_block bb;
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
- gsi_next (&si))
- {
- gphi *phi = si.phi ();
- if (!stmt_interesting_for_vrp (phi))
- {
- tree lhs = PHI_RESULT (phi);
- set_value_range_to_varying (get_value_range (lhs));
- prop_set_simulate_again (phi, false);
- }
- else
- prop_set_simulate_again (phi, true);
- }
-
- for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
- gsi_next (&si))
- {
- gimple *stmt = gsi_stmt (si);
-
- /* If the statement is a control insn, then we do not
- want to avoid simulating the statement once. Failure
- to do so means that those edges will never get added. */
- if (stmt_ends_bb_p (stmt))
- prop_set_simulate_again (stmt, true);
- else if (!stmt_interesting_for_vrp (stmt))
- {
- set_defs_to_varying (stmt);
- prop_set_simulate_again (stmt, false);
- }
- else
- prop_set_simulate_again (stmt, true);
- }
- }
-}
-
-/* A hack. */
-static class vr_values *x_vr_values;
-
-/* Return the singleton value-range for NAME or NAME. */
-
-static inline tree
-vrp_valueize (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- value_range *vr = x_vr_values->get_value_range (name);
- if (vr->type == VR_RANGE
- && (TREE_CODE (vr->min) == SSA_NAME
- || is_gimple_min_invariant (vr->min))
- && vrp_operand_equal_p (vr->min, vr->max))
- return vr->min;
- }
- return name;
-}
-
-/* Return the singleton value-range for NAME if that is a constant
- but signal to not follow SSA edges. */
-
-static inline tree
-vrp_valueize_1 (tree name)
-{
- if (TREE_CODE (name) == SSA_NAME)
- {
- /* If the definition may be simulated again we cannot follow
- this SSA edge as the SSA propagator does not necessarily
- re-visit the use. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (name);
- if (!gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt))
- return NULL_TREE;
- value_range *vr = x_vr_values->get_value_range (name);
- if (range_int_cst_singleton_p (vr))
- return vr->min;
- }
- return name;
-}
-
-/* Visit assignment STMT. If it produces an interesting range, record
- the range in VR and set LHS to OUTPUT_P. */
-
-void
-vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
- value_range *vr)
-{
- tree lhs;
- enum gimple_code code = gimple_code (stmt);
- lhs = gimple_get_lhs (stmt);
- *output_p = NULL_TREE;
-
- /* We only keep track of ranges in integral and pointer types. */
- if (TREE_CODE (lhs) == SSA_NAME
- && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- /* It is valid to have NULL MIN/MAX values on a type. See
- build_range_type. */
- && TYPE_MIN_VALUE (TREE_TYPE (lhs))
- && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
- || POINTER_TYPE_P (TREE_TYPE (lhs))))
- {
- *output_p = lhs;
-
- /* Try folding the statement to a constant first. */
- x_vr_values = this;
- tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
- vrp_valueize_1);
- x_vr_values = NULL;
- if (tem)
- {
- if (TREE_CODE (tem) == SSA_NAME
- && (SSA_NAME_IS_DEFAULT_DEF (tem)
- || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
- {
- extract_range_from_ssa_name (vr, tem);
- return;
- }
- else if (is_gimple_min_invariant (tem))
- {
- set_value_range_to_value (vr, tem, NULL);
- return;
- }
- }
- /* Then dispatch to value-range extracting functions. */
- if (code == GIMPLE_CALL)
- extract_range_basic (vr, stmt);
- else
- extract_range_from_assignment (vr, as_a <gassign *> (stmt));
- }
-}
-
-/* Helper that gets the value range of the SSA_NAME with version I
- or a symbolic range containing the SSA_NAME only if the value range
- is varying or undefined. */
-
-value_range
-vr_values::get_vr_for_comparison (int i)
-{
- value_range vr = *get_value_range (ssa_name (i));
-
- /* If name N_i does not have a valid range, use N_i as its own
- range. This allows us to compare against names that may
- have N_i in their ranges. */
- if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
- {
- vr.type = VR_RANGE;
- vr.min = ssa_name (i);
- vr.max = ssa_name (i);
- }
-
- return vr;
-}
-
-/* Compare all the value ranges for names equivalent to VAR with VAL
- using comparison code COMP. Return the same value returned by
- compare_range_with_value, including the setting of
- *STRICT_OVERFLOW_P. */
-
-tree
-vr_values::compare_name_with_value (enum tree_code comp, tree var, tree val,
- bool *strict_overflow_p, bool use_equiv_p)
-{
- bitmap_iterator bi;
- unsigned i;
- bitmap e;
- tree retval, t;
- int used_strict_overflow;
- bool sop;
- value_range equiv_vr;
-
- /* Get the set of equivalences for VAR. */
- e = get_value_range (var)->equiv;
-
- /* Start at -1. Set it to 0 if we do a comparison without relying
- on overflow, or 1 if all comparisons rely on overflow. */
- used_strict_overflow = -1;
-
- /* Compare vars' value range with val. */
- equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
- sop = false;
- retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
- if (retval)
- used_strict_overflow = sop ? 1 : 0;
-
- /* If the equiv set is empty we have done all work we need to do. */
- if (e == NULL)
- {
- if (retval
- && used_strict_overflow > 0)
- *strict_overflow_p = true;
- return retval;
- }
-
- EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
- {
- tree name = ssa_name (i);
- if (! name)
- continue;
-
- if (! use_equiv_p
- && ! SSA_NAME_IS_DEFAULT_DEF (name)
- && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
- continue;
-
- equiv_vr = get_vr_for_comparison (i);
- sop = false;
- t = compare_range_with_value (comp, &equiv_vr, val, &sop);
- if (t)
- {
- /* If we get different answers from different members
- of the equivalence set this check must be in a dead
- code region. Folding it to a trap representation
- would be correct here. For now just return don't-know. */
- if (retval != NULL
- && t != retval)
- {
- retval = NULL_TREE;
- break;
- }
- retval = t;
-
- if (!sop)
- used_strict_overflow = 0;
- else if (used_strict_overflow < 0)
- used_strict_overflow = 1;
- }
- }
-
- if (retval
- && used_strict_overflow > 0)
- *strict_overflow_p = true;
-
- return retval;
-}
-
-
-/* Given a comparison code COMP and names N1 and N2, compare all the
- ranges equivalent to N1 against all the ranges equivalent to N2
- to determine the value of N1 COMP N2. Return the same value
- returned by compare_ranges. Set *STRICT_OVERFLOW_P to indicate
- whether we relied on undefined signed overflow in the comparison. */
-
-
-tree
-vr_values::compare_names (enum tree_code comp, tree n1, tree n2,
- bool *strict_overflow_p)
-{
- tree t, retval;
- bitmap e1, e2;
- bitmap_iterator bi1, bi2;
- unsigned i1, i2;
- int used_strict_overflow;
- static bitmap_obstack *s_obstack = NULL;
- static bitmap s_e1 = NULL, s_e2 = NULL;
-
- /* Compare the ranges of every name equivalent to N1 against the
- ranges of every name equivalent to N2. */
- e1 = get_value_range (n1)->equiv;
- e2 = get_value_range (n2)->equiv;
-
- /* Use the fake bitmaps if e1 or e2 are not available. */
- if (s_obstack == NULL)
- {
- s_obstack = XNEW (bitmap_obstack);
- bitmap_obstack_initialize (s_obstack);
- s_e1 = BITMAP_ALLOC (s_obstack);
- s_e2 = BITMAP_ALLOC (s_obstack);
- }
- if (e1 == NULL)
- e1 = s_e1;
- if (e2 == NULL)
- e2 = s_e2;
-
- /* Add N1 and N2 to their own set of equivalences to avoid
- duplicating the body of the loop just to check N1 and N2
- ranges. */
- bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
-
- /* If the equivalence sets have a common intersection, then the two
- names can be compared without checking their ranges. */
- if (bitmap_intersect_p (e1, e2))
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
-
- return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
- ? boolean_true_node
- : boolean_false_node;
- }
-
- /* Start at -1. Set it to 0 if we do a comparison without relying
- on overflow, or 1 if all comparisons rely on overflow. */
- used_strict_overflow = -1;
-
- /* Otherwise, compare all the equivalent ranges. First, add N1 and
- N2 to their own set of equivalences to avoid duplicating the body
- of the loop just to check N1 and N2 ranges. */
- EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
- {
- if (! ssa_name (i1))
- continue;
-
- value_range vr1 = get_vr_for_comparison (i1);
-
- t = retval = NULL_TREE;
- EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
- {
- if (! ssa_name (i2))
- continue;
-
- bool sop = false;
-
- value_range vr2 = get_vr_for_comparison (i2);
-
- t = compare_ranges (comp, &vr1, &vr2, &sop);
- if (t)
- {
- /* If we get different answers from different members
- of the equivalence set this check must be in a dead
- code region. Folding it to a trap representation
- would be correct here. For now just return don't-know. */
- if (retval != NULL
- && t != retval)
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- return NULL_TREE;
- }
- retval = t;
-
- if (!sop)
- used_strict_overflow = 0;
- else if (used_strict_overflow < 0)
- used_strict_overflow = 1;
- }
- }
-
- if (retval)
- {
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- if (used_strict_overflow > 0)
- *strict_overflow_p = true;
- return retval;
- }
- }
-
- /* None of the equivalent ranges are useful in computing this
- comparison. */
- bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
- bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
- return NULL_TREE;
-}
-
-/* Helper function for vrp_evaluate_conditional_warnv & other
- optimizers. */
-
-tree
-vr_values::vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (enum tree_code code, tree op0, tree op1, bool * strict_overflow_p)
-{
- value_range *vr0, *vr1;
-
- vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
- vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
-
- tree res = NULL_TREE;
- if (vr0 && vr1)
- res = compare_ranges (code, vr0, vr1, strict_overflow_p);
- if (!res && vr0)
- res = compare_range_with_value (code, vr0, op1, strict_overflow_p);
- if (!res && vr1)
- res = (compare_range_with_value
- (swap_tree_comparison (code), vr1, op0, strict_overflow_p));
- return res;
-}
-
-/* Helper function for vrp_evaluate_conditional_warnv. */
-
-tree
-vr_values::vrp_evaluate_conditional_warnv_with_ops (enum tree_code code,
- tree op0, tree op1,
- bool use_equiv_p,
- bool *strict_overflow_p,
- bool *only_ranges)
-{
- tree ret;
- if (only_ranges)
- *only_ranges = true;
-
- /* We only deal with integral and pointer types. */
- if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && !POINTER_TYPE_P (TREE_TYPE (op0)))
- return NULL_TREE;
-
- /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
- as a simple equality test, then prefer that over its current form
- for evaluation.
-
- An overflow test which collapses to an equality test can always be
- expressed as a comparison of one argument against zero. Overflow
- occurs when the chosen argument is zero and does not occur if the
- chosen argument is not zero. */
- tree x;
- if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
- {
- wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
- /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
- B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
- B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
- B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
- if (integer_zerop (x))
- {
- op1 = x;
- code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
- }
- /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
- B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
- B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
- B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
- else if (wi::to_wide (x) == max - 1)
- {
- op0 = op1;
- op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
- code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
- }
- }
-
- if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (code, op0, op1, strict_overflow_p)))
- return ret;
- if (only_ranges)
- *only_ranges = false;
- /* Do not use compare_names during propagation, it's quadratic. */
- if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
- && use_equiv_p)
- return compare_names (code, op0, op1, strict_overflow_p);
- else if (TREE_CODE (op0) == SSA_NAME)
- return compare_name_with_value (code, op0, op1,
- strict_overflow_p, use_equiv_p);
- else if (TREE_CODE (op1) == SSA_NAME)
- return compare_name_with_value (swap_tree_comparison (code), op1, op0,
- strict_overflow_p, use_equiv_p);
- return NULL_TREE;
-}
-
-/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
- information. Return NULL if the conditional can not be evaluated.
- The ranges of all the names equivalent with the operands in COND
- will be used when trying to compute the value. If the result is
- based on undefined signed overflow, issue a warning if
- appropriate. */
-
-tree
-vr_values::vrp_evaluate_conditional (tree_code code, tree op0,
- tree op1, gimple *stmt)
-{
- bool sop;
- tree ret;
- bool only_ranges;
-
- /* Some passes and foldings leak constants with overflow flag set
- into the IL. Avoid doing wrong things with these and bail out. */
- if ((TREE_CODE (op0) == INTEGER_CST
- && TREE_OVERFLOW (op0))
- || (TREE_CODE (op1) == INTEGER_CST
- && TREE_OVERFLOW (op1)))
- return NULL_TREE;
-
- sop = false;
- ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
- &only_ranges);
-
- if (ret && sop)
- {
- enum warn_strict_overflow_code wc;
- const char* warnmsg;
-
- if (is_gimple_min_invariant (ret))
- {
- wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional to constant");
- }
- else
- {
- wc = WARN_STRICT_OVERFLOW_COMPARISON;
- warnmsg = G_("assuming signed overflow does not occur when "
- "simplifying conditional");
- }
-
- if (issue_strict_overflow_warning (wc))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
- }
- }
-
- if (warn_type_limits
- && ret && only_ranges
- && TREE_CODE_CLASS (code) == tcc_comparison
- && TREE_CODE (op0) == SSA_NAME)
- {
- /* If the comparison is being folded and the operand on the LHS
- is being compared against a constant value that is outside of
- the natural range of OP0's type, then the predicate will
- always fold regardless of the value of OP0. If -Wtype-limits
- was specified, emit a warning. */
- tree type = TREE_TYPE (op0);
- value_range *vr0 = get_value_range (op0);
-
- if (vr0->type == VR_RANGE
- && INTEGRAL_TYPE_P (type)
- && vrp_val_is_min (vr0->min)
- && vrp_val_is_max (vr0->max)
- && is_gimple_min_invariant (op1))
- {
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
-
- warning_at (location, OPT_Wtype_limits,
- integer_zerop (ret)
- ? G_("comparison always false "
- "due to limited range of data type")
- : G_("comparison always true "
- "due to limited range of data type"));
- }
- }
-
- return ret;
-}
-
-
-/* Visit conditional statement STMT. If we can determine which edge
- will be taken out of STMT's basic block, record it in
- *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */
-
-void
-vr_values::vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
-{
- tree val;
-
- *taken_edge_p = NULL;
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- tree use;
- ssa_op_iter i;
-
- fprintf (dump_file, "\nVisiting conditional with predicate: ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\nWith known ranges\n");
-
- FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
- {
- fprintf (dump_file, "\t");
- print_generic_expr (dump_file, use);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
- }
-
- fprintf (dump_file, "\n");
- }
-
- /* Compute the value of the predicate COND by checking the known
- ranges of each of its operands.
-
- Note that we cannot evaluate all the equivalent ranges here
- because those ranges may not yet be final and with the current
- propagation strategy, we cannot determine when the value ranges
- of the names in the equivalence set have changed.
-
- For instance, given the following code fragment
-
- i_5 = PHI <8, i_13>
- ...
- i_14 = ASSERT_EXPR <i_5, i_5 != 0>
- if (i_14 == 1)
- ...
-
- Assume that on the first visit to i_14, i_5 has the temporary
- range [8, 8] because the second argument to the PHI function is
- not yet executable. We derive the range ~[0, 0] for i_14 and the
- equivalence set { i_5 }. So, when we visit 'if (i_14 == 1)' for
- the first time, since i_14 is equivalent to the range [8, 8], we
- determine that the predicate is always false.
-
- On the next round of propagation, i_13 is determined to be
- VARYING, which causes i_5 to drop down to VARYING. So, another
- visit to i_14 is scheduled. In this second visit, we compute the
- exact same range and equivalence set for i_14, namely ~[0, 0] and
- { i_5 }. But we did not have the previous range for i_5
- registered, so vrp_visit_assignment thinks that the range for
- i_14 has not changed. Therefore, the predicate 'if (i_14 == 1)'
- is not visited again, which stops propagation from visiting
- statements in the THEN clause of that if().
-
- To properly fix this we would need to keep the previous range
- value for the names in the equivalence set. This way we would've
- discovered that from one visit to the other i_5 changed from
- range [8, 8] to VR_VARYING.
-
- However, fixing this apparent limitation may not be worth the
- additional checking. Testing on several code bases (GCC, DLV,
- MICO, TRAMP3D and SPEC2000) showed that doing this results in
- 4 more predicates folded in SPEC. */
-
- bool sop;
- val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
- gimple_cond_lhs (stmt),
- gimple_cond_rhs (stmt),
- false, &sop, NULL);
- if (val)
- *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nPredicate evaluates to: ");
- if (val == NULL_TREE)
- fprintf (dump_file, "DON'T KNOW\n");
- else
- print_generic_stmt (dump_file, val);
- }
-}
-
-/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
- that includes the value VAL. The search is restricted to the range
- [START_IDX, n - 1] where n is the size of VEC.
-
- If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
- returned.
-
- If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
- it is placed in IDX and false is returned.
-
- If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
- returned. */
-
-static bool
-find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
-{
- size_t n = gimple_switch_num_labels (stmt);
- size_t low, high;
-
- /* Find case label for minimum of the value range or the next one.
- At each iteration we are searching in [low, high - 1]. */
-
- for (low = start_idx, high = n; high != low; )
- {
- tree t;
- int cmp;
- /* Note that i != high, so we never ask for n. */
- size_t i = (high + low) / 2;
- t = gimple_switch_label (stmt, i);
-
- /* Cache the result of comparing CASE_LOW and val. */
- cmp = tree_int_cst_compare (CASE_LOW (t), val);
-
- if (cmp == 0)
- {
- /* Ranges cannot be empty. */
- *idx = i;
- return true;
- }
- else if (cmp > 0)
- high = i;
- else
- {
- low = i + 1;
- if (CASE_HIGH (t) != NULL
- && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
- {
- *idx = i;
- return true;
- }
- }
- }
-
- *idx = high;
- return false;
-}
-
-/* Searches the case label vector VEC for the range of CASE_LABELs that is used
- for values between MIN and MAX. The first index is placed in MIN_IDX. The
- last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
- then MAX_IDX < MIN_IDX.
- Returns true if the default label is not needed. */
-
-static bool
-find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
- size_t *max_idx)
-{
- size_t i, j;
- bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
- bool max_take_default = !find_case_label_index (stmt, i, max, &j);
-
- if (i == j
- && min_take_default
- && max_take_default)
- {
- /* Only the default case label reached.
- Return an empty range. */
- *min_idx = 1;
- *max_idx = 0;
- return false;
- }
- else
- {
- bool take_default = min_take_default || max_take_default;
- tree low, high;
- size_t k;
-
- if (max_take_default)
- j--;
-
- /* If the case label range is continuous, we do not need
- the default case label. Verify that. */
- high = CASE_LOW (gimple_switch_label (stmt, i));
- if (CASE_HIGH (gimple_switch_label (stmt, i)))
- high = CASE_HIGH (gimple_switch_label (stmt, i));
- for (k = i + 1; k <= j; ++k)
- {
- low = CASE_LOW (gimple_switch_label (stmt, k));
- if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
- {
- take_default = true;
- break;
- }
- high = low;
- if (CASE_HIGH (gimple_switch_label (stmt, k)))
- high = CASE_HIGH (gimple_switch_label (stmt, k));
- }
-
- *min_idx = i;
- *max_idx = j;
- return !take_default;
- }
-}
-
-/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
- used in range VR. The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
- MAX_IDX2. If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
- Returns true if the default label is not needed. */
-
-static bool
-find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
- size_t *max_idx1, size_t *min_idx2,
- size_t *max_idx2)
-{
- size_t i, j, k, l;
- unsigned int n = gimple_switch_num_labels (stmt);
- bool take_default;
- tree case_low, case_high;
- tree min = vr->min, max = vr->max;
-
- gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
-
- take_default = !find_case_label_range (stmt, min, max, &i, &j);
-
- /* Set second range to emtpy. */
- *min_idx2 = 1;
- *max_idx2 = 0;
-
- if (vr->type == VR_RANGE)
- {
- *min_idx1 = i;
- *max_idx1 = j;
- return !take_default;
- }
-
- /* Set first range to all case labels. */
- *min_idx1 = 1;
- *max_idx1 = n - 1;
-
- if (i > j)
- return false;
-
- /* Make sure all the values of case labels [i , j] are contained in
- range [MIN, MAX]. */
- case_low = CASE_LOW (gimple_switch_label (stmt, i));
- case_high = CASE_HIGH (gimple_switch_label (stmt, j));
- if (tree_int_cst_compare (case_low, min) < 0)
- i += 1;
- if (case_high != NULL_TREE
- && tree_int_cst_compare (max, case_high) < 0)
- j -= 1;
-
- if (i > j)
- return false;
-
- /* If the range spans case labels [i, j], the corresponding anti-range spans
- the labels [1, i - 1] and [j + 1, n - 1]. */
- k = j + 1;
- l = n - 1;
- if (k > l)
- {
- k = 1;
- l = 0;
- }
-
- j = i - 1;
- i = 1;
- if (i > j)
- {
- i = k;
- j = l;
- k = 1;
- l = 0;
- }
-
- *min_idx1 = i;
- *max_idx1 = j;
- *min_idx2 = k;
- *max_idx2 = l;
- return false;
-}
-
-/* Visit switch statement STMT. If we can determine which edge
- will be taken out of STMT's basic block, record it in
- *TAKEN_EDGE_P. Otherwise, *TAKEN_EDGE_P set to NULL. */
-
-void
-vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
-{
- tree op, val;
- value_range *vr;
- size_t i = 0, j = 0, k, l;
- bool take_default;
-
- *taken_edge_p = NULL;
- op = gimple_switch_index (stmt);
- if (TREE_CODE (op) != SSA_NAME)
- return;
-
- vr = get_value_range (op);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting switch expression with operand ");
- print_generic_expr (dump_file, op);
- fprintf (dump_file, " with known range ");
- dump_value_range (dump_file, vr);
- fprintf (dump_file, "\n");
- }
-
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || symbolic_range_p (vr))
- return;
-
- /* Find the single edge that is taken from the switch expression. */
- take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
-
- /* Check if the range spans no CASE_LABEL. If so, we only reach the default
- label */
- if (j < i)
- {
- gcc_assert (take_default);
- val = gimple_switch_default_label (stmt);
- }
- else
- {
- /* Check if labels with index i to j and maybe the default label
- are all reaching the same label. */
-
- val = gimple_switch_label (stmt, i);
- if (take_default
- && CASE_LABEL (gimple_switch_default_label (stmt))
- != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- for (++i; i <= j; ++i)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- for (; k <= l; ++k)
- {
- if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, " not a single destination for this "
- "range\n");
- return;
- }
- }
- }
-
- *taken_edge_p = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (val)));
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, " will take edge to ");
- print_generic_stmt (dump_file, CASE_LABEL (val));
- }
-}
-
-
-/* Evaluate statement STMT. If the statement produces a useful range,
- set VR and corepsponding OUTPUT_P.
-
- If STMT is a conditional branch and we can determine its truth
- value, the taken edge is recorded in *TAKEN_EDGE_P. */
-
-void
-vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
- tree *output_p, value_range *vr)
-{
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\nVisiting statement:\n");
- print_gimple_stmt (dump_file, stmt, 0, dump_flags);
- }
-
- if (!stmt_interesting_for_vrp (stmt))
- gcc_assert (stmt_ends_bb_p (stmt));
- else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
- vrp_visit_assignment_or_call (stmt, output_p, vr);
- else if (gimple_code (stmt) == GIMPLE_COND)
- vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
- else if (gimple_code (stmt) == GIMPLE_SWITCH)
- vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
-}
-
-/* Evaluate statement STMT. If the statement produces a useful range,
- return SSA_PROP_INTERESTING and record the SSA name with the
- interesting range into *OUTPUT_P.
-
- If STMT is a conditional branch and we can determine its truth
- value, the taken edge is recorded in *TAKEN_EDGE_P.
-
- If STMT produces a varying value, return SSA_PROP_VARYING. */
-
-enum ssa_prop_result
-vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
-{
- value_range vr = VR_INITIALIZER;
- tree lhs = gimple_get_lhs (stmt);
- extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
-
- if (*output_p)
- {
- if (update_value_range (*output_p, &vr))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Found new range for ");
- print_generic_expr (dump_file, *output_p);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr);
- fprintf (dump_file, "\n");
- }
-
- if (vr.type == VR_VARYING)
- return SSA_PROP_VARYING;
-
- return SSA_PROP_INTERESTING;
- }
- return SSA_PROP_NOT_INTERESTING;
- }
-
- if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_ADD_OVERFLOW:
- case IFN_SUB_OVERFLOW:
- case IFN_MUL_OVERFLOW:
- case IFN_ATOMIC_COMPARE_EXCHANGE:
- /* These internal calls return _Complex integer type,
- which VRP does not track, but the immediate uses
- thereof might be interesting. */
- if (lhs && TREE_CODE (lhs) == SSA_NAME)
- {
- imm_use_iterator iter;
- use_operand_p use_p;
- enum ssa_prop_result res = SSA_PROP_VARYING;
-
- set_value_range_to_varying (get_value_range (lhs));
-
- FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
- {
- gimple *use_stmt = USE_STMT (use_p);
- if (!is_gimple_assign (use_stmt))
- continue;
- enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
- if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
- continue;
- tree rhs1 = gimple_assign_rhs1 (use_stmt);
- tree use_lhs = gimple_assign_lhs (use_stmt);
- if (TREE_CODE (rhs1) != rhs_code
- || TREE_OPERAND (rhs1, 0) != lhs
- || TREE_CODE (use_lhs) != SSA_NAME
- || !stmt_interesting_for_vrp (use_stmt)
- || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
- || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
- || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
- continue;
-
- /* If there is a change in the value range for any of the
- REALPART_EXPR/IMAGPART_EXPR immediate uses, return
- SSA_PROP_INTERESTING. If there are any REALPART_EXPR
- or IMAGPART_EXPR immediate uses, but none of them have
- a change in their value ranges, return
- SSA_PROP_NOT_INTERESTING. If there are no
- {REAL,IMAG}PART_EXPR uses at all,
- return SSA_PROP_VARYING. */
- value_range new_vr = VR_INITIALIZER;
- extract_range_basic (&new_vr, use_stmt);
- value_range *old_vr = get_value_range (use_lhs);
- if (old_vr->type != new_vr.type
- || !vrp_operand_equal_p (old_vr->min, new_vr.min)
- || !vrp_operand_equal_p (old_vr->max, new_vr.max)
- || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
- res = SSA_PROP_INTERESTING;
- else
- res = SSA_PROP_NOT_INTERESTING;
- BITMAP_FREE (new_vr.equiv);
- if (res == SSA_PROP_INTERESTING)
- {
- *output_p = lhs;
- return res;
- }
- }
-
- return res;
- }
- break;
- default:
- break;
- }
-
- /* All other statements produce nothing of interest for VRP, so mark
- their outputs varying and prevent further simulation. */
- set_defs_to_varying (stmt);
-
- return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
-}
-
-/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
- { VR1TYPE, VR0MIN, VR0MAX } and store the result
- in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
- possible such range. The resulting range is not canonicalized. */
-
-static void
-union_ranges (enum value_range_type *vr0type,
- tree *vr0min, tree *vr0max,
- enum value_range_type vr1type,
- tree vr1min, tree vr1max)
-{
- bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
- bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
-
- /* [] is vr0, () is vr1 in the following classification comments. */
- if (mineq && maxeq)
- {
- /* [( )] */
- if (*vr0type == vr1type)
- /* Nothing to do for equal ranges. */
- ;
- else if ((*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- || (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE))
- {
- /* For anti-range with range union the result is varying. */
- goto give_up;
- }
- else
- gcc_unreachable ();
- }
- else if (operand_less_p (*vr0max, vr1min) == 1
- || operand_less_p (vr1max, *vr0min) == 1)
- {
- /* [ ] ( ) or ( ) [ ]
- If the ranges have an empty intersection, result of the union
- operation is the anti-range or if both are anti-ranges
- it covers all. */
- if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- goto give_up;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- ;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- {
- /* The result is the convex hull of both ranges. */
- if (operand_less_p (*vr0max, vr1min) == 1)
- {
- /* If the result can be an anti-range, create one. */
- if (TREE_CODE (*vr0max) == INTEGER_CST
- && TREE_CODE (vr1min) == INTEGER_CST
- && vrp_val_is_min (*vr0min)
- && vrp_val_is_max (vr1max))
- {
- tree min = int_const_binop (PLUS_EXPR,
- *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), 1));
- tree max = int_const_binop (MINUS_EXPR,
- vr1min,
- build_int_cst (TREE_TYPE (vr1min), 1));
- if (!operand_less_p (max, min))
- {
- *vr0type = VR_ANTI_RANGE;
- *vr0min = min;
- *vr0max = max;
- }
- else
- *vr0max = vr1max;
- }
- else
- *vr0max = vr1max;
- }
- else
- {
- /* If the result can be an anti-range, create one. */
- if (TREE_CODE (vr1max) == INTEGER_CST
- && TREE_CODE (*vr0min) == INTEGER_CST
- && vrp_val_is_min (vr1min)
- && vrp_val_is_max (*vr0max))
- {
- tree min = int_const_binop (PLUS_EXPR,
- vr1max,
- build_int_cst (TREE_TYPE (vr1max), 1));
- tree max = int_const_binop (MINUS_EXPR,
- *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), 1));
- if (!operand_less_p (max, min))
- {
- *vr0type = VR_ANTI_RANGE;
- *vr0min = min;
- *vr0max = max;
- }
- else
- *vr0min = vr1min;
- }
- else
- *vr0min = vr1min;
- }
- }
- else
- gcc_unreachable ();
- }
- else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
- && (mineq || operand_less_p (*vr0min, vr1min) == 1))
- {
- /* [ ( ) ] or [( ) ] or [ ( )] */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- ;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- /* Arbitrarily choose the right or left gap. */
- if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, vr1min,
- build_int_cst (TREE_TYPE (vr1min), 1));
- else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max,
- build_int_cst (TREE_TYPE (vr1max), 1));
- else
- goto give_up;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- /* The result covers everything. */
- goto give_up;
- else
- gcc_unreachable ();
- }
- else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
- && (mineq || operand_less_p (vr1min, *vr0min) == 1))
- {
- /* ( [ ] ) or ([ ] ) or ( [ ]) */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- ;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- *vr0type = VR_ANTI_RANGE;
- if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
- {
- *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), 1));
- *vr0min = vr1min;
- }
- else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
- {
- *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), 1));
- *vr0max = vr1max;
- }
- else
- goto give_up;
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- /* The result covers everything. */
- goto give_up;
- else
- gcc_unreachable ();
- }
- else if ((operand_less_p (vr1min, *vr0max) == 1
- || operand_equal_p (vr1min, *vr0max, 0))
- && operand_less_p (*vr0min, vr1min) == 1
- && operand_less_p (*vr0max, vr1max) == 1)
- {
- /* [ ( ] ) or [ ]( ) */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- *vr0max = vr1max;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- *vr0min = vr1min;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- if (TREE_CODE (vr1min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, vr1min,
- build_int_cst (TREE_TYPE (vr1min), 1));
- else
- goto give_up;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- if (TREE_CODE (*vr0max) == INTEGER_CST)
- {
- *vr0type = vr1type;
- *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), 1));
- *vr0max = vr1max;
- }
- else
- goto give_up;
- }
- else
- gcc_unreachable ();
- }
- else if ((operand_less_p (*vr0min, vr1max) == 1
- || operand_equal_p (*vr0min, vr1max, 0))
- && operand_less_p (vr1min, *vr0min) == 1
- && operand_less_p (vr1max, *vr0max) == 1)
- {
- /* ( [ ) ] or ( )[ ] */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- *vr0min = vr1min;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- *vr0max = vr1max;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- if (TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max,
- build_int_cst (TREE_TYPE (vr1max), 1));
- else
- goto give_up;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- if (TREE_CODE (*vr0min) == INTEGER_CST)
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), 1));
- }
- else
- goto give_up;
- }
- else
- gcc_unreachable ();
- }
- else
- goto give_up;
-
- return;
-
-give_up:
- *vr0type = VR_VARYING;
- *vr0min = NULL_TREE;
- *vr0max = NULL_TREE;
-}
-
-/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
- { VR1TYPE, VR0MIN, VR0MAX } and store the result
- in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
- possible such range. The resulting range is not canonicalized. */
-
-static void
-intersect_ranges (enum value_range_type *vr0type,
- tree *vr0min, tree *vr0max,
- enum value_range_type vr1type,
- tree vr1min, tree vr1max)
-{
- bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
- bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
-
- /* [] is vr0, () is vr1 in the following classification comments. */
- if (mineq && maxeq)
- {
- /* [( )] */
- if (*vr0type == vr1type)
- /* Nothing to do for equal ranges. */
- ;
- else if ((*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- || (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE))
- {
- /* For anti-range with range intersection the result is empty. */
- *vr0type = VR_UNDEFINED;
- *vr0min = NULL_TREE;
- *vr0max = NULL_TREE;
- }
- else
- gcc_unreachable ();
- }
- else if (operand_less_p (*vr0max, vr1min) == 1
- || operand_less_p (vr1max, *vr0min) == 1)
- {
- /* [ ] ( ) or ( ) [ ]
- If the ranges have an empty intersection, the result of the
- intersect operation is the range for intersecting an
- anti-range with a range or empty when intersecting two ranges. */
- if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- ;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- {
- *vr0type = VR_UNDEFINED;
- *vr0min = NULL_TREE;
- *vr0max = NULL_TREE;
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- /* If the anti-ranges are adjacent to each other merge them. */
- if (TREE_CODE (*vr0max) == INTEGER_CST
- && TREE_CODE (vr1min) == INTEGER_CST
- && operand_less_p (*vr0max, vr1min) == 1
- && integer_onep (int_const_binop (MINUS_EXPR,
- vr1min, *vr0max)))
- *vr0max = vr1max;
- else if (TREE_CODE (vr1max) == INTEGER_CST
- && TREE_CODE (*vr0min) == INTEGER_CST
- && operand_less_p (vr1max, *vr0min) == 1
- && integer_onep (int_const_binop (MINUS_EXPR,
- *vr0min, vr1max)))
- *vr0min = vr1min;
- /* Else arbitrarily take VR0. */
- }
- }
- else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
- && (mineq || operand_less_p (*vr0min, vr1min) == 1))
- {
- /* [ ( ) ] or [( ) ] or [ ( )] */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- {
- /* If both are ranges the result is the inner one. */
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- /* Choose the right gap if the left one is empty. */
- if (mineq)
- {
- if (TREE_CODE (vr1max) != INTEGER_CST)
- *vr0min = vr1max;
- else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
- *vr0min
- = int_const_binop (MINUS_EXPR, vr1max,
- build_int_cst (TREE_TYPE (vr1max), -1));
- else
- *vr0min
- = int_const_binop (PLUS_EXPR, vr1max,
- build_int_cst (TREE_TYPE (vr1max), 1));
- }
- /* Choose the left gap if the right one is empty. */
- else if (maxeq)
- {
- if (TREE_CODE (vr1min) != INTEGER_CST)
- *vr0max = vr1min;
- else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
- *vr0max
- = int_const_binop (PLUS_EXPR, vr1min,
- build_int_cst (TREE_TYPE (vr1min), -1));
- else
- *vr0max
- = int_const_binop (MINUS_EXPR, vr1min,
- build_int_cst (TREE_TYPE (vr1min), 1));
- }
- /* Choose the anti-range if the range is effectively varying. */
- else if (vrp_val_is_min (*vr0min)
- && vrp_val_is_max (*vr0max))
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- /* Else choose the range. */
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- /* If both are anti-ranges the result is the outer one. */
- ;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- /* The intersection is empty. */
- *vr0type = VR_UNDEFINED;
- *vr0min = NULL_TREE;
- *vr0max = NULL_TREE;
- }
- else
- gcc_unreachable ();
- }
- else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
- && (mineq || operand_less_p (vr1min, *vr0min) == 1))
- {
- /* ( [ ] ) or ([ ] ) or ( [ ]) */
- if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- /* Choose the inner range. */
- ;
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- /* Choose the right gap if the left is empty. */
- if (mineq)
- {
- *vr0type = VR_RANGE;
- if (TREE_CODE (*vr0max) != INTEGER_CST)
- *vr0min = *vr0max;
- else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
- *vr0min
- = int_const_binop (MINUS_EXPR, *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), -1));
- else
- *vr0min
- = int_const_binop (PLUS_EXPR, *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), 1));
- *vr0max = vr1max;
- }
- /* Choose the left gap if the right is empty. */
- else if (maxeq)
- {
- *vr0type = VR_RANGE;
- if (TREE_CODE (*vr0min) != INTEGER_CST)
- *vr0max = *vr0min;
- else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
- && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
- *vr0max
- = int_const_binop (PLUS_EXPR, *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), -1));
- else
- *vr0max
- = int_const_binop (MINUS_EXPR, *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), 1));
- *vr0min = vr1min;
- }
- /* Choose the anti-range if the range is effectively varying. */
- else if (vrp_val_is_min (vr1min)
- && vrp_val_is_max (vr1max))
- ;
- /* Choose the anti-range if it is ~[0,0], that range is special
- enough to special case when vr1's range is relatively wide. */
- else if (*vr0min == *vr0max
- && integer_zerop (*vr0min)
- && (TYPE_PRECISION (TREE_TYPE (*vr0min))
- == TYPE_PRECISION (ptr_type_node))
- && TREE_CODE (vr1max) == INTEGER_CST
- && TREE_CODE (vr1min) == INTEGER_CST
- && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
- < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
- ;
- /* Else choose the range. */
- else
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- /* If both are anti-ranges the result is the outer one. */
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
- else if (vr1type == VR_ANTI_RANGE
- && *vr0type == VR_RANGE)
- {
- /* The intersection is empty. */
- *vr0type = VR_UNDEFINED;
- *vr0min = NULL_TREE;
- *vr0max = NULL_TREE;
- }
- else
- gcc_unreachable ();
+ tree res = gimple_phi_result (stmt);
+ return (!virtual_operand_p (res)
+ && (INTEGRAL_TYPE_P (TREE_TYPE (res))
+ || POINTER_TYPE_P (TREE_TYPE (res))));
}
- else if ((operand_less_p (vr1min, *vr0max) == 1
- || operand_equal_p (vr1min, *vr0max, 0))
- && operand_less_p (*vr0min, vr1min) == 1)
+ else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
{
- /* [ ( ] ) or [ ]( ) */
- if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- *vr0max = vr1max;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- *vr0min = vr1min;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
- {
- if (TREE_CODE (vr1min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, vr1min,
- build_int_cst (TREE_TYPE (vr1min), 1));
- else
- *vr0max = vr1min;
- }
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- *vr0type = VR_RANGE;
- if (TREE_CODE (*vr0max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
- build_int_cst (TREE_TYPE (*vr0max), 1));
- else
- *vr0min = *vr0max;
- *vr0max = vr1max;
- }
- else
- gcc_unreachable ();
+ tree lhs = gimple_get_lhs (stmt);
+
+ /* In general, assignments with virtual operands are not useful
+ for deriving ranges, with the obvious exception of calls to
+ builtin functions. */
+ if (lhs && TREE_CODE (lhs) == SSA_NAME
+ && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ || POINTER_TYPE_P (TREE_TYPE (lhs)))
+ && (is_gimple_call (stmt)
+ || !gimple_vuse (stmt)))
+ return true;
+ else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_ADD_OVERFLOW:
+ case IFN_SUB_OVERFLOW:
+ case IFN_MUL_OVERFLOW:
+ case IFN_ATOMIC_COMPARE_EXCHANGE:
+ /* These internal calls return _Complex integer type,
+ but are interesting to VRP nevertheless. */
+ if (lhs && TREE_CODE (lhs) == SSA_NAME)
+ return true;
+ break;
+ default:
+ break;
+ }
}
- else if ((operand_less_p (*vr0min, vr1max) == 1
- || operand_equal_p (*vr0min, vr1max, 0))
- && operand_less_p (vr1min, *vr0min) == 1)
+ else if (gimple_code (stmt) == GIMPLE_COND
+ || gimple_code (stmt) == GIMPLE_SWITCH)
+ return true;
+
+ return false;
+}
+
+/* Initialization required by ssa_propagate engine. */
+
+void
+vrp_prop::vrp_initialize ()
+{
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, cfun)
{
- /* ( [ ) ] or ( )[ ] */
- if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_ANTI_RANGE)
- *vr0min = vr1min;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_RANGE)
- *vr0max = vr1max;
- else if (*vr0type == VR_RANGE
- && vr1type == VR_ANTI_RANGE)
+ for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+ gsi_next (&si))
{
- if (TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max,
- build_int_cst (TREE_TYPE (vr1max), 1));
+ gphi *phi = si.phi ();
+ if (!stmt_interesting_for_vrp (phi))
+ {
+ tree lhs = PHI_RESULT (phi);
+ set_value_range_to_varying (get_value_range (lhs));
+ prop_set_simulate_again (phi, false);
+ }
else
- *vr0min = vr1max;
+ prop_set_simulate_again (phi, true);
}
- else if (*vr0type == VR_ANTI_RANGE
- && vr1type == VR_RANGE)
- {
- *vr0type = VR_RANGE;
- if (TREE_CODE (*vr0min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
- build_int_cst (TREE_TYPE (*vr0min), 1));
+
+ for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
+ gsi_next (&si))
+ {
+ gimple *stmt = gsi_stmt (si);
+
+ /* If the statement is a control insn, then we do not
+ want to avoid simulating the statement once. Failure
+ to do so means that those edges will never get added. */
+ if (stmt_ends_bb_p (stmt))
+ prop_set_simulate_again (stmt, true);
+ else if (!stmt_interesting_for_vrp (stmt))
+ {
+ set_defs_to_varying (stmt);
+ prop_set_simulate_again (stmt, false);
+ }
else
- *vr0max = *vr0min;
- *vr0min = vr1min;
+ prop_set_simulate_again (stmt, true);
}
- else
- gcc_unreachable ();
}
+}
- /* As a fallback simply use { *VRTYPE, *VR0MIN, *VR0MAX } as
- result for the intersection. That's always a conservative
- correct estimate unless VR1 is a constant singleton range
- in which case we choose that. */
- if (vr1type == VR_RANGE
- && is_gimple_min_invariant (vr1min)
- && vrp_operand_equal_p (vr1min, vr1max))
- {
- *vr0type = vr1type;
- *vr0min = vr1min;
- *vr0max = vr1max;
- }
+/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
+ that includes the value VAL. The search is restricted to the range
+ [START_IDX, n - 1] where n is the size of VEC.
- return;
-}
+ If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
+ returned.
+ If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
+ it is placed in IDX and false is returned.
-/* Intersect the two value-ranges *VR0 and *VR1 and store the result
- in *VR0. This may not be the smallest possible such range. */
+ If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
+ returned. */
-static void
-vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
+bool
+find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
{
- value_range saved;
+ size_t n = gimple_switch_num_labels (stmt);
+ size_t low, high;
- /* If either range is VR_VARYING the other one wins. */
- if (vr1->type == VR_VARYING)
- return;
- if (vr0->type == VR_VARYING)
- {
- copy_value_range (vr0, vr1);
- return;
- }
+ /* Find case label for minimum of the value range or the next one.
+ At each iteration we are searching in [low, high - 1]. */
- /* When either range is VR_UNDEFINED the resulting range is
- VR_UNDEFINED, too. */
- if (vr0->type == VR_UNDEFINED)
- return;
- if (vr1->type == VR_UNDEFINED)
+ for (low = start_idx, high = n; high != low; )
{
- set_value_range_to_undefined (vr0);
- return;
- }
+ tree t;
+ int cmp;
+ /* Note that i != high, so we never ask for n. */
+ size_t i = (high + low) / 2;
+ t = gimple_switch_label (stmt, i);
- /* Save the original vr0 so we can return it as conservative intersection
- result when our worker turns things to varying. */
- saved = *vr0;
- intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
- vr1->type, vr1->min, vr1->max);
- /* Make sure to canonicalize the result though as the inversion of a
- VR_RANGE can still be a VR_RANGE. */
- set_and_canonicalize_value_range (vr0, vr0->type,
- vr0->min, vr0->max, vr0->equiv);
- /* If that failed, use the saved original VR0. */
- if (vr0->type == VR_VARYING)
- {
- *vr0 = saved;
- return;
- }
- /* If the result is VR_UNDEFINED there is no need to mess with
- the equivalencies. */
- if (vr0->type == VR_UNDEFINED)
- return;
+ /* Cache the result of comparing CASE_LOW and val. */
+ cmp = tree_int_cst_compare (CASE_LOW (t), val);
- /* The resulting set of equivalences for range intersection is the union of
- the two sets. */
- if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
- bitmap_ior_into (vr0->equiv, vr1->equiv);
- else if (vr1->equiv && !vr0->equiv)
- {
- /* All equivalence bitmaps are allocated from the same obstack. So
- we can use the obstack associated with VR to allocate vr0->equiv. */
- vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
- bitmap_copy (vr0->equiv, vr1->equiv);
+ if (cmp == 0)
+ {
+ /* Ranges cannot be empty. */
+ *idx = i;
+ return true;
+ }
+ else if (cmp > 0)
+ high = i;
+ else
+ {
+ low = i + 1;
+ if (CASE_HIGH (t) != NULL
+ && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
+ {
+ *idx = i;
+ return true;
+ }
+ }
}
+
+ *idx = high;
+ return false;
}
-void
-vrp_intersect_ranges (value_range *vr0, value_range *vr1)
+/* Searches the case label vector VEC for the range of CASE_LABELs that is used
+ for values between MIN and MAX. The first index is placed in MIN_IDX. The
+ last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
+ then MAX_IDX < MIN_IDX.
+ Returns true if the default label is not needed. */
+
+bool
+find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
+ size_t *max_idx)
{
- if (dump_file && (dump_flags & TDF_DETAILS))
+ size_t i, j;
+ bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
+ bool max_take_default = !find_case_label_index (stmt, i, max, &j);
+
+ if (i == j
+ && min_take_default
+ && max_take_default)
{
- fprintf (dump_file, "Intersecting\n ");
- dump_value_range (dump_file, vr0);
- fprintf (dump_file, "\nand\n ");
- dump_value_range (dump_file, vr1);
- fprintf (dump_file, "\n");
+ /* Only the default case label reached.
+ Return an empty range. */
+ *min_idx = 1;
+ *max_idx = 0;
+ return false;
}
- vrp_intersect_ranges_1 (vr0, vr1);
- if (dump_file && (dump_flags & TDF_DETAILS))
+ else
{
- fprintf (dump_file, "to\n ");
- dump_value_range (dump_file, vr0);
- fprintf (dump_file, "\n");
+ bool take_default = min_take_default || max_take_default;
+ tree low, high;
+ size_t k;
+
+ if (max_take_default)
+ j--;
+
+ /* If the case label range is continuous, we do not need
+ the default case label. Verify that. */
+ high = CASE_LOW (gimple_switch_label (stmt, i));
+ if (CASE_HIGH (gimple_switch_label (stmt, i)))
+ high = CASE_HIGH (gimple_switch_label (stmt, i));
+ for (k = i + 1; k <= j; ++k)
+ {
+ low = CASE_LOW (gimple_switch_label (stmt, k));
+ if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
+ {
+ take_default = true;
+ break;
+ }
+ high = low;
+ if (CASE_HIGH (gimple_switch_label (stmt, k)))
+ high = CASE_HIGH (gimple_switch_label (stmt, k));
+ }
+
+ *min_idx = i;
+ *max_idx = j;
+ return !take_default;
}
}
-/* Meet operation for value ranges. Given two value ranges VR0 and
- VR1, store in VR0 a range that contains both VR0 and VR1. This
- may not be the smallest possible such range. */
+/* Evaluate statement STMT. If the statement produces a useful range,
+ return SSA_PROP_INTERESTING and record the SSA name with the
+ interesting range into *OUTPUT_P.
-static void
-vrp_meet_1 (value_range *vr0, const value_range *vr1)
+ If STMT is a conditional branch and we can determine its truth
+ value, the taken edge is recorded in *TAKEN_EDGE_P.
+
+ If STMT produces a varying value, return SSA_PROP_VARYING. */
+
+enum ssa_prop_result
+vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
{
- value_range saved;
+ value_range vr = VR_INITIALIZER;
+ tree lhs = gimple_get_lhs (stmt);
+ extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
- if (vr0->type == VR_UNDEFINED)
+ if (*output_p)
{
- set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
- return;
- }
+ if (update_value_range (*output_p, &vr))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Found new range for ");
+ print_generic_expr (dump_file, *output_p);
+ fprintf (dump_file, ": ");
+ dump_value_range (dump_file, &vr);
+ fprintf (dump_file, "\n");
+ }
- if (vr1->type == VR_UNDEFINED)
- {
- /* VR0 already has the resulting range. */
- return;
- }
+ if (vr.type == VR_VARYING)
+ return SSA_PROP_VARYING;
- if (vr0->type == VR_VARYING)
- {
- /* Nothing to do. VR0 already has the resulting range. */
- return;
+ return SSA_PROP_INTERESTING;
+ }
+ return SSA_PROP_NOT_INTERESTING;
}
- if (vr1->type == VR_VARYING)
- {
- set_value_range_to_varying (vr0);
- return;
- }
+ if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+ switch (gimple_call_internal_fn (stmt))
+ {
+ case IFN_ADD_OVERFLOW:
+ case IFN_SUB_OVERFLOW:
+ case IFN_MUL_OVERFLOW:
+ case IFN_ATOMIC_COMPARE_EXCHANGE:
+ /* These internal calls return _Complex integer type,
+ which VRP does not track, but the immediate uses
+ thereof might be interesting. */
+ if (lhs && TREE_CODE (lhs) == SSA_NAME)
+ {
+ imm_use_iterator iter;
+ use_operand_p use_p;
+ enum ssa_prop_result res = SSA_PROP_VARYING;
- saved = *vr0;
- union_ranges (&vr0->type, &vr0->min, &vr0->max,
- vr1->type, vr1->min, vr1->max);
- if (vr0->type == VR_VARYING)
- {
- /* Failed to find an efficient meet. Before giving up and setting
- the result to VARYING, see if we can at least derive a useful
- anti-range. FIXME, all this nonsense about distinguishing
- anti-ranges from ranges is necessary because of the odd
- semantics of range_includes_zero_p and friends. */
- if (((saved.type == VR_RANGE
- && range_includes_zero_p (saved.min, saved.max) == 0)
- || (saved.type == VR_ANTI_RANGE
- && range_includes_zero_p (saved.min, saved.max) == 1))
- && ((vr1->type == VR_RANGE
- && range_includes_zero_p (vr1->min, vr1->max) == 0)
- || (vr1->type == VR_ANTI_RANGE
- && range_includes_zero_p (vr1->min, vr1->max) == 1)))
- {
- set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
+ set_value_range_to_varying (get_value_range (lhs));
- /* Since this meet operation did not result from the meeting of
- two equivalent names, VR0 cannot have any equivalences. */
- if (vr0->equiv)
- bitmap_clear (vr0->equiv);
- return;
- }
+ FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
+ {
+ gimple *use_stmt = USE_STMT (use_p);
+ if (!is_gimple_assign (use_stmt))
+ continue;
+ enum tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
+ if (rhs_code != REALPART_EXPR && rhs_code != IMAGPART_EXPR)
+ continue;
+ tree rhs1 = gimple_assign_rhs1 (use_stmt);
+ tree use_lhs = gimple_assign_lhs (use_stmt);
+ if (TREE_CODE (rhs1) != rhs_code
+ || TREE_OPERAND (rhs1, 0) != lhs
+ || TREE_CODE (use_lhs) != SSA_NAME
+ || !stmt_interesting_for_vrp (use_stmt)
+ || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
+ || !TYPE_MIN_VALUE (TREE_TYPE (use_lhs))
+ || !TYPE_MAX_VALUE (TREE_TYPE (use_lhs))))
+ continue;
- set_value_range_to_varying (vr0);
- return;
- }
- set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
- vr0->equiv);
- if (vr0->type == VR_VARYING)
- return;
+ /* If there is a change in the value range for any of the
+ REALPART_EXPR/IMAGPART_EXPR immediate uses, return
+ SSA_PROP_INTERESTING. If there are any REALPART_EXPR
+ or IMAGPART_EXPR immediate uses, but none of them have
+ a change in their value ranges, return
+ SSA_PROP_NOT_INTERESTING. If there are no
+ {REAL,IMAG}PART_EXPR uses at all,
+ return SSA_PROP_VARYING. */
+ value_range new_vr = VR_INITIALIZER;
+ extract_range_basic (&new_vr, use_stmt);
+ value_range *old_vr = get_value_range (use_lhs);
+ if (old_vr->type != new_vr.type
+ || !vrp_operand_equal_p (old_vr->min, new_vr.min)
+ || !vrp_operand_equal_p (old_vr->max, new_vr.max)
+ || !vrp_bitmap_equal_p (old_vr->equiv, new_vr.equiv))
+ res = SSA_PROP_INTERESTING;
+ else
+ res = SSA_PROP_NOT_INTERESTING;
+ BITMAP_FREE (new_vr.equiv);
+ if (res == SSA_PROP_INTERESTING)
+ {
+ *output_p = lhs;
+ return res;
+ }
+ }
- /* The resulting set of equivalences is always the intersection of
- the two sets. */
- if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
- bitmap_and_into (vr0->equiv, vr1->equiv);
- else if (vr0->equiv && !vr1->equiv)
- bitmap_clear (vr0->equiv);
-}
+ return res;
+ }
+ break;
+ default:
+ break;
+ }
-void
-vrp_meet (value_range *vr0, const value_range *vr1)
-{
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Meeting\n ");
- dump_value_range (dump_file, vr0);
- fprintf (dump_file, "\nand\n ");
- dump_value_range (dump_file, vr1);
- fprintf (dump_file, "\n");
- }
- vrp_meet_1 (vr0, vr1);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "to\n ");
- dump_value_range (dump_file, vr0);
- fprintf (dump_file, "\n");
- }
-}
+ /* All other statements produce nothing of interest for VRP, so mark
+ their outputs varying and prevent further simulation. */
+ set_defs_to_varying (stmt);
+ return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
+}
-/* Visit all arguments for PHI node PHI that flow through executable
- edges. If a valid value range can be derived from all the incoming
- value ranges, set a new range in VR_RESULT. */
+/* Union the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
+   { VR1TYPE, VR1MIN, VR1MAX } and store the result
+ in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
+ possible such range. The resulting range is not canonicalized. */
-void
-vr_values::extract_range_from_phi_node (gphi *phi, value_range *vr_result)
+static void
+union_ranges (enum value_range_type *vr0type,
+ tree *vr0min, tree *vr0max,
+ enum value_range_type vr1type,
+ tree vr1min, tree vr1max)
{
- size_t i;
- tree lhs = PHI_RESULT (phi);
- value_range *lhs_vr = get_value_range (lhs);
- bool first = true;
- int edges, old_edges;
- struct loop *l;
+ bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+ bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
- if (dump_file && (dump_flags & TDF_DETAILS))
+ /* [] is vr0, () is vr1 in the following classification comments. */
+ if (mineq && maxeq)
{
- fprintf (dump_file, "\nVisiting PHI node: ");
- print_gimple_stmt (dump_file, phi, 0, dump_flags);
+ /* [( )] */
+ if (*vr0type == vr1type)
+ /* Nothing to do for equal ranges. */
+ ;
+ else if ((*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ || (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE))
+ {
+ /* For anti-range with range union the result is varying. */
+ goto give_up;
+ }
+ else
+ gcc_unreachable ();
}
-
- bool may_simulate_backedge_again = false;
- edges = 0;
- for (i = 0; i < gimple_phi_num_args (phi); i++)
+ else if (operand_less_p (*vr0max, vr1min) == 1
+ || operand_less_p (vr1max, *vr0min) == 1)
{
- edge e = gimple_phi_arg_edge (phi, i);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
+ /* [ ] ( ) or ( ) [ ]
+ If the ranges have an empty intersection, result of the union
+ operation is the anti-range or if both are anti-ranges
+ it covers all. */
+ if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ goto give_up;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
+ ;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- fprintf (dump_file,
- " Argument #%d (%d -> %d %sexecutable)\n",
- (int) i, e->src->index, e->dest->index,
- (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
-
- if (e->flags & EDGE_EXECUTABLE)
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
{
- tree arg = PHI_ARG_DEF (phi, i);
- value_range vr_arg;
-
- ++edges;
-
- if (TREE_CODE (arg) == SSA_NAME)
+ /* The result is the convex hull of both ranges. */
+ if (operand_less_p (*vr0max, vr1min) == 1)
{
- /* See if we are eventually going to change one of the args. */
- gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
- if (! gimple_nop_p (def_stmt)
- && prop_simulate_again_p (def_stmt)
- && e->flags & EDGE_DFS_BACK)
- may_simulate_backedge_again = true;
-
- vr_arg = *(get_value_range (arg));
- /* Do not allow equivalences or symbolic ranges to leak in from
- backedges. That creates invalid equivalencies.
- See PR53465 and PR54767. */
- if (e->flags & EDGE_DFS_BACK)
+ /* If the result can be an anti-range, create one. */
+ if (TREE_CODE (*vr0max) == INTEGER_CST
+ && TREE_CODE (vr1min) == INTEGER_CST
+ && vrp_val_is_min (*vr0min)
+ && vrp_val_is_max (vr1max))
{
- if (vr_arg.type == VR_RANGE
- || vr_arg.type == VR_ANTI_RANGE)
+ tree min = int_const_binop (PLUS_EXPR,
+ *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
+ tree max = int_const_binop (MINUS_EXPR,
+ vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
+ if (!operand_less_p (max, min))
{
- vr_arg.equiv = NULL;
- if (symbolic_range_p (&vr_arg))
- {
- vr_arg.type = VR_VARYING;
- vr_arg.min = NULL_TREE;
- vr_arg.max = NULL_TREE;
- }
+ *vr0type = VR_ANTI_RANGE;
+ *vr0min = min;
+ *vr0max = max;
}
+ else
+ *vr0max = vr1max;
}
else
+ *vr0max = vr1max;
+ }
+ else
+ {
+ /* If the result can be an anti-range, create one. */
+ if (TREE_CODE (vr1max) == INTEGER_CST
+ && TREE_CODE (*vr0min) == INTEGER_CST
+ && vrp_val_is_min (vr1min)
+ && vrp_val_is_max (*vr0max))
{
- /* If the non-backedge arguments range is VR_VARYING then
- we can still try recording a simple equivalence. */
- if (vr_arg.type == VR_VARYING)
+ tree min = int_const_binop (PLUS_EXPR,
+ vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
+ tree max = int_const_binop (MINUS_EXPR,
+ *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
+ if (!operand_less_p (max, min))
{
- vr_arg.type = VR_RANGE;
- vr_arg.min = arg;
- vr_arg.max = arg;
- vr_arg.equiv = NULL;
+ *vr0type = VR_ANTI_RANGE;
+ *vr0min = min;
+ *vr0max = max;
}
+ else
+ *vr0min = vr1min;
}
+ else
+ *vr0min = vr1min;
}
- else
- {
- if (TREE_OVERFLOW_P (arg))
- arg = drop_tree_overflow (arg);
-
- vr_arg.type = VR_RANGE;
- vr_arg.min = arg;
- vr_arg.max = arg;
- vr_arg.equiv = NULL;
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "\t");
- print_generic_expr (dump_file, arg, dump_flags);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr_arg);
- fprintf (dump_file, "\n");
- }
-
- if (first)
- copy_value_range (vr_result, &vr_arg);
- else
- vrp_meet (vr_result, &vr_arg);
- first = false;
-
- if (vr_result->type == VR_VARYING)
- break;
- }
- }
-
- if (vr_result->type == VR_VARYING)
- goto varying;
- else if (vr_result->type == VR_UNDEFINED)
- goto update_range;
-
- old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
- vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
-
- /* To prevent infinite iterations in the algorithm, derive ranges
- when the new value is slightly bigger or smaller than the
- previous one. We don't do this if we have seen a new executable
- edge; this helps us avoid an infinity for conditionals
- which are not in a loop. If the old value-range was VR_UNDEFINED
- use the updated range and iterate one more time. If we will not
- simulate this PHI again via the backedge allow us to iterate. */
- if (edges > 0
- && gimple_phi_num_args (phi) > 1
- && edges == old_edges
- && lhs_vr->type != VR_UNDEFINED
- && may_simulate_backedge_again)
- {
- /* Compare old and new ranges, fall back to varying if the
- values are not comparable. */
- int cmp_min = compare_values (lhs_vr->min, vr_result->min);
- if (cmp_min == -2)
- goto varying;
- int cmp_max = compare_values (lhs_vr->max, vr_result->max);
- if (cmp_max == -2)
- goto varying;
-
- /* For non VR_RANGE or for pointers fall back to varying if
- the range changed. */
- if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
- || POINTER_TYPE_P (TREE_TYPE (lhs)))
- && (cmp_min != 0 || cmp_max != 0))
- goto varying;
-
- /* If the new minimum is larger than the previous one
- retain the old value. If the new minimum value is smaller
- than the previous one and not -INF go all the way to -INF + 1.
- In the first case, to avoid infinite bouncing between different
- minimums, and in the other case to avoid iterating millions of
- times to reach -INF. Going to -INF + 1 also lets the following
- iteration compute whether there will be any overflow, at the
- expense of one additional iteration. */
- if (cmp_min < 0)
- vr_result->min = lhs_vr->min;
- else if (cmp_min > 0
- && !vrp_val_is_min (vr_result->min))
- vr_result->min
- = int_const_binop (PLUS_EXPR,
- vrp_val_min (TREE_TYPE (vr_result->min)),
- build_int_cst (TREE_TYPE (vr_result->min), 1));
-
- /* Similarly for the maximum value. */
- if (cmp_max > 0)
- vr_result->max = lhs_vr->max;
- else if (cmp_max < 0
- && !vrp_val_is_max (vr_result->max))
- vr_result->max
- = int_const_binop (MINUS_EXPR,
- vrp_val_max (TREE_TYPE (vr_result->min)),
- build_int_cst (TREE_TYPE (vr_result->min), 1));
-
- /* If we dropped either bound to +-INF then if this is a loop
- PHI node SCEV may known more about its value-range. */
- if (cmp_min > 0 || cmp_min < 0
- || cmp_max < 0 || cmp_max > 0)
- goto scev_check;
-
- goto infinite_check;
- }
-
- goto update_range;
-
-varying:
- set_value_range_to_varying (vr_result);
-
-scev_check:
- /* If this is a loop PHI node SCEV may known more about its value-range.
- scev_check can be reached from two paths, one is a fall through from above
- "varying" label, the other is direct goto from code block which tries to
- avoid infinite simulation. */
- if ((l = loop_containing_stmt (phi))
- && l->header == gimple_bb (phi))
- adjust_range_with_scev (vr_result, l, phi, lhs);
-
-infinite_check:
- /* If we will end up with a (-INF, +INF) range, set it to
- VARYING. Same if the previous max value was invalid for
- the type and we end up with vr_result.min > vr_result.max. */
- if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
- && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
- || compare_values (vr_result->min, vr_result->max) > 0))
- ;
- else
- set_value_range_to_varying (vr_result);
-
- /* If the new range is different than the previous value, keep
- iterating. */
-update_range:
- return;
-}
-
-/* Visit all arguments for PHI node PHI that flow through executable
- edges. If a valid value range can be derived from all the incoming
- value ranges, set a new range for the LHS of PHI. */
-
-enum ssa_prop_result
-vrp_prop::visit_phi (gphi *phi)
-{
- tree lhs = PHI_RESULT (phi);
- value_range vr_result = VR_INITIALIZER;
- extract_range_from_phi_node (phi, &vr_result);
- if (update_value_range (lhs, &vr_result))
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Found new range for ");
- print_generic_expr (dump_file, lhs);
- fprintf (dump_file, ": ");
- dump_value_range (dump_file, &vr_result);
- fprintf (dump_file, "\n");
}
-
- if (vr_result.type == VR_VARYING)
- return SSA_PROP_VARYING;
-
- return SSA_PROP_INTERESTING;
- }
-
- /* Nothing changed, don't add outgoing edges. */
- return SSA_PROP_NOT_INTERESTING;
-}
-
-/* Simplify boolean operations if the source is known
- to be already a boolean. */
-bool
-vr_values::simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree lhs, op0, op1;
- bool need_conversion;
-
- /* We handle only !=/== case here. */
- gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
-
- op0 = gimple_assign_rhs1 (stmt);
- if (!op_with_boolean_value_range_p (op0))
- return false;
-
- op1 = gimple_assign_rhs2 (stmt);
- if (!op_with_boolean_value_range_p (op1))
- return false;
-
- /* Reduce number of cases to handle to NE_EXPR. As there is no
- BIT_XNOR_EXPR we cannot replace A == B with a single statement. */
- if (rhs_code == EQ_EXPR)
- {
- if (TREE_CODE (op1) == INTEGER_CST)
- op1 = int_const_binop (BIT_XOR_EXPR, op1,
- build_int_cst (TREE_TYPE (op1), 1));
else
- return false;
- }
-
- lhs = gimple_assign_lhs (stmt);
- need_conversion
- = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
-
- /* Make sure to not sign-extend a 1-bit 1 when converting the result. */
- if (need_conversion
- && !TYPE_UNSIGNED (TREE_TYPE (op0))
- && TYPE_PRECISION (TREE_TYPE (op0)) == 1
- && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
- return false;
-
- /* For A != 0 we can substitute A itself. */
- if (integer_zerop (op1))
- gimple_assign_set_rhs_with_ops (gsi,
- need_conversion
- ? NOP_EXPR : TREE_CODE (op0), op0);
- /* For A != B we substitute A ^ B. Either with conversion. */
- else if (need_conversion)
- {
- tree tem = make_ssa_name (TREE_TYPE (op0));
- gassign *newop
- = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
- gsi_insert_before (gsi, newop, GSI_SAME_STMT);
- if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
- && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
- set_range_info (tem, VR_RANGE,
- wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
- wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
- gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
- }
- /* Or without. */
- else
- gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
- update_stmt (gsi_stmt (*gsi));
- fold_stmt (gsi, follow_single_use_edges);
-
- return true;
-}
-
-/* Simplify a division or modulo operator to a right shift or bitwise and
- if the first operand is unsigned or is greater than zero and the second
- operand is an exact power of two. For TRUNC_MOD_EXPR op0 % op1 with
- constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
- optimize it into just op0 if op0's range is known to be a subset of
- [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
- modulo. */
-
-bool
-vr_values::simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree val = NULL;
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- tree op0min = NULL_TREE, op0max = NULL_TREE;
- tree op1min = op1;
- value_range *vr = NULL;
-
- if (TREE_CODE (op0) == INTEGER_CST)
- {
- op0min = op0;
- op0max = op0;
- }
- else
- {
- vr = get_value_range (op0);
- if (range_int_cst_p (vr))
- {
- op0min = vr->min;
- op0max = vr->max;
- }
- }
-
- if (rhs_code == TRUNC_MOD_EXPR
- && TREE_CODE (op1) == SSA_NAME)
- {
- value_range *vr1 = get_value_range (op1);
- if (range_int_cst_p (vr1))
- op1min = vr1->min;
+ gcc_unreachable ();
}
- if (rhs_code == TRUNC_MOD_EXPR
- && TREE_CODE (op1min) == INTEGER_CST
- && tree_int_cst_sgn (op1min) == 1
- && op0max
- && tree_int_cst_lt (op0max, op1min))
+ else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
+ && (mineq || operand_less_p (*vr0min, vr1min) == 1))
{
- if (TYPE_UNSIGNED (TREE_TYPE (op0))
- || tree_int_cst_sgn (op0min) >= 0
- || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
- op0min))
+ /* [ ( ) ] or [( ) ] or [ ( )] */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ ;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- /* If op0 already has the range op0 % op1 has,
- then TRUNC_MOD_EXPR won't change anything. */
- gimple_assign_set_rhs_from_tree (gsi, op0);
- return true;
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
- }
-
- if (TREE_CODE (op0) != SSA_NAME)
- return false;
-
- if (!integer_pow2p (op1))
- {
- /* X % -Y can be only optimized into X % Y either if
- X is not INT_MIN, or Y is not -1. Fold it now, as after
- remove_range_assertions the range info might be not available
- anymore. */
- if (rhs_code == TRUNC_MOD_EXPR
- && fold_stmt (gsi, follow_single_use_edges))
- return true;
- return false;
- }
-
- if (TYPE_UNSIGNED (TREE_TYPE (op0)))
- val = integer_one_node;
- else
- {
- bool sop = false;
-
- val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
-
- if (val
- && sop
- && integer_onep (val)
- && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
+ /* Arbitrarily choose the right or left gap. */
+ if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
+ *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
+ else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
+ goto give_up;
}
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ /* The result covers everything. */
+ goto give_up;
+ else
+ gcc_unreachable ();
}
-
- if (val && integer_onep (val))
+ else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
+ && (mineq || operand_less_p (vr1min, *vr0min) == 1))
{
- tree t;
-
- if (rhs_code == TRUNC_DIV_EXPR)
+ /* ( [ ] ) or ([ ] ) or ( [ ]) */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
{
- t = build_int_cst (integer_type_node, tree_log2 (op1));
- gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
- gimple_assign_set_rhs1 (stmt, op0);
- gimple_assign_set_rhs2 (stmt, t);
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
- else
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ ;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- t = build_int_cst (TREE_TYPE (op1), 1);
- t = int_const_binop (MINUS_EXPR, op1, t);
- t = fold_convert (TREE_TYPE (op0), t);
-
- gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
- gimple_assign_set_rhs1 (stmt, op0);
- gimple_assign_set_rhs2 (stmt, t);
+ *vr0type = VR_ANTI_RANGE;
+ if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
+ {
+ *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
+ *vr0min = vr1min;
+ }
+ else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
+ {
+ *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
+ *vr0max = vr1max;
+ }
+ else
+ goto give_up;
}
-
- update_stmt (stmt);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
- }
-
- return false;
-}
-
-/* Simplify a min or max if the ranges of the two operands are
- disjoint. Return true if we do simplify. */
-
-bool
-vr_values::simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- bool sop = false;
- tree val;
-
- val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (LE_EXPR, op0, op1, &sop));
- if (!val)
- {
- sop = false;
- val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
- (LT_EXPR, op0, op1, &sop));
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
+ /* The result covers everything. */
+ goto give_up;
+ else
+ gcc_unreachable ();
}
-
- if (val)
+ else if ((operand_less_p (vr1min, *vr0max) == 1
+ || operand_equal_p (vr1min, *vr0max, 0))
+ && operand_less_p (*vr0min, vr1min) == 1
+ && operand_less_p (*vr0max, vr1max) == 1)
{
- if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ /* [ ( ] ) or [ ]( ) */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ *vr0max = vr1max;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ *vr0min = vr1min;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
+ if (TREE_CODE (vr1min) == INTEGER_CST)
+ *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
+ goto give_up;
}
-
- /* VAL == TRUE -> OP0 < or <= op1
- VAL == FALSE -> OP0 > or >= op1. */
- tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
- == integer_zerop (val)) ? op0 : op1;
- gimple_assign_set_rhs_from_tree (gsi, res);
- return true;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ {
+ if (TREE_CODE (*vr0max) == INTEGER_CST)
+ {
+ *vr0type = vr1type;
+ *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
+ *vr0max = vr1max;
+ }
+ else
+ goto give_up;
+ }
+ else
+ gcc_unreachable ();
}
-
- return false;
-}
-
-/* If the operand to an ABS_EXPR is >= 0, then eliminate the
- ABS_EXPR. If the operand is <= 0, then simplify the
- ABS_EXPR into a NEGATE_EXPR. */
-
-bool
-vr_values::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
-{
- tree op = gimple_assign_rhs1 (stmt);
- value_range *vr = get_value_range (op);
-
- if (vr)
+ else if ((operand_less_p (*vr0min, vr1max) == 1
+ || operand_equal_p (*vr0min, vr1max, 0))
+ && operand_less_p (vr1min, *vr0min) == 1
+ && operand_less_p (vr1max, *vr0max) == 1)
{
- tree val = NULL;
- bool sop = false;
-
- val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
- if (!val)
+ /* ( [ ) ] or ( )[ ] */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ *vr0min = vr1min;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ *vr0max = vr1max;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- /* The range is neither <= 0 nor > 0. Now see if it is
- either < 0 or >= 0. */
- sop = false;
- val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
- &sop);
+ if (TREE_CODE (vr1max) == INTEGER_CST)
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
+ else
+ goto give_up;
}
-
- if (val)
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+ if (TREE_CODE (*vr0min) == INTEGER_CST)
{
- location_t location;
-
- if (!gimple_has_location (stmt))
- location = input_location;
- else
- location = gimple_location (stmt);
- warning_at (location, OPT_Wstrict_overflow,
- "assuming signed overflow does not occur when "
- "simplifying %<abs (X)%> to %<X%> or %<-X%>");
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
}
-
- gimple_assign_set_rhs1 (stmt, op);
- if (integer_zerop (val))
- gimple_assign_set_rhs_code (stmt, SSA_NAME);
else
- gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
- update_stmt (stmt);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
+ goto give_up;
}
+ else
+ gcc_unreachable ();
}
+ else
+ goto give_up;
- return false;
+ return;
+
+give_up:
+ *vr0type = VR_VARYING;
+ *vr0min = NULL_TREE;
+ *vr0max = NULL_TREE;
}
-/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
- If all the bits that are being cleared by & are already
- known to be zero from VR, or all the bits that are being
- set by | are already known to be one from VR, the bit
- operation is redundant. */
+/* Intersect the two value-ranges { *VR0TYPE, *VR0MIN, *VR0MAX } and
+ { VR1TYPE, VR1MIN, VR1MAX } and store the result
+ in { *VR0TYPE, *VR0MIN, *VR0MAX }. This may not be the smallest
+ possible such range. The resulting range is not canonicalized. */
-bool
-vr_values::simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
+static void
+intersect_ranges (enum value_range_type *vr0type,
+ tree *vr0min, tree *vr0max,
+ enum value_range_type vr1type,
+ tree vr1min, tree vr1max)
{
- tree op0 = gimple_assign_rhs1 (stmt);
- tree op1 = gimple_assign_rhs2 (stmt);
- tree op = NULL_TREE;
- value_range vr0 = VR_INITIALIZER;
- value_range vr1 = VR_INITIALIZER;
- wide_int may_be_nonzero0, may_be_nonzero1;
- wide_int must_be_nonzero0, must_be_nonzero1;
- wide_int mask;
-
- if (TREE_CODE (op0) == SSA_NAME)
- vr0 = *(get_value_range (op0));
- else if (is_gimple_min_invariant (op0))
- set_value_range_to_value (&vr0, op0, NULL);
- else
- return false;
-
- if (TREE_CODE (op1) == SSA_NAME)
- vr1 = *(get_value_range (op1));
- else if (is_gimple_min_invariant (op1))
- set_value_range_to_value (&vr1, op1, NULL);
- else
- return false;
-
- if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
- &must_be_nonzero0))
- return false;
- if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
- &must_be_nonzero1))
- return false;
+ bool mineq = vrp_operand_equal_p (*vr0min, vr1min);
+ bool maxeq = vrp_operand_equal_p (*vr0max, vr1max);
- switch (gimple_assign_rhs_code (stmt))
+ /* [] is vr0, () is vr1 in the following classification comments. */
+ if (mineq && maxeq)
{
- case BIT_AND_EXPR:
- mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
- if (mask == 0)
- {
- op = op0;
- break;
- }
- mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
- if (mask == 0)
+ /* [( )] */
+ if (*vr0type == vr1type)
+ /* Nothing to do for equal ranges. */
+ ;
+ else if ((*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ || (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE))
{
- op = op1;
- break;
+ /* For anti-range with range intersection the result is empty. */
+ *vr0type = VR_UNDEFINED;
+ *vr0min = NULL_TREE;
+ *vr0max = NULL_TREE;
}
- break;
- case BIT_IOR_EXPR:
- mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
- if (mask == 0)
+ else
+ gcc_unreachable ();
+ }
+ else if (operand_less_p (*vr0max, vr1min) == 1
+ || operand_less_p (vr1max, *vr0min) == 1)
+ {
+ /* [ ] ( ) or ( ) [ ]
+ If the ranges have an empty intersection, the result of the
+ intersect operation is the range for intersecting an
+ anti-range with a range or empty when intersecting two ranges. */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ ;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- op = op1;
- break;
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
- mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
- if (mask == 0)
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
{
- op = op0;
- break;
+ *vr0type = VR_UNDEFINED;
+ *vr0min = NULL_TREE;
+ *vr0max = NULL_TREE;
}
- break;
- default:
- gcc_unreachable ();
- }
-
- if (op == NULL_TREE)
- return false;
-
- gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
- update_stmt (gsi_stmt (*gsi));
- return true;
-}
-
-/* We are comparing trees OP0 and OP1 using COND_CODE. OP0 has
- a known value range VR.
-
- If there is one and only one value which will satisfy the
- conditional, then return that value. Else return NULL.
-
- If signed overflow must be undefined for the value to satisfy
- the conditional, then set *STRICT_OVERFLOW_P to true. */
-
-static tree
-test_for_singularity (enum tree_code cond_code, tree op0,
- tree op1, value_range *vr)
-{
- tree min = NULL;
- tree max = NULL;
-
- /* Extract minimum/maximum values which satisfy the conditional as it was
- written. */
- if (cond_code == LE_EXPR || cond_code == LT_EXPR)
- {
- min = TYPE_MIN_VALUE (TREE_TYPE (op0));
-
- max = op1;
- if (cond_code == LT_EXPR)
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- tree one = build_int_cst (TREE_TYPE (op0), 1);
- max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (max))
- TREE_NO_WARNING (max) = 1;
+ /* If the anti-ranges are adjacent to each other merge them. */
+ if (TREE_CODE (*vr0max) == INTEGER_CST
+ && TREE_CODE (vr1min) == INTEGER_CST
+ && operand_less_p (*vr0max, vr1min) == 1
+ && integer_onep (int_const_binop (MINUS_EXPR,
+ vr1min, *vr0max)))
+ *vr0max = vr1max;
+ else if (TREE_CODE (vr1max) == INTEGER_CST
+ && TREE_CODE (*vr0min) == INTEGER_CST
+ && operand_less_p (vr1max, *vr0min) == 1
+ && integer_onep (int_const_binop (MINUS_EXPR,
+ *vr0min, vr1max)))
+ *vr0min = vr1min;
+ /* Else arbitrarily take VR0. */
}
}
- else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+ else if ((maxeq || operand_less_p (vr1max, *vr0max) == 1)
+ && (mineq || operand_less_p (*vr0min, vr1min) == 1))
{
- max = TYPE_MAX_VALUE (TREE_TYPE (op0));
-
- min = op1;
- if (cond_code == GT_EXPR)
+ /* [ ( ) ] or [( ) ] or [ ( )] */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
{
- tree one = build_int_cst (TREE_TYPE (op0), 1);
- min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
- /* Signal to compare_values_warnv this expr doesn't overflow. */
- if (EXPR_P (min))
- TREE_NO_WARNING (min) = 1;
+ /* If both are ranges the result is the inner one. */
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
- }
-
- /* Now refine the minimum and maximum values using any
- value range information we have for op0. */
- if (min && max)
- {
- if (compare_values (vr->min, min) == 1)
- min = vr->min;
- if (compare_values (vr->max, max) == -1)
- max = vr->max;
-
- /* If the new min/max values have converged to a single value,
- then there is only one value which can satisfy the condition,
- return that value. */
- if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
- return min;
- }
- return NULL;
-}
-
-/* Return whether the value range *VR fits in an integer type specified
- by PRECISION and UNSIGNED_P. */
-
-static bool
-range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
-{
- tree src_type;
- unsigned src_precision;
- widest_int tem;
- signop src_sgn;
-
- /* We can only handle integral and pointer types. */
- src_type = TREE_TYPE (vr->min);
- if (!INTEGRAL_TYPE_P (src_type)
- && !POINTER_TYPE_P (src_type))
- return false;
-
- /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
- and so is an identity transform. */
- src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
- src_sgn = TYPE_SIGN (src_type);
- if ((src_precision < dest_precision
- && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
- || (src_precision == dest_precision && src_sgn == dest_sgn))
- return true;
-
- /* Now we can only handle ranges with constant bounds. */
- if (vr->type != VR_RANGE
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
-
- /* For sign changes, the MSB of the wide_int has to be clear.
- An unsigned value with its MSB set cannot be represented by
- a signed wide_int, while a negative value cannot be represented
- by an unsigned wide_int. */
- if (src_sgn != dest_sgn
- && (wi::lts_p (wi::to_wide (vr->min), 0)
- || wi::lts_p (wi::to_wide (vr->max), 0)))
- return false;
-
- /* Then we can perform the conversion on both ends and compare
- the result for equality. */
- tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
- if (tem != wi::to_widest (vr->min))
- return false;
- tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
- if (tem != wi::to_widest (vr->max))
- return false;
-
- return true;
-}
-
-/* Simplify a conditional using a relational operator to an equality
- test if the range information indicates only one value can satisfy
- the original conditional. */
-
-bool
-vr_values::simplify_cond_using_ranges_1 (gcond *stmt)
-{
- tree op0 = gimple_cond_lhs (stmt);
- tree op1 = gimple_cond_rhs (stmt);
- enum tree_code cond_code = gimple_cond_code (stmt);
-
- if (cond_code != NE_EXPR
- && cond_code != EQ_EXPR
- && TREE_CODE (op0) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (op0))
- && is_gimple_min_invariant (op1))
- {
- value_range *vr = get_value_range (op0);
-
- /* If we have range information for OP0, then we might be
- able to simplify this conditional. */
- if (vr->type == VR_RANGE)
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
- if (new_tree)
+ /* Choose the right gap if the left one is empty. */
+ if (mineq)
{
- if (dump_file)
- {
- fprintf (dump_file, "Simplified relational ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, " into ");
- }
-
- gimple_cond_set_code (stmt, EQ_EXPR);
- gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new_tree);
-
- update_stmt (stmt);
-
- if (dump_file)
- {
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
-
- return true;
+ if (TREE_CODE (vr1max) != INTEGER_CST)
+ *vr0min = vr1max;
+ else if (TYPE_PRECISION (TREE_TYPE (vr1max)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (vr1max)))
+ *vr0min
+ = int_const_binop (MINUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), -1));
+ else
+ *vr0min
+ = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
+ }
+ /* Choose the left gap if the right one is empty. */
+ else if (maxeq)
+ {
+ if (TREE_CODE (vr1min) != INTEGER_CST)
+ *vr0max = vr1min;
+ else if (TYPE_PRECISION (TREE_TYPE (vr1min)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (vr1min)))
+ *vr0max
+ = int_const_binop (PLUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), -1));
+ else
+ *vr0max
+ = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
}
-
- /* Try again after inverting the condition. We only deal
- with integral types here, so no need to worry about
- issues with inverting FP comparisons. */
- new_tree = test_for_singularity
- (invert_tree_comparison (cond_code, false),
- op0, op1, vr);
- if (new_tree)
+ /* Choose the anti-range if the range is effectively varying. */
+ else if (vrp_val_is_min (*vr0min)
+ && vrp_val_is_max (*vr0max))
{
- if (dump_file)
- {
- fprintf (dump_file, "Simplified relational ");
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, " into ");
- }
-
- gimple_cond_set_code (stmt, NE_EXPR);
- gimple_cond_set_lhs (stmt, op0);
- gimple_cond_set_rhs (stmt, new_tree);
-
- update_stmt (stmt);
-
- if (dump_file)
- {
- print_gimple_stmt (dump_file, stmt, 0);
- fprintf (dump_file, "\n");
- }
-
- return true;
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
+ /* Else choose the range. */
+ }
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ /* If both are anti-ranges the result is the outer one. */
+ ;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
+ {
+ /* The intersection is empty. */
+ *vr0type = VR_UNDEFINED;
+ *vr0min = NULL_TREE;
+ *vr0max = NULL_TREE;
}
+ else
+ gcc_unreachable ();
}
- return false;
-}
-
-/* STMT is a conditional at the end of a basic block.
-
- If the conditional is of the form SSA_NAME op constant and the SSA_NAME
- was set via a type conversion, try to replace the SSA_NAME with the RHS
- of the type conversion. Doing so makes the conversion dead which helps
- subsequent passes. */
-
-void
-vr_values::simplify_cond_using_ranges_2 (gcond *stmt)
-{
- tree op0 = gimple_cond_lhs (stmt);
- tree op1 = gimple_cond_rhs (stmt);
-
- /* If we have a comparison of an SSA_NAME (OP0) against a constant,
- see if OP0 was set by a type conversion where the source of
- the conversion is another SSA_NAME with a range that fits
- into the range of OP0's type.
-
- If so, the conversion is redundant as the earlier SSA_NAME can be
- used for the comparison directly if we just massage the constant in the
- comparison. */
- if (TREE_CODE (op0) == SSA_NAME
- && TREE_CODE (op1) == INTEGER_CST)
+ else if ((maxeq || operand_less_p (*vr0max, vr1max) == 1)
+ && (mineq || operand_less_p (vr1min, *vr0min) == 1))
{
- gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
- tree innerop;
-
- if (!is_gimple_assign (def_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
- return;
-
- innerop = gimple_assign_rhs1 (def_stmt);
-
- if (TREE_CODE (innerop) == SSA_NAME
- && !POINTER_TYPE_P (TREE_TYPE (innerop))
- && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
- && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
+ /* ( [ ] ) or ([ ] ) or ( [ ]) */
+ if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ /* Choose the inner range. */
+ ;
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- value_range *vr = get_value_range (innerop);
-
- if (range_int_cst_p (vr)
- && range_fits_type_p (vr,
- TYPE_PRECISION (TREE_TYPE (op0)),
- TYPE_SIGN (TREE_TYPE (op0)))
- && int_fits_type_p (op1, TREE_TYPE (innerop)))
+ /* Choose the right gap if the left is empty. */
+ if (mineq)
{
- tree newconst = fold_convert (TREE_TYPE (innerop), op1);
- gimple_cond_set_lhs (stmt, innerop);
- gimple_cond_set_rhs (stmt, newconst);
- update_stmt (stmt);
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "Folded into: ");
- print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
- fprintf (dump_file, "\n");
- }
+ *vr0type = VR_RANGE;
+ if (TREE_CODE (*vr0max) != INTEGER_CST)
+ *vr0min = *vr0max;
+ else if (TYPE_PRECISION (TREE_TYPE (*vr0max)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (*vr0max)))
+ *vr0min
+ = int_const_binop (MINUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), -1));
+ else
+ *vr0min
+ = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
+ *vr0max = vr1max;
+ }
+ /* Choose the left gap if the right is empty. */
+ else if (maxeq)
+ {
+ *vr0type = VR_RANGE;
+ if (TREE_CODE (*vr0min) != INTEGER_CST)
+ *vr0max = *vr0min;
+ else if (TYPE_PRECISION (TREE_TYPE (*vr0min)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (*vr0min)))
+ *vr0max
+ = int_const_binop (PLUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), -1));
+ else
+ *vr0max
+ = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
+ *vr0min = vr1min;
+ }
+ /* Choose the anti-range if the range is effectively varying. */
+ else if (vrp_val_is_min (vr1min)
+ && vrp_val_is_max (vr1max))
+ ;
+ /* Choose the anti-range if it is ~[0,0], that range is special
+ enough to special case when vr1's range is relatively wide. */
+ else if (*vr0min == *vr0max
+ && integer_zerop (*vr0min)
+ && (TYPE_PRECISION (TREE_TYPE (*vr0min))
+ == TYPE_PRECISION (ptr_type_node))
+ && TREE_CODE (vr1max) == INTEGER_CST
+ && TREE_CODE (vr1min) == INTEGER_CST
+ && (wi::clz (wi::to_wide (vr1max) - wi::to_wide (vr1min))
+ < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
+ ;
+ /* Else choose the range. */
+ else
+ {
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
}
}
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ {
+ /* If both are anti-ranges the result is the outer one. */
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
+ }
+ else if (vr1type == VR_ANTI_RANGE
+ && *vr0type == VR_RANGE)
+ {
+ /* The intersection is empty. */
+ *vr0type = VR_UNDEFINED;
+ *vr0min = NULL_TREE;
+ *vr0max = NULL_TREE;
+ }
+ else
+ gcc_unreachable ();
}
-}
-
-/* Simplify a switch statement using the value range of the switch
- argument. */
-
-bool
-vr_values::simplify_switch_using_ranges (gswitch *stmt)
-{
- tree op = gimple_switch_index (stmt);
- value_range *vr = NULL;
- bool take_default;
- edge e;
- edge_iterator ei;
- size_t i = 0, j = 0, n, n2;
- tree vec2;
- switch_update su;
- size_t k = 1, l = 0;
-
- if (TREE_CODE (op) == SSA_NAME)
- {
- vr = get_value_range (op);
-
- /* We can only handle integer ranges. */
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || symbolic_range_p (vr))
- return false;
-
- /* Find case label for min/max of the value range. */
- take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
- }
- else if (TREE_CODE (op) == INTEGER_CST)
+ else if ((operand_less_p (vr1min, *vr0max) == 1
+ || operand_equal_p (vr1min, *vr0max, 0))
+ && operand_less_p (*vr0min, vr1min) == 1)
{
- take_default = !find_case_label_index (stmt, 1, op, &i);
- if (take_default)
+ /* [ ( ] ) or [ ]( ) */
+ if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ *vr0max = vr1max;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ *vr0min = vr1min;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- i = 1;
- j = 0;
+ if (TREE_CODE (vr1min) == INTEGER_CST)
+ *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
+ else
+ *vr0max = vr1min;
}
- else
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- j = i;
+ *vr0type = VR_RANGE;
+ if (TREE_CODE (*vr0max) == INTEGER_CST)
+ *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
+ else
+ *vr0min = *vr0max;
+ *vr0max = vr1max;
}
+ else
+ gcc_unreachable ();
}
- else
- return false;
-
- n = gimple_switch_num_labels (stmt);
-
- /* We can truncate the case label ranges that partially overlap with OP's
- value range. */
- size_t min_idx = 1, max_idx = 0;
- if (vr != NULL)
- find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
- if (min_idx <= max_idx)
+ else if ((operand_less_p (*vr0min, vr1max) == 1
+ || operand_equal_p (*vr0min, vr1max, 0))
+ && operand_less_p (vr1min, *vr0min) == 1)
{
- tree min_label = gimple_switch_label (stmt, min_idx);
- tree max_label = gimple_switch_label (stmt, max_idx);
-
- /* Avoid changing the type of the case labels when truncating. */
- tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
- tree vr_min = fold_convert (case_label_type, vr->min);
- tree vr_max = fold_convert (case_label_type, vr->max);
-
- if (vr->type == VR_RANGE)
+ /* ( [ ) ] or ( )[ ] */
+ if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_ANTI_RANGE)
+ *vr0min = vr1min;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_RANGE)
+ *vr0max = vr1max;
+ else if (*vr0type == VR_RANGE
+ && vr1type == VR_ANTI_RANGE)
{
- /* If OP's value range is [2,8] and the low label range is
- 0 ... 3, truncate the label's range to 2 .. 3. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
- CASE_LOW (min_label) = vr_min;
-
- /* If OP's value range is [2,8] and the high label range is
- 7 ... 10, truncate the label's range to 7 .. 8. */
- if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
- && CASE_HIGH (max_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
- CASE_HIGH (max_label) = vr_max;
+ if (TREE_CODE (vr1max) == INTEGER_CST)
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
+ else
+ *vr0min = vr1max;
}
- else if (vr->type == VR_ANTI_RANGE)
+ else if (*vr0type == VR_ANTI_RANGE
+ && vr1type == VR_RANGE)
{
- tree one_cst = build_one_cst (case_label_type);
-
- if (min_label == max_label)
- {
- /* If OP's value range is ~[7,8] and the label's range is
- 7 ... 10, truncate the label's range to 9 ... 10. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
- CASE_LOW (min_label)
- = int_const_binop (PLUS_EXPR, vr_max, one_cst);
-
- /* If OP's value range is ~[7,8] and the label's range is
- 5 ... 8, truncate the label's range to 5 ... 6. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
- CASE_HIGH (min_label)
- = int_const_binop (MINUS_EXPR, vr_min, one_cst);
- }
+ *vr0type = VR_RANGE;
+ if (TREE_CODE (*vr0min) == INTEGER_CST)
+ *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
else
- {
- /* If OP's value range is ~[2,8] and the low label range is
- 0 ... 3, truncate the label's range to 0 ... 1. */
- if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
- && CASE_HIGH (min_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
- CASE_HIGH (min_label)
- = int_const_binop (MINUS_EXPR, vr_min, one_cst);
-
- /* If OP's value range is ~[2,8] and the high label range is
- 7 ... 10, truncate the label's range to 9 ... 10. */
- if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
- && CASE_HIGH (max_label) != NULL_TREE
- && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
- CASE_LOW (max_label)
- = int_const_binop (PLUS_EXPR, vr_max, one_cst);
- }
+ *vr0max = *vr0min;
+ *vr0min = vr1min;
}
-
- /* Canonicalize singleton case ranges. */
- if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
- CASE_HIGH (min_label) = NULL_TREE;
- if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
- CASE_HIGH (max_label) = NULL_TREE;
+ else
+ gcc_unreachable ();
}
- /* We can also eliminate case labels that lie completely outside OP's value
- range. */
-
- /* Bail out if this is just all edges taken. */
- if (i == 1
- && j == n - 1
- && take_default)
- return false;
+ /* As a fallback simply use { *VR0TYPE, *VR0MIN, *VR0MAX } as
+ result for the intersection. That's always a conservative
+ correct estimate unless VR1 is a constant singleton range
+ in which case we choose that. */
+ if (vr1type == VR_RANGE
+ && is_gimple_min_invariant (vr1min)
+ && vrp_operand_equal_p (vr1min, vr1max))
+ {
+ *vr0type = vr1type;
+ *vr0min = vr1min;
+ *vr0max = vr1max;
+ }
- /* Build a new vector of taken case labels. */
- vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
- n2 = 0;
+ return;
+}
- /* Add the default edge, if necessary. */
- if (take_default)
- TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
- for (; i <= j; ++i, ++n2)
- TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
+/* Intersect the two value-ranges *VR0 and *VR1 and store the result
+ in *VR0. This may not be the smallest possible such range. */
- for (; k <= l; ++k, ++n2)
- TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
+static void
+vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
+{
+ value_range saved;
- /* Mark needed edges. */
- for (i = 0; i < n2; ++i)
+ /* If either range is VR_VARYING the other one wins. */
+ if (vr1->type == VR_VARYING)
+ return;
+ if (vr0->type == VR_VARYING)
{
- e = find_edge (gimple_bb (stmt),
- label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
- e->aux = (void *)-1;
+ copy_value_range (vr0, vr1);
+ return;
}
- /* Queue not needed edges for later removal. */
- FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
+ /* When either range is VR_UNDEFINED the resulting range is
+ VR_UNDEFINED, too. */
+ if (vr0->type == VR_UNDEFINED)
+ return;
+ if (vr1->type == VR_UNDEFINED)
{
- if (e->aux == (void *)-1)
- {
- e->aux = NULL;
- continue;
- }
+ set_value_range_to_undefined (vr0);
+ return;
+ }
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
- fprintf (dump_file, "removing unreachable case label\n");
- }
- to_remove_edges.safe_push (e);
- e->flags &= ~EDGE_EXECUTABLE;
+ /* Save the original vr0 so we can return it as conservative intersection
+ result when our worker turns things to varying. */
+ saved = *vr0;
+ intersect_ranges (&vr0->type, &vr0->min, &vr0->max,
+ vr1->type, vr1->min, vr1->max);
+ /* Make sure to canonicalize the result though as the inversion of a
+ VR_RANGE can still be a VR_RANGE. */
+ set_and_canonicalize_value_range (vr0, vr0->type,
+ vr0->min, vr0->max, vr0->equiv);
+ /* If that failed, use the saved original VR0. */
+ if (vr0->type == VR_VARYING)
+ {
+ *vr0 = saved;
+ return;
}
+ /* If the result is VR_UNDEFINED there is no need to mess with
+ the equivalencies. */
+ if (vr0->type == VR_UNDEFINED)
+ return;
- /* And queue an update for the stmt. */
- su.stmt = stmt;
- su.vec = vec2;
- to_update_switch_stmts.safe_push (su);
- return false;
+ /* The resulting set of equivalences for range intersection is the union of
+ the two sets. */
+ if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
+ bitmap_ior_into (vr0->equiv, vr1->equiv);
+ else if (vr1->equiv && !vr0->equiv)
+ {
+ /* All equivalence bitmaps are allocated from the same obstack. So
+ we can use the obstack associated with VR to allocate vr0->equiv. */
+ vr0->equiv = BITMAP_ALLOC (vr1->equiv->obstack);
+ bitmap_copy (vr0->equiv, vr1->equiv);
+ }
}
-/* Simplify an integral conversion from an SSA name in STMT. */
-
-static bool
-simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+void
+vrp_intersect_ranges (value_range *vr0, value_range *vr1)
{
- tree innerop, middleop, finaltype;
- gimple *def_stmt;
- signop inner_sgn, middle_sgn, final_sgn;
- unsigned inner_prec, middle_prec, final_prec;
- widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
-
- finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
- if (!INTEGRAL_TYPE_P (finaltype))
- return false;
- middleop = gimple_assign_rhs1 (stmt);
- def_stmt = SSA_NAME_DEF_STMT (middleop);
- if (!is_gimple_assign (def_stmt)
- || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
- return false;
- innerop = gimple_assign_rhs1 (def_stmt);
- if (TREE_CODE (innerop) != SSA_NAME
- || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
- return false;
-
- /* Get the value-range of the inner operand. Use get_range_info in
- case innerop was created during substitute-and-fold. */
- wide_int imin, imax;
- if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
- || get_range_info (innerop, &imin, &imax) != VR_RANGE)
- return false;
- innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
- innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
-
- /* Simulate the conversion chain to check if the result is equal if
- the middle conversion is removed. */
- inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
- middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
- final_prec = TYPE_PRECISION (finaltype);
-
- /* If the first conversion is not injective, the second must not
- be widening. */
- if (wi::gtu_p (innermax - innermin,
- wi::mask <widest_int> (middle_prec, false))
- && middle_prec < final_prec)
- return false;
- /* We also want a medium value so that we can track the effect that
- narrowing conversions with sign change have. */
- inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
- if (inner_sgn == UNSIGNED)
- innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
- else
- innermed = 0;
- if (wi::cmp (innermin, innermed, inner_sgn) >= 0
- || wi::cmp (innermed, innermax, inner_sgn) >= 0)
- innermed = innermin;
-
- middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
- middlemin = wi::ext (innermin, middle_prec, middle_sgn);
- middlemed = wi::ext (innermed, middle_prec, middle_sgn);
- middlemax = wi::ext (innermax, middle_prec, middle_sgn);
-
- /* Require that the final conversion applied to both the original
- and the intermediate range produces the same result. */
- final_sgn = TYPE_SIGN (finaltype);
- if (wi::ext (middlemin, final_prec, final_sgn)
- != wi::ext (innermin, final_prec, final_sgn)
- || wi::ext (middlemed, final_prec, final_sgn)
- != wi::ext (innermed, final_prec, final_sgn)
- || wi::ext (middlemax, final_prec, final_sgn)
- != wi::ext (innermax, final_prec, final_sgn))
- return false;
-
- gimple_assign_set_rhs1 (stmt, innerop);
- fold_stmt (gsi, follow_single_use_edges);
- return true;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "Intersecting\n ");
+ dump_value_range (dump_file, vr0);
+ fprintf (dump_file, "\nand\n ");
+ dump_value_range (dump_file, vr1);
+ fprintf (dump_file, "\n");
+ }
+ vrp_intersect_ranges_1 (vr0, vr1);
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "to\n ");
+ dump_value_range (dump_file, vr0);
+ fprintf (dump_file, "\n");
+ }
}
-/* Simplify a conversion from integral SSA name to float in STMT. */
+/* Meet operation for value ranges. Given two value ranges VR0 and
+ VR1, store in VR0 a range that contains both VR0 and VR1. This
+ may not be the smallest possible such range. */
-bool
-vr_values::simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
+static void
+vrp_meet_1 (value_range *vr0, const value_range *vr1)
{
- tree rhs1 = gimple_assign_rhs1 (stmt);
- value_range *vr = get_value_range (rhs1);
- scalar_float_mode fltmode
- = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
- scalar_int_mode mode;
- tree tem;
- gassign *conv;
-
- /* We can only handle constant ranges. */
- if (vr->type != VR_RANGE
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
+ value_range saved;
- /* First check if we can use a signed type in place of an unsigned. */
- scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
- if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
- && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
- && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
- mode = rhs_mode;
- /* If we can do the conversion in the current input mode do nothing. */
- else if (can_float_p (fltmode, rhs_mode,
- TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
- return false;
- /* Otherwise search for a mode we can use, starting from the narrowest
- integer mode available. */
- else
+ if (vr0->type == VR_UNDEFINED)
{
- mode = NARROWEST_INT_MODE;
- for (;;)
- {
- /* If we cannot do a signed conversion to float from mode
- or if the value-range does not fit in the signed type
- try with a wider mode. */
- if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
- && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
- break;
-
- /* But do not widen the input. Instead leave that to the
- optabs expansion code. */
- if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
- || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
- return false;
- }
+ set_value_range (vr0, vr1->type, vr1->min, vr1->max, vr1->equiv);
+ return;
}
- /* It works, insert a truncation or sign-change before the
- float conversion. */
- tem = make_ssa_name (build_nonstandard_integer_type
- (GET_MODE_PRECISION (mode), 0));
- conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
- gsi_insert_before (gsi, conv, GSI_SAME_STMT);
- gimple_assign_set_rhs1 (stmt, tem);
- fold_stmt (gsi, follow_single_use_edges);
-
- return true;
-}
-
-/* Simplify an internal fn call using ranges if possible. */
+ if (vr1->type == VR_UNDEFINED)
+ {
+ /* VR0 already has the resulting range. */
+ return;
+ }
-bool
-vr_values::simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi,
- gimple *stmt)
-{
- enum tree_code subcode;
- bool is_ubsan = false;
- bool ovf = false;
- switch (gimple_call_internal_fn (stmt))
- {
- case IFN_UBSAN_CHECK_ADD:
- subcode = PLUS_EXPR;
- is_ubsan = true;
- break;
- case IFN_UBSAN_CHECK_SUB:
- subcode = MINUS_EXPR;
- is_ubsan = true;
- break;
- case IFN_UBSAN_CHECK_MUL:
- subcode = MULT_EXPR;
- is_ubsan = true;
- break;
- case IFN_ADD_OVERFLOW:
- subcode = PLUS_EXPR;
- break;
- case IFN_SUB_OVERFLOW:
- subcode = MINUS_EXPR;
- break;
- case IFN_MUL_OVERFLOW:
- subcode = MULT_EXPR;
- break;
- default:
- return false;
+ if (vr0->type == VR_VARYING)
+ {
+ /* Nothing to do. VR0 already has the resulting range. */
+ return;
}
- tree op0 = gimple_call_arg (stmt, 0);
- tree op1 = gimple_call_arg (stmt, 1);
- tree type;
- if (is_ubsan)
+ if (vr1->type == VR_VARYING)
{
- type = TREE_TYPE (op0);
- if (VECTOR_TYPE_P (type))
- return false;
+ set_value_range_to_varying (vr0);
+ return;
}
- else if (gimple_call_lhs (stmt) == NULL_TREE)
- return false;
- else
- type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
- if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
- || (is_ubsan && ovf))
- return false;
- gimple *g;
- location_t loc = gimple_location (stmt);
- if (is_ubsan)
- g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
- else
+ saved = *vr0;
+ union_ranges (&vr0->type, &vr0->min, &vr0->max,
+ vr1->type, vr1->min, vr1->max);
+ if (vr0->type == VR_VARYING)
{
- int prec = TYPE_PRECISION (type);
- tree utype = type;
- if (ovf
- || !useless_type_conversion_p (type, TREE_TYPE (op0))
- || !useless_type_conversion_p (type, TREE_TYPE (op1)))
- utype = build_nonstandard_integer_type (prec, 1);
- if (TREE_CODE (op0) == INTEGER_CST)
- op0 = fold_convert (utype, op0);
- else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
- {
- g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- op0 = gimple_assign_lhs (g);
- }
- if (TREE_CODE (op1) == INTEGER_CST)
- op1 = fold_convert (utype, op1);
- else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
- {
- g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- op1 = gimple_assign_lhs (g);
- }
- g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
- if (utype != type)
+ /* Failed to find an efficient meet. Before giving up and setting
+ the result to VARYING, see if we can at least derive a useful
+ anti-range. FIXME, all this nonsense about distinguishing
+ anti-ranges from ranges is necessary because of the odd
+ semantics of range_includes_zero_p and friends. */
+ if (((saved.type == VR_RANGE
+ && range_includes_zero_p (saved.min, saved.max) == 0)
+ || (saved.type == VR_ANTI_RANGE
+ && range_includes_zero_p (saved.min, saved.max) == 1))
+ && ((vr1->type == VR_RANGE
+ && range_includes_zero_p (vr1->min, vr1->max) == 0)
+ || (vr1->type == VR_ANTI_RANGE
+ && range_includes_zero_p (vr1->min, vr1->max) == 1)))
{
- g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
- gimple_assign_lhs (g));
- gimple_set_location (g, loc);
- gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ set_value_range_to_nonnull (vr0, TREE_TYPE (saved.min));
+
+ /* Since this meet operation did not result from the meeting of
+ two equivalent names, VR0 cannot have any equivalences. */
+ if (vr0->equiv)
+ bitmap_clear (vr0->equiv);
+ return;
}
- g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
- gimple_assign_lhs (g),
- build_int_cst (type, ovf));
+
+ set_value_range_to_varying (vr0);
+ return;
}
- gimple_set_location (g, loc);
- gsi_replace (gsi, g, false);
- return true;
-}
+ set_and_canonicalize_value_range (vr0, vr0->type, vr0->min, vr0->max,
+ vr0->equiv);
+ if (vr0->type == VR_VARYING)
+ return;
-/* Return true if VAR is a two-valued variable. Set a and b with the
- two-values when it is true. Return false otherwise. */
+ /* The resulting set of equivalences is always the intersection of
+ the two sets. */
+ if (vr0->equiv && vr1->equiv && vr0->equiv != vr1->equiv)
+ bitmap_and_into (vr0->equiv, vr1->equiv);
+ else if (vr0->equiv && !vr1->equiv)
+ bitmap_clear (vr0->equiv);
+}
-bool
-vr_values::two_valued_val_range_p (tree var, tree *a, tree *b)
+void
+vrp_meet (value_range *vr0, const value_range *vr1)
{
- value_range *vr = get_value_range (var);
- if ((vr->type != VR_RANGE
- && vr->type != VR_ANTI_RANGE)
- || TREE_CODE (vr->min) != INTEGER_CST
- || TREE_CODE (vr->max) != INTEGER_CST)
- return false;
-
- if (vr->type == VR_RANGE
- && wi::to_wide (vr->max) - wi::to_wide (vr->min) == 1)
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- *a = vr->min;
- *b = vr->max;
- return true;
+ fprintf (dump_file, "Meeting\n ");
+ dump_value_range (dump_file, vr0);
+ fprintf (dump_file, "\nand\n ");
+ dump_value_range (dump_file, vr1);
+ fprintf (dump_file, "\n");
}
-
- /* ~[TYPE_MIN + 1, TYPE_MAX - 1] */
- if (vr->type == VR_ANTI_RANGE
- && (wi::to_wide (vr->min)
- - wi::to_wide (vrp_val_min (TREE_TYPE (var)))) == 1
- && (wi::to_wide (vrp_val_max (TREE_TYPE (var)))
- - wi::to_wide (vr->max)) == 1)
+ vrp_meet_1 (vr0, vr1);
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- *a = vrp_val_min (TREE_TYPE (var));
- *b = vrp_val_max (TREE_TYPE (var));
- return true;
+ fprintf (dump_file, "to\n ");
+ dump_value_range (dump_file, vr0);
+ fprintf (dump_file, "\n");
}
-
- return false;
}
-/* Simplify STMT using ranges if possible. */
-
-bool
-vr_values::simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
-{
- gimple *stmt = gsi_stmt (*gsi);
- if (is_gimple_assign (stmt))
- {
- enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
- tree rhs1 = gimple_assign_rhs1 (stmt);
- tree rhs2 = gimple_assign_rhs2 (stmt);
- tree lhs = gimple_assign_lhs (stmt);
- tree val1 = NULL_TREE, val2 = NULL_TREE;
- use_operand_p use_p;
- gimple *use_stmt;
- /* Convert:
- LHS = CST BINOP VAR
- Where VAR is two-valued and LHS is used in GIMPLE_COND only
- To:
- LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
-
- Also handles:
- LHS = VAR BINOP CST
- Where VAR is two-valued and LHS is used in GIMPLE_COND only
- To:
- LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
-
- if (TREE_CODE_CLASS (rhs_code) == tcc_binary
- && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
- && ((TREE_CODE (rhs1) == INTEGER_CST
- && TREE_CODE (rhs2) == SSA_NAME)
- || (TREE_CODE (rhs2) == INTEGER_CST
- && TREE_CODE (rhs1) == SSA_NAME))
- && single_imm_use (lhs, &use_p, &use_stmt)
- && gimple_code (use_stmt) == GIMPLE_COND)
+/* Visit all arguments for PHI node PHI that flow through executable
+ edges. If a valid value range can be derived from all the incoming
+ value ranges, set a new range for the LHS of PHI. */
+enum ssa_prop_result
+vrp_prop::visit_phi (gphi *phi)
+{
+ tree lhs = PHI_RESULT (phi);
+ value_range vr_result = VR_INITIALIZER;
+ extract_range_from_phi_node (phi, &vr_result);
+ if (update_value_range (lhs, &vr_result))
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
{
- tree new_rhs1 = NULL_TREE;
- tree new_rhs2 = NULL_TREE;
- tree cmp_var = NULL_TREE;
-
- if (TREE_CODE (rhs2) == SSA_NAME
- && two_valued_val_range_p (rhs2, &val1, &val2))
- {
- /* Optimize RHS1 OP [VAL1, VAL2]. */
- new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
- new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
- cmp_var = rhs2;
- }
- else if (TREE_CODE (rhs1) == SSA_NAME
- && two_valued_val_range_p (rhs1, &val1, &val2))
- {
- /* Optimize [VAL1, VAL2] OP RHS2. */
- new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
- new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
- cmp_var = rhs1;
- }
-
- /* If we could not find two-vals or the optimzation is invalid as
- in divide by zero, new_rhs1 / new_rhs will be NULL_TREE. */
- if (new_rhs1 && new_rhs2)
- {
- tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
- gimple_assign_set_rhs_with_ops (gsi,
- COND_EXPR, cond,
- new_rhs1,
- new_rhs2);
- update_stmt (gsi_stmt (*gsi));
- fold_stmt (gsi, follow_single_use_edges);
- return true;
- }
+ fprintf (dump_file, "Found new range for ");
+ print_generic_expr (dump_file, lhs);
+ fprintf (dump_file, ": ");
+ dump_value_range (dump_file, &vr_result);
+ fprintf (dump_file, "\n");
}
- switch (rhs_code)
- {
- case EQ_EXPR:
- case NE_EXPR:
- /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
- if the RHS is zero or one, and the LHS are known to be boolean
- values. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_truth_ops_using_ranges (gsi, stmt);
- break;
-
- /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
- and BIT_AND_EXPR respectively if the first operand is greater
- than zero and the second operand is an exact power of two.
- Also optimize TRUNC_MOD_EXPR away if the second operand is
- constant and the first operand already has the right value
- range. */
- case TRUNC_DIV_EXPR:
- case TRUNC_MOD_EXPR:
- if ((TREE_CODE (rhs1) == SSA_NAME
- || TREE_CODE (rhs1) == INTEGER_CST)
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_div_or_mod_using_ranges (gsi, stmt);
- break;
-
- /* Transform ABS (X) into X or -X as appropriate. */
- case ABS_EXPR:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_abs_using_ranges (gsi, stmt);
- break;
-
- case BIT_AND_EXPR:
- case BIT_IOR_EXPR:
- /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
- if all the bits being cleared are already cleared or
- all the bits being set are already set. */
- if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_bit_ops_using_ranges (gsi, stmt);
- break;
-
- CASE_CONVERT:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_conversion_using_ranges (gsi, stmt);
- break;
-
- case FLOAT_EXPR:
- if (TREE_CODE (rhs1) == SSA_NAME
- && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
- return simplify_float_conversion_using_ranges (gsi, stmt);
- break;
-
- case MIN_EXPR:
- case MAX_EXPR:
- return simplify_min_or_max_using_ranges (gsi, stmt);
+ if (vr_result.type == VR_VARYING)
+ return SSA_PROP_VARYING;
- default:
- break;
- }
+ return SSA_PROP_INTERESTING;
}
- else if (gimple_code (stmt) == GIMPLE_COND)
- return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
- else if (gimple_code (stmt) == GIMPLE_SWITCH)
- return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
- else if (is_gimple_call (stmt)
- && gimple_call_internal_p (stmt))
- return simplify_internal_call_using_ranges (gsi, stmt);
- return false;
+ /* Nothing changed, don't add outgoing edges. */
+ return SSA_PROP_NOT_INTERESTING;
}
class vrp_folder : public substitute_and_fold_engine
return op;
}
+/* A hack. */
+static class vr_values *x_vr_values;
+
/* A trivial wrapper so that we can present the generic jump threading
code with a simple API for simplifying statements. STMT is the
statement we want to simplify, WITHIN_STMT provides the location
delete avail_exprs_stack;
}
-/* Free VRP lattice. */
-
-vr_values::~vr_values ()
-{
- /* Free allocated memory. */
- free (vr_value);
- free (vr_phi_edge_counts);
- bitmap_obstack_release (&vrp_equiv_obstack);
- vrp_value_range_pool.release ();
-
- /* So that we can distinguish between VRP data being available
- and not available. */
- vr_value = NULL;
- vr_phi_edge_counts = NULL;
-}
-
/* Traverse all the blocks folding conditionals with known ranges. */
void
check_all_array_refs ();
}
-void
-vr_values::set_vr_value (tree var, value_range *vr)
-{
- if (SSA_NAME_VERSION (var) >= num_vr_values)
- return;
- vr_value[SSA_NAME_VERSION (var)] = vr;
-}
-
/* Main entry point to VRP (Value Range Propagation). This pass is
loosely based on J. R. C. Patterson, ``Accurate Static Branch
Prediction by Value Range Propagation,'' in SIGPLAN Conference on
--- /dev/null
+/* Support routines for Value Range Propagation (VRP).
+ Copyright (C) 2005-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "insn-codes.h"
+#include "tree.h"
+#include "gimple.h"
+#include "ssa.h"
+#include "optabs-tree.h"
+#include "gimple-pretty-print.h"
+#include "diagnostic-core.h"
+#include "flags.h"
+#include "fold-const.h"
+#include "calls.h"
+#include "cfganal.h"
+#include "gimple-fold.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
+#include "tree-ssa-loop-niter.h"
+#include "tree-ssa-loop.h"
+#include "intl.h"
+#include "cfgloop.h"
+#include "tree-scalar-evolution.h"
+#include "tree-ssa-propagate.h"
+#include "tree-chrec.h"
+#include "omp-general.h"
+#include "case-cfn-macros.h"
+#include "alloc-pool.h"
+#include "attribs.h"
+#include "vr-values.h"
+
+/* Set value range VR to a non-negative range of type TYPE. */
+
+static inline void
+set_value_range_to_nonnegative (value_range *vr, tree type)
+{
+ tree zero = build_int_cst (type, 0);
+ set_value_range (vr, VR_RANGE, zero, vrp_val_max (type), vr->equiv);
+}
+
+/* Set value range VR to a range of a truthvalue of type TYPE. */
+
+static inline void
+set_value_range_to_truthvalue (value_range *vr, tree type)
+{
+ if (TYPE_PRECISION (type) == 1)
+ set_value_range_to_varying (vr);
+ else
+ set_value_range (vr, VR_RANGE,
+ build_int_cst (type, 0), build_int_cst (type, 1),
+ vr->equiv);
+}
+
+
+/* Return value range information for VAR.
+
+   If we have no value ranges recorded (i.e., VRP is not running), then
+ return NULL. Otherwise create an empty range if none existed for VAR. */
+
+value_range *
+vr_values::get_value_range (const_tree var)
+{
+ static const value_range vr_const_varying
+ = { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
+ value_range *vr;
+ tree sym;
+ unsigned ver = SSA_NAME_VERSION (var);
+
+ /* If we have no recorded ranges, then return NULL. */
+ if (! vr_value)
+ return NULL;
+
+ /* If we query the range for a new SSA name return an unmodifiable VARYING.
+ We should get here at most from the substitute-and-fold stage which
+ will never try to change values. */
+ if (ver >= num_vr_values)
+ return CONST_CAST (value_range *, &vr_const_varying);
+
+ vr = vr_value[ver];
+ if (vr)
+ return vr;
+
+ /* After propagation finished do not allocate new value-ranges. */
+ if (values_propagated)
+ return CONST_CAST (value_range *, &vr_const_varying);
+
+ /* Create a default value range. */
+ vr_value[ver] = vr = vrp_value_range_pool.allocate ();
+ memset (vr, 0, sizeof (*vr));
+
+ /* Defer allocating the equivalence set. */
+ vr->equiv = NULL;
+
+ /* If VAR is a default definition of a parameter, the variable can
+ take any value in VAR's type. */
+ if (SSA_NAME_IS_DEFAULT_DEF (var))
+ {
+ sym = SSA_NAME_VAR (var);
+ if (TREE_CODE (sym) == PARM_DECL)
+ {
+ /* Try to use the "nonnull" attribute to create ~[0, 0]
+ anti-ranges for pointers. Note that this is only valid with
+ default definitions of PARM_DECLs. */
+ if (POINTER_TYPE_P (TREE_TYPE (sym))
+ && (nonnull_arg_p (sym)
+ || get_ptr_nonnull (var)))
+ set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (sym)))
+ {
+ wide_int min, max;
+ value_range_type rtype = get_range_info (var, &min, &max);
+ if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+ set_value_range (vr, rtype,
+ wide_int_to_tree (TREE_TYPE (var), min),
+ wide_int_to_tree (TREE_TYPE (var), max),
+ NULL);
+ else
+ set_value_range_to_varying (vr);
+ }
+ else
+ set_value_range_to_varying (vr);
+ }
+ else if (TREE_CODE (sym) == RESULT_DECL
+ && DECL_BY_REFERENCE (sym))
+ set_value_range_to_nonnull (vr, TREE_TYPE (sym));
+ }
+
+ return vr;
+}
+
+/* Set value-ranges of all SSA names defined by STMT to varying. */
+
+void
+vr_values::set_defs_to_varying (gimple *stmt)
+{
+ ssa_op_iter i;
+ tree def;
+ FOR_EACH_SSA_TREE_OPERAND (def, stmt, i, SSA_OP_DEF)
+ {
+ value_range *vr = get_value_range (def);
+      /* Avoid writing to the vr_const_varying that get_value_range may return.  */
+ if (vr->type != VR_VARYING)
+ set_value_range_to_varying (vr);
+ }
+}
+
+/* Update the value range and equivalence set for variable VAR to
+ NEW_VR. Return true if NEW_VR is different from VAR's previous
+ value.
+
+ NOTE: This function assumes that NEW_VR is a temporary value range
+ object created for the sole purpose of updating VAR's range. The
+ storage used by the equivalence set from NEW_VR will be freed by
+ this function. Do not call update_value_range when NEW_VR
+ is the range object associated with another SSA name. */
+
+bool
+vr_values::update_value_range (const_tree var, value_range *new_vr)
+{
+ value_range *old_vr;
+ bool is_new;
+
+ /* If there is a value-range on the SSA name from earlier analysis
+ factor that in. */
+ if (INTEGRAL_TYPE_P (TREE_TYPE (var)))
+ {
+ wide_int min, max;
+ value_range_type rtype = get_range_info (var, &min, &max);
+ if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
+ {
+ tree nr_min, nr_max;
+ nr_min = wide_int_to_tree (TREE_TYPE (var), min);
+ nr_max = wide_int_to_tree (TREE_TYPE (var), max);
+ value_range nr = VR_INITIALIZER;
+ set_and_canonicalize_value_range (&nr, rtype, nr_min, nr_max, NULL);
+ vrp_intersect_ranges (new_vr, &nr);
+ }
+ }
+
+ /* Update the value range, if necessary. */
+ old_vr = get_value_range (var);
+ is_new = old_vr->type != new_vr->type
+ || !vrp_operand_equal_p (old_vr->min, new_vr->min)
+ || !vrp_operand_equal_p (old_vr->max, new_vr->max)
+ || !vrp_bitmap_equal_p (old_vr->equiv, new_vr->equiv);
+
+ if (is_new)
+ {
+ /* Do not allow transitions up the lattice. The following
+ is slightly more awkward than just new_vr->type < old_vr->type
+ because VR_RANGE and VR_ANTI_RANGE need to be considered
+ the same. We may not have is_new when transitioning to
+ UNDEFINED. If old_vr->type is VARYING, we shouldn't be
+ called. */
+ if (new_vr->type == VR_UNDEFINED)
+ {
+ BITMAP_FREE (new_vr->equiv);
+ set_value_range_to_varying (old_vr);
+ set_value_range_to_varying (new_vr);
+ return true;
+ }
+ else
+ set_value_range (old_vr, new_vr->type, new_vr->min, new_vr->max,
+ new_vr->equiv);
+ }
+
+ BITMAP_FREE (new_vr->equiv);
+
+ return is_new;
+}
+
+
+/* Add VAR and VAR's equivalence set to EQUIV. This is the central
+ point where equivalence processing can be turned on/off. */
+
+void
+vr_values::add_equivalence (bitmap *equiv, const_tree var)
+{
+ unsigned ver = SSA_NAME_VERSION (var);
+ value_range *vr = get_value_range (var);
+
+ if (*equiv == NULL)
+ *equiv = BITMAP_ALLOC (&vrp_equiv_obstack);
+ bitmap_set_bit (*equiv, ver);
+ if (vr && vr->equiv)
+ bitmap_ior_into (*equiv, vr->equiv);
+}
+
+/* Return true if value range VR involves exactly one symbol SYM. */
+
+static bool
+symbolic_range_based_on_p (value_range *vr, const_tree sym)
+{
+ bool neg, min_has_symbol, max_has_symbol;
+ tree inv;
+
+ if (is_gimple_min_invariant (vr->min))
+ min_has_symbol = false;
+ else if (get_single_symbol (vr->min, &neg, &inv) == sym)
+ min_has_symbol = true;
+ else
+ return false;
+
+ if (is_gimple_min_invariant (vr->max))
+ max_has_symbol = false;
+ else if (get_single_symbol (vr->max, &neg, &inv) == sym)
+ max_has_symbol = true;
+ else
+ return false;
+
+ return (min_has_symbol || max_has_symbol);
+}
+
+/* Return true if the result of assignment STMT is known to be non-zero.  */
+
+static bool
+gimple_assign_nonzero_p (gimple *stmt)
+{
+ enum tree_code code = gimple_assign_rhs_code (stmt);
+ bool strict_overflow_p;
+ switch (get_gimple_rhs_class (code))
+ {
+ case GIMPLE_UNARY_RHS:
+ return tree_unary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_BINARY_RHS:
+ return tree_binary_nonzero_warnv_p (gimple_assign_rhs_code (stmt),
+ gimple_expr_type (stmt),
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_TERNARY_RHS:
+ return false;
+ case GIMPLE_SINGLE_RHS:
+ return tree_single_nonzero_warnv_p (gimple_assign_rhs1 (stmt),
+ &strict_overflow_p);
+ case GIMPLE_INVALID_RHS:
+ gcc_unreachable ();
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return true if STMT is known to compute a non-zero value. */
+
+static bool
+gimple_stmt_nonzero_p (gimple *stmt)
+{
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ return gimple_assign_nonzero_p (stmt);
+ case GIMPLE_CALL:
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (!fndecl) return false;
+ if (flag_delete_null_pointer_checks && !flag_check_new
+ && DECL_IS_OPERATOR_NEW (fndecl)
+ && !TREE_NOTHROW (fndecl))
+ return true;
+ /* References are always non-NULL. */
+ if (flag_delete_null_pointer_checks
+ && TREE_CODE (TREE_TYPE (fndecl)) == REFERENCE_TYPE)
+ return true;
+ if (flag_delete_null_pointer_checks &&
+ lookup_attribute ("returns_nonnull",
+ TYPE_ATTRIBUTES (gimple_call_fntype (stmt))))
+ return true;
+
+ gcall *call_stmt = as_a<gcall *> (stmt);
+ unsigned rf = gimple_call_return_flags (call_stmt);
+ if (rf & ERF_RETURNS_ARG)
+ {
+ unsigned argnum = rf & ERF_RETURN_ARG_MASK;
+ if (argnum < gimple_call_num_args (call_stmt))
+ {
+ tree arg = gimple_call_arg (call_stmt, argnum);
+ if (SSA_VAR_P (arg)
+ && infer_nonnull_range_by_attribute (stmt, arg))
+ return true;
+ }
+ }
+ return gimple_alloca_call_p (stmt);
+ }
+ default:
+ gcc_unreachable ();
+ }
+}
+/* Like tree_expr_nonzero_p, but this function uses value ranges
+ obtained so far. */
+
+bool
+vr_values::vrp_stmt_computes_nonzero (gimple *stmt)
+{
+ if (gimple_stmt_nonzero_p (stmt))
+ return true;
+
+ /* If we have an expression of the form &X->a, then the expression
+ is nonnull if X is nonnull. */
+ if (is_gimple_assign (stmt)
+ && gimple_assign_rhs_code (stmt) == ADDR_EXPR)
+ {
+ tree expr = gimple_assign_rhs1 (stmt);
+ tree base = get_base_address (TREE_OPERAND (expr, 0));
+
+ if (base != NULL_TREE
+ && TREE_CODE (base) == MEM_REF
+ && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
+ {
+ value_range *vr = get_value_range (TREE_OPERAND (base, 0));
+ if (range_is_nonnull (vr))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Returns true if EXPR is a valid value (as expected by compare_values) --
+ a gimple invariant, or SSA_NAME +- CST. */
+
+static bool
+valid_value_p (tree expr)
+{
+ if (TREE_CODE (expr) == SSA_NAME)
+ return true;
+
+ if (TREE_CODE (expr) == PLUS_EXPR
+ || TREE_CODE (expr) == MINUS_EXPR)
+ return (TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME
+ && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST);
+
+ return is_gimple_min_invariant (expr);
+}
+
+/* If OP has a value range with a single constant value return that,
+ otherwise return NULL_TREE. This returns OP itself if OP is a
+ constant. */
+
+tree
+vr_values::op_with_constant_singleton_value_range (tree op)
+{
+ if (is_gimple_min_invariant (op))
+ return op;
+
+ if (TREE_CODE (op) != SSA_NAME)
+ return NULL_TREE;
+
+ return value_range_constant_singleton (get_value_range (op));
+}
+
+/* Return true if op is in a boolean [0, 1] value-range. */
+
+bool
+vr_values::op_with_boolean_value_range_p (tree op)
+{
+ value_range *vr;
+
+ if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
+ return true;
+
+ if (integer_zerop (op)
+ || integer_onep (op))
+ return true;
+
+ if (TREE_CODE (op) != SSA_NAME)
+ return false;
+
+ vr = get_value_range (op);
+ return (vr->type == VR_RANGE
+ && integer_zerop (vr->min)
+ && integer_onep (vr->max));
+}
+
+/* Extract value range information for VAR when (OP COND_CODE LIMIT) is
+ true and store it in *VR_P. */
+
+void
+vr_values::extract_range_for_var_from_comparison_expr (tree var,
+ enum tree_code cond_code,
+ tree op, tree limit,
+ value_range *vr_p)
+{
+ tree min, max, type;
+ value_range *limit_vr;
+ type = TREE_TYPE (var);
+ gcc_assert (limit != var);
+
+ /* For pointer arithmetic, we only keep track of pointer equality
+ and inequality. */
+ if (POINTER_TYPE_P (type) && cond_code != NE_EXPR && cond_code != EQ_EXPR)
+ {
+ set_value_range_to_varying (vr_p);
+ return;
+ }
+
+ /* If LIMIT is another SSA name and LIMIT has a range of its own,
+ try to use LIMIT's range to avoid creating symbolic ranges
+ unnecessarily. */
+ limit_vr = (TREE_CODE (limit) == SSA_NAME) ? get_value_range (limit) : NULL;
+
+ /* LIMIT's range is only interesting if it has any useful information. */
+ if (! limit_vr
+ || limit_vr->type == VR_UNDEFINED
+ || limit_vr->type == VR_VARYING
+ || (symbolic_range_p (limit_vr)
+ && ! (limit_vr->type == VR_RANGE
+ && (limit_vr->min == limit_vr->max
+ || operand_equal_p (limit_vr->min, limit_vr->max, 0)))))
+ limit_vr = NULL;
+
+ /* Initially, the new range has the same set of equivalences of
+ VAR's range. This will be revised before returning the final
+ value. Since assertions may be chained via mutually exclusive
+ predicates, we will need to trim the set of equivalences before
+ we are done. */
+ gcc_assert (vr_p->equiv == NULL);
+ add_equivalence (&vr_p->equiv, var);
+
+ /* Extract a new range based on the asserted comparison for VAR and
+ LIMIT's value range. Notice that if LIMIT has an anti-range, we
+ will only use it for equality comparisons (EQ_EXPR). For any
+ other kind of assertion, we cannot derive a range from LIMIT's
+ anti-range that can be used to describe the new range. For
+ instance, ASSERT_EXPR <x_2, x_2 <= b_4>. If b_4 is ~[2, 10],
+ then b_4 takes on the ranges [-INF, 1] and [11, +INF]. There is
+ no single range for x_2 that could describe LE_EXPR, so we might
+ as well build the range [b_4, +INF] for it.
+ One special case we handle is extracting a range from a
+ range test encoded as (unsigned)var + CST <= limit. */
+ if (TREE_CODE (op) == NOP_EXPR
+ || TREE_CODE (op) == PLUS_EXPR)
+ {
+ if (TREE_CODE (op) == PLUS_EXPR)
+ {
+ min = fold_build1 (NEGATE_EXPR, TREE_TYPE (TREE_OPERAND (op, 1)),
+ TREE_OPERAND (op, 1));
+ max = int_const_binop (PLUS_EXPR, limit, min);
+ op = TREE_OPERAND (op, 0);
+ }
+ else
+ {
+ min = build_int_cst (TREE_TYPE (var), 0);
+ max = limit;
+ }
+
+ /* Make sure to not set TREE_OVERFLOW on the final type
+ conversion. We are willingly interpreting large positive
+ unsigned values as negative signed values here. */
+ min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
+ max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
+
+ /* We can transform a max, min range to an anti-range or
+ vice-versa. Use set_and_canonicalize_value_range which does
+ this for us. */
+ if (cond_code == LE_EXPR)
+ set_and_canonicalize_value_range (vr_p, VR_RANGE,
+ min, max, vr_p->equiv);
+ else if (cond_code == GT_EXPR)
+ set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+ min, max, vr_p->equiv);
+ else
+ gcc_unreachable ();
+ }
+ else if (cond_code == EQ_EXPR)
+ {
+ enum value_range_type range_type;
+
+ if (limit_vr)
+ {
+ range_type = limit_vr->type;
+ min = limit_vr->min;
+ max = limit_vr->max;
+ }
+ else
+ {
+ range_type = VR_RANGE;
+ min = limit;
+ max = limit;
+ }
+
+ set_value_range (vr_p, range_type, min, max, vr_p->equiv);
+
+ /* When asserting the equality VAR == LIMIT and LIMIT is another
+ SSA name, the new range will also inherit the equivalence set
+ from LIMIT. */
+ if (TREE_CODE (limit) == SSA_NAME)
+ add_equivalence (&vr_p->equiv, limit);
+ }
+ else if (cond_code == NE_EXPR)
+ {
+ /* As described above, when LIMIT's range is an anti-range and
+ this assertion is an inequality (NE_EXPR), then we cannot
+ derive anything from the anti-range. For instance, if
+ LIMIT's range was ~[0, 0], the assertion 'VAR != LIMIT' does
+ not imply that VAR's range is [0, 0]. So, in the case of
+ anti-ranges, we just assert the inequality using LIMIT and
+ not its anti-range.
+
+ If LIMIT_VR is a range, we can only use it to build a new
+ anti-range if LIMIT_VR is a single-valued range. For
+ instance, if LIMIT_VR is [0, 1], the predicate
+ VAR != [0, 1] does not mean that VAR's range is ~[0, 1].
+ Rather, it means that for value 0 VAR should be ~[0, 0]
+ and for value 1, VAR should be ~[1, 1]. We cannot
+ represent these ranges.
+
+ The only situation in which we can build a valid
+ anti-range is when LIMIT_VR is a single-valued range
+ (i.e., LIMIT_VR->MIN == LIMIT_VR->MAX). In that case,
+ build the anti-range ~[LIMIT_VR->MIN, LIMIT_VR->MAX]. */
+ if (limit_vr
+ && limit_vr->type == VR_RANGE
+ && compare_values (limit_vr->min, limit_vr->max) == 0)
+ {
+ min = limit_vr->min;
+ max = limit_vr->max;
+ }
+ else
+ {
+ /* In any other case, we cannot use LIMIT's range to build a
+ valid anti-range. */
+ min = max = limit;
+ }
+
+ /* If MIN and MAX cover the whole range for their type, then
+ just use the original LIMIT. */
+ if (INTEGRAL_TYPE_P (type)
+ && vrp_val_is_min (min)
+ && vrp_val_is_max (max))
+ min = max = limit;
+
+ set_and_canonicalize_value_range (vr_p, VR_ANTI_RANGE,
+ min, max, vr_p->equiv);
+ }
+ else if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+ {
+ min = TYPE_MIN_VALUE (type);
+
+ if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+ max = limit;
+ else
+ {
+ /* If LIMIT_VR is of the form [N1, N2], we need to build the
+ range [MIN, N2] for LE_EXPR and [MIN, N2 - 1] for
+ LT_EXPR. */
+ max = limit_vr->max;
+ }
+
+ /* If the maximum value forces us to be out of bounds, simply punt.
+ It would be pointless to try and do anything more since this
+ all should be optimized away above us. */
+ if (cond_code == LT_EXPR
+ && compare_values (max, min) == 0)
+ set_value_range_to_varying (vr_p);
+ else
+ {
+ /* For LT_EXPR, we create the range [MIN, MAX - 1]. */
+ if (cond_code == LT_EXPR)
+ {
+ if (TYPE_PRECISION (TREE_TYPE (max)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (max)))
+ max = fold_build2 (PLUS_EXPR, TREE_TYPE (max), max,
+ build_int_cst (TREE_TYPE (max), -1));
+ else
+ max = fold_build2 (MINUS_EXPR, TREE_TYPE (max), max,
+ build_int_cst (TREE_TYPE (max), 1));
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (max))
+ TREE_NO_WARNING (max) = 1;
+ }
+
+ set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+ }
+ }
+ else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+ {
+ max = TYPE_MAX_VALUE (type);
+
+ if (limit_vr == NULL || limit_vr->type == VR_ANTI_RANGE)
+ min = limit;
+ else
+ {
+ /* If LIMIT_VR is of the form [N1, N2], we need to build the
+ range [N1, MAX] for GE_EXPR and [N1 + 1, MAX] for
+ GT_EXPR. */
+ min = limit_vr->min;
+ }
+
+ /* If the minimum value forces us to be out of bounds, simply punt.
+ It would be pointless to try and do anything more since this
+ all should be optimized away above us. */
+ if (cond_code == GT_EXPR
+ && compare_values (min, max) == 0)
+ set_value_range_to_varying (vr_p);
+ else
+ {
+ /* For GT_EXPR, we create the range [MIN + 1, MAX]. */
+ if (cond_code == GT_EXPR)
+ {
+ if (TYPE_PRECISION (TREE_TYPE (min)) == 1
+ && !TYPE_UNSIGNED (TREE_TYPE (min)))
+ min = fold_build2 (MINUS_EXPR, TREE_TYPE (min), min,
+ build_int_cst (TREE_TYPE (min), -1));
+ else
+ min = fold_build2 (PLUS_EXPR, TREE_TYPE (min), min,
+ build_int_cst (TREE_TYPE (min), 1));
+ /* Signal to compare_values_warnv this expr doesn't overflow. */
+ if (EXPR_P (min))
+ TREE_NO_WARNING (min) = 1;
+ }
+
+ set_value_range (vr_p, VR_RANGE, min, max, vr_p->equiv);
+ }
+ }
+ else
+ gcc_unreachable ();
+
+ /* Finally intersect the new range with what we already know about var. */
+ vrp_intersect_ranges (vr_p, get_value_range (var));
+}
+
+/* Extract value range information from an ASSERT_EXPR EXPR and store
+   it in *VR_P.  */
+
+void
+vr_values::extract_range_from_assert (value_range *vr_p, tree expr)
+{
+  tree var = ASSERT_EXPR_VAR (expr);
+  tree cond = ASSERT_EXPR_COND (expr);
+  gcc_assert (COMPARISON_CLASS_P (cond));
+
+  tree lhs = TREE_OPERAND (cond, 0);
+  tree rhs = TREE_OPERAND (cond, 1);
+  enum tree_code cond_code;
+  tree limit, op;
+
+  /* Decide which side of the comparison holds VAR.  If VAR is the
+     left operand (possibly wrapped in a PLUS_EXPR or NOP_EXPR range
+     test), the comparison code is used as-is and LIMIT comes from the
+     right.  Otherwise the predicate is LIMIT COMP VAR, so mirror the
+     comparison code to express the range in terms of VAR.  */
+  if (var == lhs
+      || TREE_CODE (lhs) == PLUS_EXPR
+      || TREE_CODE (lhs) == NOP_EXPR)
+    {
+      cond_code = TREE_CODE (cond);
+      op = lhs;
+      limit = rhs;
+    }
+  else
+    {
+      cond_code = swap_tree_comparison (TREE_CODE (cond));
+      op = rhs;
+      limit = lhs;
+    }
+
+  extract_range_for_var_from_comparison_expr (var, cond_code, op,
+					      limit, vr_p);
+}
+
+/* Extract range information from SSA name VAR and store it in VR.  If
+   VAR has an interesting range, use it.  Otherwise, create the
+   range [VAR, VAR] and return it.  This is useful in situations where
+   we may have conditionals testing values of VARYING names.  For
+   instance,
+
+	x_3 = y_5;
+	if (x_3 > y_5)
+	  ...
+
+    Even if y_5 is deemed VARYING, we can determine that x_3 > y_5 is
+    always false.  */
+
+void
+vr_values::extract_range_from_ssa_name (value_range *vr, tree var)
+{
+  value_range *var_vr = get_value_range (var);
+
+  /* Even a VARYING name yields the useful singleton [VAR, VAR].  */
+  if (var_vr->type == VR_VARYING)
+    set_value_range (vr, VR_RANGE, var, var, NULL);
+  else
+    copy_value_range (vr, var_vr);
+
+  add_equivalence (&vr->equiv, var);
+}
+
+/* Extract range information from a binary expression OP0 CODE OP1 based on
+   the ranges of each of its operands with resulting type EXPR_TYPE.
+   The resulting range is stored in *VR.  */
+
+void
+vr_values::extract_range_from_binary_expr (value_range *vr,
+					   enum tree_code code,
+					   tree expr_type, tree op0, tree op1)
+{
+  value_range vr0 = VR_INITIALIZER;
+  value_range vr1 = VR_INITIALIZER;
+
+  /* Get value ranges for each operand.  For constant operands, create
+     a new value range with the operand to simplify processing.  */
+  if (TREE_CODE (op0) == SSA_NAME)
+    vr0 = *(get_value_range (op0));
+  else if (is_gimple_min_invariant (op0))
+    set_value_range_to_value (&vr0, op0, NULL);
+  else
+    set_value_range_to_varying (&vr0);
+
+  if (TREE_CODE (op1) == SSA_NAME)
+    vr1 = *(get_value_range (op1));
+  else if (is_gimple_min_invariant (op1))
+    set_value_range_to_value (&vr1, op1, NULL);
+  else
+    set_value_range_to_varying (&vr1);
+
+  /* First attempt: let the generic worker combine the two ranges.  */
+  extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &vr1);
+
+  /* Try harder for PLUS and MINUS if the range of one operand is symbolic
+     and based on the other operand, for example if it was deduced from a
+     symbolic comparison.  When a bound of the range of the first operand
+     is invariant, we set the corresponding bound of the new range to INF
+     in order to avoid recursing on the range of the second operand.  */
+  if (vr->type == VR_VARYING
+      && (code == PLUS_EXPR || code == MINUS_EXPR)
+      && TREE_CODE (op1) == SSA_NAME
+      && vr0.type == VR_RANGE
+      && symbolic_range_based_on_p (&vr0, op1))
+    {
+      const bool minus_p = (code == MINUS_EXPR);
+      value_range n_vr1 = VR_INITIALIZER;
+
+      /* Try with VR0 and [-INF, OP1].  */
+      if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
+	set_value_range (&n_vr1, VR_RANGE, vrp_val_min (expr_type), op1, NULL);
+
+      /* Try with VR0 and [OP1, +INF].  */
+      else if (is_gimple_min_invariant (minus_p ? vr0.min : vr0.max))
+	set_value_range (&n_vr1, VR_RANGE, op1, vrp_val_max (expr_type), NULL);
+
+      /* Try with VR0 and [OP1, OP1].  */
+      else
+	set_value_range (&n_vr1, VR_RANGE, op1, op1, NULL);
+
+      extract_range_from_binary_expr_1 (vr, code, expr_type, &vr0, &n_vr1);
+    }
+
+  /* Likewise, with the roles of the two operands swapped: VR1 symbolic
+     and based on OP0.  */
+  if (vr->type == VR_VARYING
+      && (code == PLUS_EXPR || code == MINUS_EXPR)
+      && TREE_CODE (op0) == SSA_NAME
+      && vr1.type == VR_RANGE
+      && symbolic_range_based_on_p (&vr1, op0))
+    {
+      const bool minus_p = (code == MINUS_EXPR);
+      value_range n_vr0 = VR_INITIALIZER;
+
+      /* Try with [-INF, OP0] and VR1.  */
+      if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
+	set_value_range (&n_vr0, VR_RANGE, vrp_val_min (expr_type), op0, NULL);
+
+      /* Try with [OP0, +INF] and VR1.  */
+      else if (is_gimple_min_invariant (minus_p ? vr1.min : vr1.max))
+	set_value_range (&n_vr0, VR_RANGE, op0, vrp_val_max (expr_type), NULL);
+
+      /* Try with [OP0, OP0] and VR1.  */
+      else
+	set_value_range (&n_vr0, VR_RANGE, op0, op0, NULL);
+
+      extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
+    }
+
+  /* If we didn't derive a range for MINUS_EXPR, and
+     op1's range is ~[op0,op0] or vice-versa, then we
+     can derive a non-null range.  This happens often for
+     pointer subtraction.  */
+  if (vr->type == VR_VARYING
+      && code == MINUS_EXPR
+      && TREE_CODE (op0) == SSA_NAME
+      && ((vr0.type == VR_ANTI_RANGE
+	   && vr0.min == op1
+	   && vr0.min == vr0.max)
+	  || (vr1.type == VR_ANTI_RANGE
+	      && vr1.min == op0
+	      && vr1.min == vr1.max)))
+    set_value_range_to_nonnull (vr, TREE_TYPE (op0));
+}
+
+/* Extract range information from a unary expression CODE OP0 based on
+   the range of its operand with resulting type TYPE.
+   The resulting range is stored in *VR.  */
+
+void
+vr_values::extract_range_from_unary_expr (value_range *vr, enum tree_code code,
+					  tree type, tree op0)
+{
+  /* Wrap the operand in a range of its own, then hand everything off
+     to the common worker.  */
+  value_range operand_vr = VR_INITIALIZER;
+
+  if (TREE_CODE (op0) == SSA_NAME)
+    operand_vr = *(get_value_range (op0));
+  else if (is_gimple_min_invariant (op0))
+    set_value_range_to_value (&operand_vr, op0, NULL);
+  else
+    set_value_range_to_varying (&operand_vr);
+
+  /* Dispatch to the global (non-member) implementation.  */
+  ::extract_range_from_unary_expr (vr, code, type, &operand_vr,
+				   TREE_TYPE (op0));
+}
+
+
+/* Extract range information from a conditional expression STMT based on
+   the ranges of each of its operands and the expression code.  */
+
+void
+vr_values::extract_range_from_cond_expr (value_range *vr, gassign *stmt)
+{
+  value_range true_vr = VR_INITIALIZER;
+  value_range false_vr = VR_INITIALIZER;
+
+  /* Compute a range for the value chosen when the condition holds.
+     Constants get a singleton range to simplify processing.  */
+  tree true_val = gimple_assign_rhs2 (stmt);
+  if (TREE_CODE (true_val) == SSA_NAME)
+    true_vr = *(get_value_range (true_val));
+  else if (is_gimple_min_invariant (true_val))
+    set_value_range_to_value (&true_vr, true_val, NULL);
+  else
+    set_value_range_to_varying (&true_vr);
+
+  /* Likewise for the value chosen when the condition is false.  */
+  tree false_val = gimple_assign_rhs3 (stmt);
+  if (TREE_CODE (false_val) == SSA_NAME)
+    false_vr = *(get_value_range (false_val));
+  else if (is_gimple_min_invariant (false_val))
+    set_value_range_to_value (&false_vr, false_val, NULL);
+  else
+    set_value_range_to_varying (&false_vr);
+
+  /* Either arm may be selected, so the result is the union of the two
+     operand ranges.  */
+  copy_value_range (vr, &true_vr);
+  vrp_meet (vr, &false_vr);
+}
+
+
+/* Extract range information from a comparison expression EXPR based
+   on the range of its operand and the expression code.  */
+
+void
+vr_values::extract_range_from_comparison (value_range *vr, enum tree_code code,
+					  tree type, tree op0, tree op1)
+{
+  bool sop;
+  tree val = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, false,
+						      &sop, NULL);
+  if (!val)
+    {
+      /* We could not fold the comparison, but its result is still
+	 necessarily a truth value.  */
+      set_value_range_to_truthvalue (vr, type);
+      return;
+    }
+
+  /* Since this expression was found on the RHS of an assignment,
+     its type may be different from _Bool.  Convert VAL to EXPR's
+     type.  */
+  val = fold_convert (type, val);
+  if (is_gimple_min_invariant (val))
+    set_value_range_to_value (vr, val, vr->equiv);
+  else
+    set_value_range (vr, VR_RANGE, val, val, vr->equiv);
+}
+
+/* Helper function for simplify_internal_call_using_ranges and
+   extract_range_basic.  Return true if OP0 SUBCODE OP1 for
+   SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
+   always overflow.  Set *OVF to true if it is known to always
+   overflow.  */
+
+bool
+vr_values::check_for_binary_op_overflow (enum tree_code subcode, tree type,
+					 tree op0, tree op1, bool *ovf)
+{
+  value_range vr0 = VR_INITIALIZER;
+  value_range vr1 = VR_INITIALIZER;
+  if (TREE_CODE (op0) == SSA_NAME)
+    vr0 = *get_value_range (op0);
+  else if (TREE_CODE (op0) == INTEGER_CST)
+    set_value_range_to_value (&vr0, op0, NULL);
+  else
+    set_value_range_to_varying (&vr0);
+
+  if (TREE_CODE (op1) == SSA_NAME)
+    vr1 = *get_value_range (op1);
+  else if (TREE_CODE (op1) == INTEGER_CST)
+    set_value_range_to_value (&vr1, op1, NULL);
+  else
+    set_value_range_to_varying (&vr1);
+
+  /* If an operand's range is not a usable constant integer range,
+     conservatively widen it to the whole range of its type.  */
+  if (!range_int_cst_p (&vr0)
+      || TREE_OVERFLOW (vr0.min)
+      || TREE_OVERFLOW (vr0.max))
+    {
+      vr0.min = vrp_val_min (TREE_TYPE (op0));
+      vr0.max = vrp_val_max (TREE_TYPE (op0));
+    }
+  if (!range_int_cst_p (&vr1)
+      || TREE_OVERFLOW (vr1.min)
+      || TREE_OVERFLOW (vr1.max))
+    {
+      vr1.min = vrp_val_min (TREE_TYPE (op1));
+      vr1.max = vrp_val_max (TREE_TYPE (op1));
+    }
+
+  /* Probe overflow at the extreme corners of the two ranges.  For
+     MINUS_EXPR the extreme result pairs MIN with MAX, hence the
+     swapped second operand.  If the corners disagree, the result is
+     not uniform and we must give up.  */
+  *ovf = arith_overflowed_p (subcode, type, vr0.min,
+			     subcode == MINUS_EXPR ? vr1.max : vr1.min);
+  if (arith_overflowed_p (subcode, type, vr0.max,
+			  subcode == MINUS_EXPR ? vr1.min : vr1.max) != *ovf)
+    return false;
+  /* Multiplication is not monotonic in the same way, so the two other
+     corners have to be checked as well.  */
+  if (subcode == MULT_EXPR)
+    {
+      if (arith_overflowed_p (subcode, type, vr0.min, vr1.max) != *ovf
+	  || arith_overflowed_p (subcode, type, vr0.max, vr1.min) != *ovf)
+	return false;
+    }
+  if (*ovf)
+    {
+      /* So far we found that there is an overflow on the boundaries.
+	 That doesn't prove that there is an overflow even for all values
+	 in between the boundaries.  For that compute widest_int range
+	 of the result and see if it doesn't overlap the range of
+	 type.  */
+      widest_int wmin, wmax;
+      widest_int w[4];
+      int i;
+      w[0] = wi::to_widest (vr0.min);
+      w[1] = wi::to_widest (vr0.max);
+      w[2] = wi::to_widest (vr1.min);
+      w[3] = wi::to_widest (vr1.max);
+      /* Evaluate the operation at the four corner combinations in
+	 arbitrary-precision arithmetic (so it cannot itself wrap) and
+	 track the overall min/max.  */
+      for (i = 0; i < 4; i++)
+	{
+	  widest_int wt;
+	  switch (subcode)
+	    {
+	    case PLUS_EXPR:
+	      wt = wi::add (w[i & 1], w[2 + (i & 2) / 2]);
+	      break;
+	    case MINUS_EXPR:
+	      wt = wi::sub (w[i & 1], w[2 + (i & 2) / 2]);
+	      break;
+	    case MULT_EXPR:
+	      wt = wi::mul (w[i & 1], w[2 + (i & 2) / 2]);
+	      break;
+	    default:
+	      gcc_unreachable ();
+	    }
+	  if (i == 0)
+	    {
+	      wmin = wt;
+	      wmax = wt;
+	    }
+	  else
+	    {
+	      wmin = wi::smin (wmin, wt);
+	      wmax = wi::smax (wmax, wt);
+	    }
+	}
+      /* The result of op0 CODE op1 is known to be in range
+	 [wmin, wmax].  */
+      widest_int wtmin = wi::to_widest (vrp_val_min (type));
+      widest_int wtmax = wi::to_widest (vrp_val_max (type));
+      /* If all values in [wmin, wmax] are smaller than
+	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
+	 the arithmetic operation will always overflow.  */
+      if (wmax < wtmin || wmin > wtmax)
+	return true;
+      return false;
+    }
+  return true;
+}
+
+/* Try to derive a nonnegative or nonzero range out of STMT relying
+   primarily on generic routines in fold in conjunction with range data.
+   Store the result in *VR.  */
+
+void
+vr_values::extract_range_basic (value_range *vr, gimple *stmt)
+{
+  bool sop;
+  tree type = gimple_expr_type (stmt);
+
+  if (is_gimple_call (stmt))
+    {
+      tree arg;
+      int mini, maxi, zerov = 0, prec;
+      enum tree_code subcode = ERROR_MARK;
+      combined_fn cfn = gimple_call_combined_fn (stmt);
+      scalar_int_mode mode;
+
+      switch (cfn)
+	{
+	case CFN_BUILT_IN_CONSTANT_P:
+	  /* If the call is __builtin_constant_p and the argument is a
+	     function parameter resolve it to false.  This avoids bogus
+	     array bound warnings.
+	     ??? We could do this as early as inlining is finished.  */
+	  arg = gimple_call_arg (stmt, 0);
+	  if (TREE_CODE (arg) == SSA_NAME
+	      && SSA_NAME_IS_DEFAULT_DEF (arg)
+	      && TREE_CODE (SSA_NAME_VAR (arg)) == PARM_DECL
+	      && cfun->after_inlining)
+	    {
+	      set_value_range_to_null (vr, type);
+	      return;
+	    }
+	  break;
+	  /* Both __builtin_ffs* and __builtin_popcount return
+	     [0, prec].  */
+	CASE_CFN_FFS:
+	CASE_CFN_POPCOUNT:
+	  arg = gimple_call_arg (stmt, 0);
+	  prec = TYPE_PRECISION (TREE_TYPE (arg));
+	  mini = 0;
+	  maxi = prec;
+	  if (TREE_CODE (arg) == SSA_NAME)
+	    {
+	      value_range *vr0 = get_value_range (arg);
+	      /* If arg is non-zero, then ffs or popcount
+		 are non-zero.  */
+	      if ((vr0->type == VR_RANGE
+		   && range_includes_zero_p (vr0->min, vr0->max) == 0)
+		  || (vr0->type == VR_ANTI_RANGE
+		      && range_includes_zero_p (vr0->min, vr0->max) == 1))
+		mini = 1;
+	      /* If some high bits are known to be zero,
+		 we can decrease the maximum.  */
+	      if (vr0->type == VR_RANGE
+		  && TREE_CODE (vr0->max) == INTEGER_CST
+		  && !operand_less_p (vr0->min,
+				      build_zero_cst (TREE_TYPE (vr0->min))))
+		maxi = tree_floor_log2 (vr0->max) + 1;
+	    }
+	  goto bitop_builtin;
+	  /* __builtin_parity* returns [0, 1].  */
+	CASE_CFN_PARITY:
+	  mini = 0;
+	  maxi = 1;
+	  goto bitop_builtin;
+	  /* __builtin_c[lt]z* return [0, prec-1], except for
+	     when the argument is 0, but that is undefined behavior.
+	     On many targets where the CLZ RTL or optab value is defined
+	     for 0 the value is prec, so include that in the range
+	     by default.  */
+	CASE_CFN_CLZ:
+	  arg = gimple_call_arg (stmt, 0);
+	  prec = TYPE_PRECISION (TREE_TYPE (arg));
+	  mini = 0;
+	  maxi = prec;
+	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+	  if (optab_handler (clz_optab, mode) != CODE_FOR_nothing
+	      && CLZ_DEFINED_VALUE_AT_ZERO (mode, zerov)
+	      /* Handle only the single common value.  */
+	      && zerov != prec)
+	    /* Magic value to give up, unless vr0 proves
+	       arg is non-zero.  */
+	    mini = -2;
+	  if (TREE_CODE (arg) == SSA_NAME)
+	    {
+	      value_range *vr0 = get_value_range (arg);
+	      /* From clz of VR_RANGE minimum we can compute
+		 result maximum.  */
+	      if (vr0->type == VR_RANGE
+		  && TREE_CODE (vr0->min) == INTEGER_CST)
+		{
+		  maxi = prec - 1 - tree_floor_log2 (vr0->min);
+		  if (maxi != prec)
+		    mini = 0;
+		}
+	      else if (vr0->type == VR_ANTI_RANGE
+		       && integer_zerop (vr0->min))
+		{
+		  maxi = prec - 1;
+		  mini = 0;
+		}
+	      if (mini == -2)
+		break;
+	      /* From clz of VR_RANGE maximum we can compute
+		 result minimum.  */
+	      if (vr0->type == VR_RANGE
+		  && TREE_CODE (vr0->max) == INTEGER_CST)
+		{
+		  mini = prec - 1 - tree_floor_log2 (vr0->max);
+		  if (mini == prec)
+		    break;
+		}
+	    }
+	  if (mini == -2)
+	    break;
+	  goto bitop_builtin;
+	  /* __builtin_ctz* return [0, prec-1], except for
+	     when the argument is 0, but that is undefined behavior.
+	     If there is a ctz optab for this mode and
+	     CTZ_DEFINED_VALUE_AT_ZERO, include that in the range,
+	     otherwise just assume 0 won't be seen.  */
+	CASE_CFN_CTZ:
+	  arg = gimple_call_arg (stmt, 0);
+	  prec = TYPE_PRECISION (TREE_TYPE (arg));
+	  mini = 0;
+	  maxi = prec - 1;
+	  mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg));
+	  if (optab_handler (ctz_optab, mode) != CODE_FOR_nothing
+	      && CTZ_DEFINED_VALUE_AT_ZERO (mode, zerov))
+	    {
+	      /* Handle only the two common values.  */
+	      if (zerov == -1)
+		mini = -1;
+	      else if (zerov == prec)
+		maxi = prec;
+	      else
+		/* Magic value to give up, unless vr0 proves
+		   arg is non-zero.  */
+		mini = -2;
+	    }
+	  if (TREE_CODE (arg) == SSA_NAME)
+	    {
+	      value_range *vr0 = get_value_range (arg);
+	      /* If arg is non-zero, then use [0, prec - 1].  */
+	      if ((vr0->type == VR_RANGE
+		   && integer_nonzerop (vr0->min))
+		  || (vr0->type == VR_ANTI_RANGE
+		      && integer_zerop (vr0->min)))
+		{
+		  mini = 0;
+		  maxi = prec - 1;
+		}
+	      /* If some high bits are known to be zero,
+		 we can decrease the result maximum.  */
+	      if (vr0->type == VR_RANGE
+		  && TREE_CODE (vr0->max) == INTEGER_CST)
+		{
+		  maxi = tree_floor_log2 (vr0->max);
+		  /* For vr0 [0, 0] give up.  */
+		  if (maxi == -1)
+		    break;
+		}
+	    }
+	  if (mini == -2)
+	    break;
+	  goto bitop_builtin;
+	  /* __builtin_clrsb* returns [0, prec-1].  */
+	CASE_CFN_CLRSB:
+	  arg = gimple_call_arg (stmt, 0);
+	  prec = TYPE_PRECISION (TREE_TYPE (arg));
+	  mini = 0;
+	  maxi = prec - 1;
+	  goto bitop_builtin;
+	bitop_builtin:
+	  /* Common exit for all bit-twiddling builtins: the result is
+	     the computed [MINI, MAXI] range.  */
+	  set_value_range (vr, VR_RANGE, build_int_cst (type, mini),
+			   build_int_cst (type, maxi), NULL);
+	  return;
+	case CFN_UBSAN_CHECK_ADD:
+	  subcode = PLUS_EXPR;
+	  break;
+	case CFN_UBSAN_CHECK_SUB:
+	  subcode = MINUS_EXPR;
+	  break;
+	case CFN_UBSAN_CHECK_MUL:
+	  subcode = MULT_EXPR;
+	  break;
+	case CFN_GOACC_DIM_SIZE:
+	case CFN_GOACC_DIM_POS:
+	  /* Optimizing these two internal functions helps the loop
+	     optimizer eliminate outer comparisons.  Size is [1,N]
+	     and pos is [0,N-1].  */
+	  {
+	    bool is_pos = cfn == CFN_GOACC_DIM_POS;
+	    int axis = oacc_get_ifn_dim_arg (stmt);
+	    int size = oacc_get_fn_dim_size (current_function_decl, axis);
+
+	    if (!size)
+	      /* If it's dynamic, the backend might know a hardware
+		 limitation.  */
+	      size = targetm.goacc.dim_limit (axis);
+
+	    tree type = TREE_TYPE (gimple_call_lhs (stmt));
+	    set_value_range (vr, VR_RANGE,
+			     build_int_cst (type, is_pos ? 0 : 1),
+			     size ? build_int_cst (type, size - is_pos)
+				  : vrp_val_max (type), NULL);
+	  }
+	  return;
+	case CFN_BUILT_IN_STRLEN:
+	  /* strlen results fit in ptrdiff_t, so when the lhs has the
+	     same precision we know the result is [0, PTRDIFF_MAX - 1].  */
+	  if (tree lhs = gimple_call_lhs (stmt))
+	    if (ptrdiff_type_node
+		&& (TYPE_PRECISION (ptrdiff_type_node)
+		    == TYPE_PRECISION (TREE_TYPE (lhs))))
+	      {
+		tree type = TREE_TYPE (lhs);
+		tree max = vrp_val_max (ptrdiff_type_node);
+		wide_int wmax = wi::to_wide (max, TYPE_PRECISION (TREE_TYPE (max)));
+		tree range_min = build_zero_cst (type);
+		tree range_max = wide_int_to_tree (type, wmax - 1);
+		set_value_range (vr, VR_RANGE, range_min, range_max, NULL);
+		return;
+	      }
+	  break;
+	default:
+	  break;
+	}
+      /* UBSAN_CHECK_{ADD,SUB,MUL}: evaluate the arithmetic with
+	 wrapping semantics to derive a range for the result.  */
+      if (subcode != ERROR_MARK)
+	{
+	  bool saved_flag_wrapv = flag_wrapv;
+	  /* Pretend the arithmetics is wrapping.  If there is
+	     any overflow, we'll complain, but will actually do
+	     wrapping operation.  */
+	  flag_wrapv = 1;
+	  extract_range_from_binary_expr (vr, subcode, type,
+					  gimple_call_arg (stmt, 0),
+					  gimple_call_arg (stmt, 1));
+	  flag_wrapv = saved_flag_wrapv;
+
+	  /* If for both arguments vrp_valueize returned non-NULL,
+	     this should have been already folded and if not, it
+	     wasn't folded because of overflow.  Avoid removing the
+	     UBSAN_CHECK_* calls in that case.  */
+	  if (vr->type == VR_RANGE
+	      && (vr->min == vr->max
+		  || operand_equal_p (vr->min, vr->max, 0)))
+	    set_value_range_to_varying (vr);
+	  return;
+	}
+    }
+  /* Handle extraction of the two results (result of arithmetics and
+     a flag whether arithmetics overflowed) from {ADD,SUB,MUL}_OVERFLOW
+     internal function.  Similarly from ATOMIC_COMPARE_EXCHANGE.  */
+  else if (is_gimple_assign (stmt)
+	   && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
+	       || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR)
+	   && INTEGRAL_TYPE_P (type))
+    {
+      enum tree_code code = gimple_assign_rhs_code (stmt);
+      tree op = gimple_assign_rhs1 (stmt);
+      if (TREE_CODE (op) == code && TREE_CODE (TREE_OPERAND (op, 0)) == SSA_NAME)
+	{
+	  gimple *g = SSA_NAME_DEF_STMT (TREE_OPERAND (op, 0));
+	  if (is_gimple_call (g) && gimple_call_internal_p (g))
+	    {
+	      enum tree_code subcode = ERROR_MARK;
+	      switch (gimple_call_internal_fn (g))
+		{
+		case IFN_ADD_OVERFLOW:
+		  subcode = PLUS_EXPR;
+		  break;
+		case IFN_SUB_OVERFLOW:
+		  subcode = MINUS_EXPR;
+		  break;
+		case IFN_MUL_OVERFLOW:
+		  subcode = MULT_EXPR;
+		  break;
+		case IFN_ATOMIC_COMPARE_EXCHANGE:
+		  if (code == IMAGPART_EXPR)
+		    {
+		      /* This is the boolean return value whether compare and
+			 exchange changed anything or not.  */
+		      set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+				       build_int_cst (type, 1), NULL);
+		      return;
+		    }
+		  break;
+		default:
+		  break;
+		}
+	      if (subcode != ERROR_MARK)
+		{
+		  tree op0 = gimple_call_arg (g, 0);
+		  tree op1 = gimple_call_arg (g, 1);
+		  /* IMAGPART_EXPR is the overflow flag; REALPART_EXPR is
+		     the wrapped arithmetic result.  */
+		  if (code == IMAGPART_EXPR)
+		    {
+		      bool ovf = false;
+		      if (check_for_binary_op_overflow (subcode, type,
+							op0, op1, &ovf))
+			set_value_range_to_value (vr,
+						  build_int_cst (type, ovf),
+						  NULL);
+		      else if (TYPE_PRECISION (type) == 1
+			       && !TYPE_UNSIGNED (type))
+			set_value_range_to_varying (vr);
+		      else
+			set_value_range (vr, VR_RANGE, build_int_cst (type, 0),
+					 build_int_cst (type, 1), NULL);
+		    }
+		  else if (types_compatible_p (type, TREE_TYPE (op0))
+			   && types_compatible_p (type, TREE_TYPE (op1)))
+		    {
+		      bool saved_flag_wrapv = flag_wrapv;
+		      /* Pretend the arithmetics is wrapping.  If there is
+			 any overflow, IMAGPART_EXPR will be set.  */
+		      flag_wrapv = 1;
+		      extract_range_from_binary_expr (vr, subcode, type,
+						      op0, op1);
+		      flag_wrapv = saved_flag_wrapv;
+		    }
+		  else
+		    {
+		      value_range vr0 = VR_INITIALIZER;
+		      value_range vr1 = VR_INITIALIZER;
+		      bool saved_flag_wrapv = flag_wrapv;
+		      /* Pretend the arithmetics is wrapping.  If there is
+			 any overflow, IMAGPART_EXPR will be set.  */
+		      flag_wrapv = 1;
+		      extract_range_from_unary_expr (&vr0, NOP_EXPR,
+						     type, op0);
+		      extract_range_from_unary_expr (&vr1, NOP_EXPR,
+						     type, op1);
+		      extract_range_from_binary_expr_1 (vr, subcode, type,
+							&vr0, &vr1);
+		      flag_wrapv = saved_flag_wrapv;
+		    }
+		  return;
+		}
+	    }
+	}
+    }
+  /* Fall back to generic fold-based deductions: nonnegative, nonnull,
+     or nothing at all.  */
+  if (INTEGRAL_TYPE_P (type)
+      && gimple_stmt_nonnegative_warnv_p (stmt, &sop))
+    set_value_range_to_nonnegative (vr, type);
+  else if (vrp_stmt_computes_nonzero (stmt))
+    set_value_range_to_nonnull (vr, type);
+  else
+    set_value_range_to_varying (vr);
+}
+
+
+/* Try to compute a useful range out of assignment STMT and store it
+   in *VR.  */
+
+void
+vr_values::extract_range_from_assignment (value_range *vr, gassign *stmt)
+{
+  enum tree_code code = gimple_assign_rhs_code (stmt);
+
+  /* Dispatch on the shape of the RHS.  */
+  if (code == ASSERT_EXPR)
+    extract_range_from_assert (vr, gimple_assign_rhs1 (stmt));
+  else if (code == SSA_NAME)
+    extract_range_from_ssa_name (vr, gimple_assign_rhs1 (stmt));
+  else if (TREE_CODE_CLASS (code) == tcc_binary)
+    extract_range_from_binary_expr (vr, code,
+				    gimple_expr_type (stmt),
+				    gimple_assign_rhs1 (stmt),
+				    gimple_assign_rhs2 (stmt));
+  else if (TREE_CODE_CLASS (code) == tcc_unary)
+    extract_range_from_unary_expr (vr, code,
+				   gimple_expr_type (stmt),
+				   gimple_assign_rhs1 (stmt));
+  else if (code == COND_EXPR)
+    extract_range_from_cond_expr (vr, stmt);
+  else if (TREE_CODE_CLASS (code) == tcc_comparison)
+    extract_range_from_comparison (vr, code,
+				   gimple_expr_type (stmt),
+				   gimple_assign_rhs1 (stmt),
+				   gimple_assign_rhs2 (stmt));
+  else if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS
+	   && is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
+    set_value_range_to_value (vr, gimple_assign_rhs1 (stmt), NULL);
+  else
+    set_value_range_to_varying (vr);
+
+  /* If nothing specific was derived, try the generic fold-based
+     deductions as a last resort.  */
+  if (vr->type == VR_VARYING)
+    extract_range_basic (vr, stmt);
+}
+
+/* Given two numeric value ranges VR0, VR1 and a comparison code COMP:
+
+   - Return BOOLEAN_TRUE_NODE if VR0 COMP VR1 always returns true for
+     all the values in the ranges.
+
+   - Return BOOLEAN_FALSE_NODE if the comparison always returns false.
+
+   - Return NULL_TREE if it is not always possible to determine the
+     value of the comparison.
+
+   Also set *STRICT_OVERFLOW_P to indicate whether comparision evaluation
+   assumed signed overflow is undefined.
+
+   Note: compare_values_warnv returns -2 when two values cannot be
+   compared at compile time; that encoding is relied on below.  */
+
+
+static tree
+compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
+		bool *strict_overflow_p)
+{
+  /* VARYING or UNDEFINED ranges cannot be compared.  */
+  if (vr0->type == VR_VARYING
+      || vr0->type == VR_UNDEFINED
+      || vr1->type == VR_VARYING
+      || vr1->type == VR_UNDEFINED)
+    return NULL_TREE;
+
+  /* Anti-ranges need to be handled separately.  */
+  if (vr0->type == VR_ANTI_RANGE || vr1->type == VR_ANTI_RANGE)
+    {
+      /* If both are anti-ranges, then we cannot compute any
+	 comparison.  */
+      if (vr0->type == VR_ANTI_RANGE && vr1->type == VR_ANTI_RANGE)
+	return NULL_TREE;
+
+      /* These comparisons are never statically computable.  */
+      if (comp == GT_EXPR
+	  || comp == GE_EXPR
+	  || comp == LT_EXPR
+	  || comp == LE_EXPR)
+	return NULL_TREE;
+
+      /* Equality can be computed only between a range and an
+	 anti-range.  ~[VAL1, VAL2] == [VAL1, VAL2] is always false.  */
+      if (vr0->type == VR_RANGE)
+	{
+	  /* To simplify processing, make VR0 the anti-range.  */
+	  value_range *tmp = vr0;
+	  vr0 = vr1;
+	  vr1 = tmp;
+	}
+
+      gcc_assert (comp == NE_EXPR || comp == EQ_EXPR);
+
+      if (compare_values_warnv (vr0->min, vr1->min, strict_overflow_p) == 0
+	  && compare_values_warnv (vr0->max, vr1->max, strict_overflow_p) == 0)
+	return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
+
+      return NULL_TREE;
+    }
+
+  /* Simplify processing.  If COMP is GT_EXPR or GE_EXPR, switch the
+     operands around and change the comparison code.  */
+  if (comp == GT_EXPR || comp == GE_EXPR)
+    {
+      comp = (comp == GT_EXPR) ? LT_EXPR : LE_EXPR;
+      std::swap (vr0, vr1);
+    }
+
+  if (comp == EQ_EXPR)
+    {
+      /* Equality may only be computed if both ranges represent
+	 exactly one value.  */
+      if (compare_values_warnv (vr0->min, vr0->max, strict_overflow_p) == 0
+	  && compare_values_warnv (vr1->min, vr1->max, strict_overflow_p) == 0)
+	{
+	  int cmp_min = compare_values_warnv (vr0->min, vr1->min,
+					      strict_overflow_p);
+	  int cmp_max = compare_values_warnv (vr0->max, vr1->max,
+					      strict_overflow_p);
+	  if (cmp_min == 0 && cmp_max == 0)
+	    return boolean_true_node;
+	  else if (cmp_min != -2 && cmp_max != -2)
+	    return boolean_false_node;
+	}
+      /* If [V0_MIN, V1_MAX] < [V1_MIN, V1_MAX] then V0 != V1.  */
+      else if (compare_values_warnv (vr0->min, vr1->max,
+				     strict_overflow_p) == 1
+	       || compare_values_warnv (vr1->min, vr0->max,
+					strict_overflow_p) == 1)
+	return boolean_false_node;
+
+      return NULL_TREE;
+    }
+  else if (comp == NE_EXPR)
+    {
+      int cmp1, cmp2;
+
+      /* If VR0 is completely to the left or completely to the right
+	 of VR1, they are always different.  Notice that we need to
+	 make sure that both comparisons yield similar results to
+	 avoid comparing values that cannot be compared at
+	 compile-time.  */
+      cmp1 = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
+      cmp2 = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
+      if ((cmp1 == -1 && cmp2 == -1) || (cmp1 == 1 && cmp2 == 1))
+	return boolean_true_node;
+
+      /* If VR0 and VR1 represent a single value and are identical,
+	 return false.  */
+      else if (compare_values_warnv (vr0->min, vr0->max,
+				     strict_overflow_p) == 0
+	       && compare_values_warnv (vr1->min, vr1->max,
+					strict_overflow_p) == 0
+	       && compare_values_warnv (vr0->min, vr1->min,
+					strict_overflow_p) == 0
+	       && compare_values_warnv (vr0->max, vr1->max,
+					strict_overflow_p) == 0)
+	return boolean_false_node;
+
+      /* Otherwise, they may or may not be different.  */
+      else
+	return NULL_TREE;
+    }
+  else if (comp == LT_EXPR || comp == LE_EXPR)
+    {
+      int tst;
+
+      /* If VR0 is to the left of VR1, return true.  */
+      tst = compare_values_warnv (vr0->max, vr1->min, strict_overflow_p);
+      if ((comp == LT_EXPR && tst == -1)
+	  || (comp == LE_EXPR && (tst == -1 || tst == 0)))
+	return boolean_true_node;
+
+      /* If VR0 is to the right of VR1, return false.  */
+      tst = compare_values_warnv (vr0->min, vr1->max, strict_overflow_p);
+      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
+	  || (comp == LE_EXPR && tst == 1))
+	return boolean_false_node;
+
+      /* Otherwise, we don't know.  */
+      return NULL_TREE;
+    }
+
+  /* All comparison codes are normalized to the cases above.  */
+  gcc_unreachable ();
+}
+
+/* Given a value range VR, a value VAL and a comparison code COMP, return
+   BOOLEAN_TRUE_NODE if VR COMP VAL always returns true for all the
+   values in VR.  Return BOOLEAN_FALSE_NODE if the comparison
+   always returns false.  Return NULL_TREE if it is not always
+   possible to determine the value of the comparison.  Also set
+   *STRICT_OVERFLOW_P to indicate whether comparison evaluation
+   assumed signed overflow is undefined.  */
+
+static tree
+compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
+                          bool *strict_overflow_p)
+{
+  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
+    return NULL_TREE;
+
+  /* Anti-ranges need to be handled separately.  */
+  if (vr->type == VR_ANTI_RANGE)
+    {
+      /* For anti-ranges, the only predicates that we can compute at
+         compile time are equality and inequality.  */
+      if (comp == GT_EXPR
+          || comp == GE_EXPR
+          || comp == LT_EXPR
+          || comp == LE_EXPR)
+        return NULL_TREE;
+
+      /* ~[VAL_1, VAL_2] OP VAL is known if VAL_1 <= VAL <= VAL_2.  */
+      if (value_inside_range (val, vr->min, vr->max) == 1)
+        return (comp == NE_EXPR) ? boolean_true_node : boolean_false_node;
+
+      return NULL_TREE;
+    }
+
+  if (comp == EQ_EXPR)
+    {
+      /* EQ_EXPR may only be computed if VR represents exactly
+         one value.  */
+      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0)
+        {
+          int cmp = compare_values_warnv (vr->min, val, strict_overflow_p);
+          if (cmp == 0)
+            return boolean_true_node;
+          /* -1/1 mean a definite ordering; 2 appears to mean "known
+             unequal but unordered" (see compare_values_warnv).  Only -2
+             ("cannot compare") leaves the result undetermined.  */
+          else if (cmp == -1 || cmp == 1 || cmp == 2)
+            return boolean_false_node;
+        }
+      else if (compare_values_warnv (val, vr->min, strict_overflow_p) == -1
+               || compare_values_warnv (vr->max, val, strict_overflow_p) == -1)
+        return boolean_false_node;
+
+      return NULL_TREE;
+    }
+  else if (comp == NE_EXPR)
+    {
+      /* If VAL is not inside VR, then they are always different.  */
+      if (compare_values_warnv (vr->max, val, strict_overflow_p) == -1
+          || compare_values_warnv (vr->min, val, strict_overflow_p) == 1)
+        return boolean_true_node;
+
+      /* If VR represents exactly one value equal to VAL, then return
+         false.  */
+      if (compare_values_warnv (vr->min, vr->max, strict_overflow_p) == 0
+          && compare_values_warnv (vr->min, val, strict_overflow_p) == 0)
+        return boolean_false_node;
+
+      /* Otherwise, they may or may not be different.  */
+      return NULL_TREE;
+    }
+  else if (comp == LT_EXPR || comp == LE_EXPR)
+    {
+      int tst;
+
+      /* If VR is to the left of VAL, return true.  */
+      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
+      if ((comp == LT_EXPR && tst == -1)
+          || (comp == LE_EXPR && (tst == -1 || tst == 0)))
+        return boolean_true_node;
+
+      /* If VR is to the right of VAL, return false.  */
+      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
+      if ((comp == LT_EXPR && (tst == 0 || tst == 1))
+          || (comp == LE_EXPR && tst == 1))
+        return boolean_false_node;
+
+      /* Otherwise, we don't know.  */
+      return NULL_TREE;
+    }
+  else if (comp == GT_EXPR || comp == GE_EXPR)
+    {
+      int tst;
+
+      /* If VR is to the right of VAL, return true.  */
+      tst = compare_values_warnv (vr->min, val, strict_overflow_p);
+      if ((comp == GT_EXPR && tst == 1)
+          || (comp == GE_EXPR && (tst == 0 || tst == 1)))
+        return boolean_true_node;
+
+      /* If VR is to the left of VAL, return false.  */
+      tst = compare_values_warnv (vr->max, val, strict_overflow_p);
+      if ((comp == GT_EXPR && (tst == -1 || tst == 0))
+          || (comp == GE_EXPR && tst == -1))
+        return boolean_false_node;
+
+      /* Otherwise, we don't know.  */
+      return NULL_TREE;
+    }
+
+  /* All comparison codes are handled above; anything else is a caller bug.  */
+  gcc_unreachable ();
+}
+/* Given a range VR, a LOOP and a variable VAR, determine whether it
+   would be profitable to adjust VR using scalar evolution information
+   for VAR.  If so, update VR with the new limits.  */
+
+void
+vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
+                                   gimple *stmt, tree var)
+{
+  tree init, step, chrec, tmin, tmax, min, max, type, tem;
+  enum ev_direction dir;
+
+  /* TODO.  Don't adjust anti-ranges.  An anti-range may provide
+     better opportunities than a regular range, but I'm not sure.  */
+  if (vr->type == VR_ANTI_RANGE)
+    return;
+
+  chrec = instantiate_parameters (loop, analyze_scalar_evolution (loop, var));
+
+  /* Like in PR19590, scev can return a constant function.  */
+  if (is_gimple_min_invariant (chrec))
+    {
+      set_value_range_to_value (vr, chrec, vr->equiv);
+      return;
+    }
+
+  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
+    return;
+
+  /* Refine INIT and STEP to singleton constants from their value
+     ranges where possible.  */
+  init = initial_condition_in_loop_num (chrec, loop->num);
+  tem = op_with_constant_singleton_value_range (init);
+  if (tem)
+    init = tem;
+  step = evolution_part_in_loop_num (chrec, loop->num);
+  tem = op_with_constant_singleton_value_range (step);
+  if (tem)
+    step = tem;
+
+  /* If STEP is symbolic, we can't know whether INIT will be the
+     minimum or maximum value in the range.  Also, unless INIT is
+     a simple expression, compare_values and possibly other functions
+     in tree-vrp won't be able to handle it.  */
+  if (step == NULL_TREE
+      || !is_gimple_min_invariant (step)
+      || !valid_value_p (init))
+    return;
+
+  dir = scev_direction (chrec);
+  if (/* Do not adjust ranges if we do not know whether the iv increases
+         or decreases,  ... */
+      dir == EV_DIR_UNKNOWN
+      /* ... or if it may wrap.  */
+      || scev_probably_wraps_p (NULL_TREE, init, step, stmt,
+                                get_chrec_loop (chrec), true))
+    return;
+
+  /* TMIN/TMAX start as the representable bounds of VAR's type and may
+     be tightened below from the loop's iteration estimate.  */
+  type = TREE_TYPE (var);
+  if (POINTER_TYPE_P (type) || !TYPE_MIN_VALUE (type))
+    tmin = lower_bound_in_type (type, type);
+  else
+    tmin = TYPE_MIN_VALUE (type);
+  if (POINTER_TYPE_P (type) || !TYPE_MAX_VALUE (type))
+    tmax = upper_bound_in_type (type, type);
+  else
+    tmax = TYPE_MAX_VALUE (type);
+
+  /* Try to use estimated number of iterations for the loop to constrain the
+     final value in the evolution.  */
+  if (TREE_CODE (step) == INTEGER_CST
+      && is_gimple_val (init)
+      && (TREE_CODE (init) != SSA_NAME
+          || get_value_range (init)->type == VR_RANGE))
+    {
+      widest_int nit;
+
+      /* We are only entering here for loop header PHI nodes, so using
+         the number of latch executions is the correct thing to use.  */
+      if (max_loop_iterations (loop, &nit))
+        {
+          value_range maxvr = VR_INITIALIZER;
+          signop sgn = TYPE_SIGN (TREE_TYPE (step));
+          bool overflow;
+
+          widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
+                                     &overflow);
+          /* If the multiplication overflowed we can't do a meaningful
+             adjustment.  Likewise if the result doesn't fit in the type
+             of the induction variable.  For a signed type we have to
+             check whether the result has the expected signedness which
+             is that of the step as number of iterations is unsigned.  */
+          if (!overflow
+              && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
+              && (sgn == UNSIGNED
+                  || wi::gts_p (wtmp, 0) == wi::gts_p (wi::to_wide (step), 0)))
+            {
+              tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
+              extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
+                                              TREE_TYPE (init), init, tem);
+              /* Likewise if the addition did.  */
+              if (maxvr.type == VR_RANGE)
+                {
+                  value_range initvr = VR_INITIALIZER;
+
+                  if (TREE_CODE (init) == SSA_NAME)
+                    initvr = *(get_value_range (init));
+                  else if (is_gimple_min_invariant (init))
+                    set_value_range_to_value (&initvr, init, NULL);
+                  else
+                    return;
+
+                  /* Check if init + nit * step overflows.  Though we checked
+                     scev {init, step}_loop doesn't wrap, it is not enough
+                     because the loop may exit immediately.  Overflow could
+                     happen in the plus expression in this case.  */
+                  if ((dir == EV_DIR_DECREASES
+                       && compare_values (maxvr.min, initvr.min) != -1)
+                      || (dir == EV_DIR_GROWS
+                          && compare_values (maxvr.max, initvr.max) != 1))
+                    return;
+
+                  tmin = maxvr.min;
+                  tmax = maxvr.max;
+                }
+            }
+        }
+    }
+
+  if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
+    {
+      min = tmin;
+      max = tmax;
+
+      /* For VARYING or UNDEFINED ranges, just about anything we get
+         from scalar evolutions should be better.  */
+
+      if (dir == EV_DIR_DECREASES)
+        max = init;
+      else
+        min = init;
+    }
+  else if (vr->type == VR_RANGE)
+    {
+      min = vr->min;
+      max = vr->max;
+
+      if (dir == EV_DIR_DECREASES)
+        {
+          /* INIT is the maximum value.  If INIT is lower than VR->MAX
+             but no smaller than VR->MIN, set VR->MAX to INIT.  */
+          if (compare_values (init, max) == -1)
+            max = init;
+
+          /* According to the loop information, the variable does not
+             overflow.  */
+          if (compare_values (min, tmin) == -1)
+            min = tmin;
+
+        }
+      else
+        {
+          /* If INIT is bigger than VR->MIN, set VR->MIN to INIT.  */
+          if (compare_values (init, min) == 1)
+            min = init;
+
+          if (compare_values (tmax, max) == -1)
+            max = tmax;
+        }
+    }
+  else
+    return;
+
+  /* If we just created an invalid range with the minimum
+     greater than the maximum, we fail conservatively.
+     This should happen only in unreachable
+     parts of code, or for invalid programs.  */
+  if (compare_values (min, max) == 1)
+    return;
+
+  /* Even for valid range info, sometimes overflow flag will leak in.
+     As GIMPLE IL should have no constants with TREE_OVERFLOW set, we
+     drop them.  */
+  if (TREE_OVERFLOW_P (min))
+    min = drop_tree_overflow (min);
+  if (TREE_OVERFLOW_P (max))
+    max = drop_tree_overflow (max);
+
+  set_value_range (vr, VR_RANGE, min, max, vr->equiv);
+}
+
+/* Dump value ranges of all SSA_NAMEs to FILE.  Entries without a
+   computed range are skipped; a trailing blank line is emitted.  */
+
+void
+vr_values::dump_all_value_ranges (FILE *file)
+{
+  for (size_t num = 0; num < num_vr_values; num++)
+    {
+      value_range *range = vr_value[num];
+      if (!range)
+        continue;
+
+      print_generic_expr (file, ssa_name (num));
+      fprintf (file, ": ");
+      dump_value_range (file, range);
+      fprintf (file, "\n");
+    }
+
+  fprintf (file, "\n");
+}
+
+/* Initialize VRP lattice.  Sized from the current num_ssa_names; the
+   equivalence bitmaps live on their own obstack.  */
+
+vr_values::vr_values () : vrp_value_range_pool ("Tree VRP value ranges")
+{
+  values_propagated = false;
+  num_vr_values = num_ssa_names;
+  /* XCNEWVEC zero-initializes, so every lattice entry starts out as
+     "no range computed yet" (NULL).  */
+  vr_value = XCNEWVEC (value_range *, num_vr_values);
+  vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
+  bitmap_obstack_initialize (&vrp_equiv_obstack);
+}
+
+/* Free VRP lattice.  The value_range objects themselves are released
+   with their allocation pool and obstack, not individually.  */
+
+vr_values::~vr_values ()
+{
+  /* Free allocated memory.  */
+  free (vr_value);
+  free (vr_phi_edge_counts);
+  bitmap_obstack_release (&vrp_equiv_obstack);
+  vrp_value_range_pool.release ();
+
+  /* So that we can distinguish between VRP data being available
+     and not available.  */
+  vr_value = NULL;
+  vr_phi_edge_counts = NULL;
+}
+
+
+/* A hack: file-scope pointer to the active vr_values instance so that
+   the free-function valueization callbacks below (which cannot capture
+   'this') can reach the lattice.  Set and cleared around the call to
+   gimple_fold_stmt_to_constant_1 in vrp_visit_assignment_or_call.  */
+static class vr_values *x_vr_values;
+
+/* Return the singleton value-range for NAME or NAME.  */
+
+static inline tree
+vrp_valueize (tree name)
+{
+  if (TREE_CODE (name) != SSA_NAME)
+    return name;
+
+  value_range *vr = x_vr_values->get_value_range (name);
+  bool singleton_p = (vr->type == VR_RANGE
+                      && (TREE_CODE (vr->min) == SSA_NAME
+                          || is_gimple_min_invariant (vr->min))
+                      && vrp_operand_equal_p (vr->min, vr->max));
+  return singleton_p ? vr->min : name;
+}
+
+/* Return the singleton value-range for NAME if that is a constant
+   but signal to not follow SSA edges.  */
+
+static inline tree
+vrp_valueize_1 (tree name)
+{
+  if (TREE_CODE (name) == SSA_NAME)
+    {
+      /* If the definition may be simulated again we cannot follow
+         this SSA edge as the SSA propagator does not necessarily
+         re-visit the use.  */
+      gimple *def_stmt = SSA_NAME_DEF_STMT (name);
+      if (!gimple_nop_p (def_stmt)
+          && prop_simulate_again_p (def_stmt))
+        /* NULL_TREE is the "stop here" signal to the folder.  */
+        return NULL_TREE;
+      value_range *vr = x_vr_values->get_value_range (name);
+      if (range_int_cst_singleton_p (vr))
+        return vr->min;
+    }
+  return name;
+}
+
+/* Visit assignment STMT.  If it produces an interesting range, record
+   the range in VR and set LHS to OUTPUT_P.  */
+
+void
+vr_values::vrp_visit_assignment_or_call (gimple *stmt, tree *output_p,
+                                         value_range *vr)
+{
+  tree lhs;
+  enum gimple_code code = gimple_code (stmt);
+  lhs = gimple_get_lhs (stmt);
+  *output_p = NULL_TREE;
+
+  /* We only keep track of ranges in integral and pointer types.  */
+  if (TREE_CODE (lhs) == SSA_NAME
+      && ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+           /* It is valid to have NULL MIN/MAX values on a type.  See
+              build_range_type.  */
+           && TYPE_MIN_VALUE (TREE_TYPE (lhs))
+           && TYPE_MAX_VALUE (TREE_TYPE (lhs)))
+          || POINTER_TYPE_P (TREE_TYPE (lhs))))
+    {
+      *output_p = lhs;
+
+      /* Try folding the statement to a constant first.  Expose the
+         lattice to the valueization callbacks via x_vr_values.  */
+      x_vr_values = this;
+      tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
+                                                 vrp_valueize_1);
+      x_vr_values = NULL;
+      if (tem)
+        {
+          if (TREE_CODE (tem) == SSA_NAME
+              && (SSA_NAME_IS_DEFAULT_DEF (tem)
+                  || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (tem))))
+            {
+              extract_range_from_ssa_name (vr, tem);
+              return;
+            }
+          else if (is_gimple_min_invariant (tem))
+            {
+              set_value_range_to_value (vr, tem, NULL);
+              return;
+            }
+        }
+      /* Then dispatch to value-range extracting functions.  */
+      if (code == GIMPLE_CALL)
+        extract_range_basic (vr, stmt);
+      else
+        extract_range_from_assignment (vr, as_a <gassign *> (stmt));
+    }
+}
+
+/* Helper that gets the value range of the SSA_NAME with version I
+   or a symbolic range containing the SSA_NAME only if the value range
+   is varying or undefined.  */
+
+value_range
+vr_values::get_vr_for_comparison (int i)
+{
+  tree name = ssa_name (i);
+  value_range vr = *get_value_range (name);
+
+  /* A name with no usable range is given the symbolic singleton range
+     [name, name], so that ranges mentioning it elsewhere can still be
+     compared against it.  */
+  if (vr.type == VR_VARYING || vr.type == VR_UNDEFINED)
+    {
+      vr.type = VR_RANGE;
+      vr.min = name;
+      vr.max = name;
+    }
+
+  return vr;
+}
+
+/* Compare all the value ranges for names equivalent to VAR with VAL
+   using comparison code COMP.  Return the same value returned by
+   compare_range_with_value, including the setting of
+   *STRICT_OVERFLOW_P.  */
+
+tree
+vr_values::compare_name_with_value (enum tree_code comp, tree var, tree val,
+                                    bool *strict_overflow_p, bool use_equiv_p)
+{
+  bitmap_iterator bi;
+  unsigned i;
+  bitmap e;
+  tree retval, t;
+  int used_strict_overflow;
+  bool sop;
+  value_range equiv_vr;
+
+  /* Get the set of equivalences for VAR.  */
+  e = get_value_range (var)->equiv;
+
+  /* Start at -1.  Set it to 0 if we do a comparison without relying
+     on overflow, or 1 if all comparisons rely on overflow.  */
+  used_strict_overflow = -1;
+
+  /* Compare vars' value range with val.  */
+  equiv_vr = get_vr_for_comparison (SSA_NAME_VERSION (var));
+  sop = false;
+  retval = compare_range_with_value (comp, &equiv_vr, val, &sop);
+  if (retval)
+    used_strict_overflow = sop ? 1 : 0;
+
+  /* If the equiv set is empty we have done all work we need to do.  */
+  if (e == NULL)
+    {
+      if (retval
+          && used_strict_overflow > 0)
+        *strict_overflow_p = true;
+      return retval;
+    }
+
+  EXECUTE_IF_SET_IN_BITMAP (e, 0, i, bi)
+    {
+      tree name = ssa_name (i);
+      if (! name)
+        continue;
+
+      /* When USE_EQUIV_P is false (apparently the during-propagation
+         case), skip names whose ranges may still change — TODO confirm
+         against callers.  */
+      if (! use_equiv_p
+          && ! SSA_NAME_IS_DEFAULT_DEF (name)
+          && prop_simulate_again_p (SSA_NAME_DEF_STMT (name)))
+        continue;
+
+      equiv_vr = get_vr_for_comparison (i);
+      sop = false;
+      t = compare_range_with_value (comp, &equiv_vr, val, &sop);
+      if (t)
+        {
+          /* If we get different answers from different members
+             of the equivalence set this check must be in a dead
+             code region.  Folding it to a trap representation
+             would be correct here.  For now just return don't-know.  */
+          if (retval != NULL
+              && t != retval)
+            {
+              retval = NULL_TREE;
+              break;
+            }
+          retval = t;
+
+          if (!sop)
+            used_strict_overflow = 0;
+          else if (used_strict_overflow < 0)
+            used_strict_overflow = 1;
+        }
+    }
+
+  if (retval
+      && used_strict_overflow > 0)
+    *strict_overflow_p = true;
+
+  return retval;
+}
+
+
+/* Given a comparison code COMP and names N1 and N2, compare all the
+   ranges equivalent to N1 against all the ranges equivalent to N2
+   to determine the value of N1 COMP N2.  Return the same value
+   returned by compare_ranges.  Set *STRICT_OVERFLOW_P to indicate
+   whether we relied on undefined signed overflow in the comparison.  */
+
+
+tree
+vr_values::compare_names (enum tree_code comp, tree n1, tree n2,
+                          bool *strict_overflow_p)
+{
+  tree t, retval;
+  bitmap e1, e2;
+  bitmap_iterator bi1, bi2;
+  unsigned i1, i2;
+  int used_strict_overflow;
+  static bitmap_obstack *s_obstack = NULL;
+  static bitmap s_e1 = NULL, s_e2 = NULL;
+
+  /* Compare the ranges of every name equivalent to N1 against the
+     ranges of every name equivalent to N2.  */
+  e1 = get_value_range (n1)->equiv;
+  e2 = get_value_range (n2)->equiv;
+
+  /* Use the fake bitmaps if e1 or e2 are not available.  These are
+     lazily created once and reused across calls.  */
+  if (s_obstack == NULL)
+    {
+      s_obstack = XNEW (bitmap_obstack);
+      bitmap_obstack_initialize (s_obstack);
+      s_e1 = BITMAP_ALLOC (s_obstack);
+      s_e2 = BITMAP_ALLOC (s_obstack);
+    }
+  if (e1 == NULL)
+    e1 = s_e1;
+  if (e2 == NULL)
+    e2 = s_e2;
+
+  /* Add N1 and N2 to their own set of equivalences to avoid
+     duplicating the body of the loop just to check N1 and N2
+     ranges.  These temporary additions are undone on every return
+     path below.  */
+  bitmap_set_bit (e1, SSA_NAME_VERSION (n1));
+  bitmap_set_bit (e2, SSA_NAME_VERSION (n2));
+
+  /* If the equivalence sets have a common intersection, then the two
+     names can be compared without checking their ranges.  */
+  if (bitmap_intersect_p (e1, e2))
+    {
+      bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+      bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+
+      return (comp == EQ_EXPR || comp == GE_EXPR || comp == LE_EXPR)
+             ? boolean_true_node
+             : boolean_false_node;
+    }
+
+  /* Start at -1.  Set it to 0 if we do a comparison without relying
+     on overflow, or 1 if all comparisons rely on overflow.  */
+  used_strict_overflow = -1;
+
+  /* Otherwise, compare all the equivalent ranges.  N1 and N2 are
+     already members of their own sets (added above), so these loops
+     also cover the direct N1 vs N2 comparison.  */
+  EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
+    {
+      if (! ssa_name (i1))
+        continue;
+
+      value_range vr1 = get_vr_for_comparison (i1);
+
+      t = retval = NULL_TREE;
+      EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
+        {
+          if (! ssa_name (i2))
+            continue;
+
+          bool sop = false;
+
+          value_range vr2 = get_vr_for_comparison (i2);
+
+          t = compare_ranges (comp, &vr1, &vr2, &sop);
+          if (t)
+            {
+              /* If we get different answers from different members
+                 of the equivalence set this check must be in a dead
+                 code region.  Folding it to a trap representation
+                 would be correct here.  For now just return don't-know.  */
+              if (retval != NULL
+                  && t != retval)
+                {
+                  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+                  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+                  return NULL_TREE;
+                }
+              retval = t;
+
+              if (!sop)
+                used_strict_overflow = 0;
+              else if (used_strict_overflow < 0)
+                used_strict_overflow = 1;
+            }
+        }
+
+      if (retval)
+        {
+          bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+          bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+          if (used_strict_overflow > 0)
+            *strict_overflow_p = true;
+          return retval;
+        }
+    }
+
+  /* None of the equivalent ranges are useful in computing this
+     comparison.  */
+  bitmap_clear_bit (e1, SSA_NAME_VERSION (n1));
+  bitmap_clear_bit (e2, SSA_NAME_VERSION (n2));
+  return NULL_TREE;
+}
+
+/* Helper function for vrp_evaluate_conditional_warnv & other
+   optimizers.  Evaluate CODE between OP0 and OP1 using only the
+   operands' own value ranges (no equivalences).  */
+
+tree
+vr_values::vrp_evaluate_conditional_warnv_with_ops_using_ranges
+    (enum tree_code code, tree op0, tree op1, bool * strict_overflow_p)
+{
+  value_range *vr0
+    = TREE_CODE (op0) == SSA_NAME ? get_value_range (op0) : NULL;
+  value_range *vr1
+    = TREE_CODE (op1) == SSA_NAME ? get_value_range (op1) : NULL;
+
+  /* Try range-vs-range first, then each available range against the
+     other operand as a plain value.  */
+  if (vr0 && vr1)
+    {
+      tree folded = compare_ranges (code, vr0, vr1, strict_overflow_p);
+      if (folded)
+        return folded;
+    }
+  if (vr0)
+    {
+      tree folded = compare_range_with_value (code, vr0, op1,
+                                              strict_overflow_p);
+      if (folded)
+        return folded;
+    }
+  if (vr1)
+    return compare_range_with_value (swap_tree_comparison (code), vr1, op0,
+                                     strict_overflow_p);
+  return NULL_TREE;
+}
+
+/* Helper function for vrp_evaluate_conditional_warnv.  USE_EQUIV_P
+   says whether the (potentially quadratic) equivalence-set comparisons
+   may be used.  If ONLY_RANGES is non-NULL, it is set to false when
+   answering required more than the operands' own ranges.  */
+
+tree
+vr_values::vrp_evaluate_conditional_warnv_with_ops (enum tree_code code,
+                                                    tree op0, tree op1,
+                                                    bool use_equiv_p,
+                                                    bool *strict_overflow_p,
+                                                    bool *only_ranges)
+{
+  tree ret;
+  if (only_ranges)
+    *only_ranges = true;
+
+  /* We only deal with integral and pointer types.  */
+  if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
+      && !POINTER_TYPE_P (TREE_TYPE (op0)))
+    return NULL_TREE;
+
+  /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
+     as a simple equality test, then prefer that over its current form
+     for evaluation.
+
+     An overflow test which collapses to an equality test can always be
+     expressed as a comparison of one argument against zero.  Overflow
+     occurs when the chosen argument is zero and does not occur if the
+     chosen argument is not zero.  */
+  tree x;
+  if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
+    {
+      wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
+      /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
+         B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
+         B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
+         B = A + 1; if (B > A) -> B = A + 1; if (B != 0)  */
+      if (integer_zerop (x))
+        {
+          op1 = x;
+          code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
+        }
+      /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
+         B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
+         B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
+         B = A - 1; if (B < A) -> B = A - 1; if (A != 0)  */
+      else if (wi::to_wide (x) == max - 1)
+        {
+          op0 = op1;
+          op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
+          code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
+        }
+    }
+
+  if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
+               (code, op0, op1, strict_overflow_p)))
+    return ret;
+  if (only_ranges)
+    *only_ranges = false;
+  /* Do not use compare_names during propagation, it's quadratic.  */
+  if (TREE_CODE (op0) == SSA_NAME && TREE_CODE (op1) == SSA_NAME
+      && use_equiv_p)
+    return compare_names (code, op0, op1, strict_overflow_p);
+  else if (TREE_CODE (op0) == SSA_NAME)
+    return compare_name_with_value (code, op0, op1,
+                                    strict_overflow_p, use_equiv_p);
+  else if (TREE_CODE (op1) == SSA_NAME)
+    return compare_name_with_value (swap_tree_comparison (code), op1, op0,
+                                    strict_overflow_p, use_equiv_p);
+  return NULL_TREE;
+}
+
+/* Given (CODE OP0 OP1) within STMT, try to simplify it based on value range
+   information.  Return NULL if the conditional cannot be evaluated.
+   The ranges of all the names equivalent with the operands in COND
+   will be used when trying to compute the value.  If the result is
+   based on undefined signed overflow, issue a warning if
+   appropriate.  */
+
+tree
+vr_values::vrp_evaluate_conditional (tree_code code, tree op0,
+                                     tree op1, gimple *stmt)
+{
+  bool sop;
+  tree ret;
+  bool only_ranges;
+
+  /* Some passes and foldings leak constants with overflow flag set
+     into the IL.  Avoid doing wrong things with these and bail out.  */
+  if ((TREE_CODE (op0) == INTEGER_CST
+       && TREE_OVERFLOW (op0))
+      || (TREE_CODE (op1) == INTEGER_CST
+          && TREE_OVERFLOW (op1)))
+    return NULL_TREE;
+
+  sop = false;
+  ret = vrp_evaluate_conditional_warnv_with_ops (code, op0, op1, true, &sop,
+                                                 &only_ranges);
+
+  /* SOP means the fold relied on signed overflow being undefined;
+     warn under -Wstrict-overflow.  */
+  if (ret && sop)
+    {
+      enum warn_strict_overflow_code wc;
+      const char* warnmsg;
+
+      if (is_gimple_min_invariant (ret))
+        {
+          wc = WARN_STRICT_OVERFLOW_CONDITIONAL;
+          warnmsg = G_("assuming signed overflow does not occur when "
+                       "simplifying conditional to constant");
+        }
+      else
+        {
+          wc = WARN_STRICT_OVERFLOW_COMPARISON;
+          warnmsg = G_("assuming signed overflow does not occur when "
+                       "simplifying conditional");
+        }
+
+      if (issue_strict_overflow_warning (wc))
+        {
+          location_t location;
+
+          if (!gimple_has_location (stmt))
+            location = input_location;
+          else
+            location = gimple_location (stmt);
+          warning_at (location, OPT_Wstrict_overflow, "%s", warnmsg);
+        }
+    }
+
+  if (warn_type_limits
+      && ret && only_ranges
+      && TREE_CODE_CLASS (code) == tcc_comparison
+      && TREE_CODE (op0) == SSA_NAME)
+    {
+      /* If the comparison is being folded and the operand on the LHS
+         is being compared against a constant value that is outside of
+         the natural range of OP0's type, then the predicate will
+         always fold regardless of the value of OP0.  If -Wtype-limits
+         was specified, emit a warning.  */
+      tree type = TREE_TYPE (op0);
+      value_range *vr0 = get_value_range (op0);
+
+      if (vr0->type == VR_RANGE
+          && INTEGRAL_TYPE_P (type)
+          && vrp_val_is_min (vr0->min)
+          && vrp_val_is_max (vr0->max)
+          && is_gimple_min_invariant (op1))
+        {
+          location_t location;
+
+          if (!gimple_has_location (stmt))
+            location = input_location;
+          else
+            location = gimple_location (stmt);
+
+          warning_at (location, OPT_Wtype_limits,
+                      integer_zerop (ret)
+                      ? G_("comparison always false "
+                           "due to limited range of data type")
+                      : G_("comparison always true "
+                           "due to limited range of data type"));
+        }
+    }
+
+  return ret;
+}
+
+
+/* Visit conditional statement STMT.  If we can determine which edge
+   will be taken out of STMT's basic block, record it in
+   *TAKEN_EDGE_P.  Otherwise, set *TAKEN_EDGE_P to NULL.  */
+
+void
+vr_values::vrp_visit_cond_stmt (gcond *stmt, edge *taken_edge_p)
+{
+  tree val;
+
+  *taken_edge_p = NULL;
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      tree use;
+      ssa_op_iter i;
+
+      fprintf (dump_file, "\nVisiting conditional with predicate: ");
+      print_gimple_stmt (dump_file, stmt, 0);
+      fprintf (dump_file, "\nWith known ranges\n");
+
+      FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE)
+        {
+          fprintf (dump_file, "\t");
+          print_generic_expr (dump_file, use);
+          fprintf (dump_file, ": ");
+          dump_value_range (dump_file, vr_value[SSA_NAME_VERSION (use)]);
+        }
+
+      fprintf (dump_file, "\n");
+    }
+
+  /* Compute the value of the predicate COND by checking the known
+     ranges of each of its operands.
+
+     Note that we cannot evaluate all the equivalent ranges here
+     because those ranges may not yet be final and with the current
+     propagation strategy, we cannot determine when the value ranges
+     of the names in the equivalence set have changed.
+
+     For instance, given the following code fragment
+
+        i_5 = PHI <8, i_13>
+        ...
+        i_14 = ASSERT_EXPR <i_5, i_5 != 0>
+        if (i_14 == 1)
+          ...
+
+     Assume that on the first visit to i_14, i_5 has the temporary
+     range [8, 8] because the second argument to the PHI function is
+     not yet executable.  We derive the range ~[0, 0] for i_14 and the
+     equivalence set { i_5 }.  So, when we visit 'if (i_14 == 1)' for
+     the first time, since i_14 is equivalent to the range [8, 8], we
+     determine that the predicate is always false.
+
+     On the next round of propagation, i_13 is determined to be
+     VARYING, which causes i_5 to drop down to VARYING.  So, another
+     visit to i_14 is scheduled.  In this second visit, we compute the
+     exact same range and equivalence set for i_14, namely ~[0, 0] and
+     { i_5 }.  But we did not have the previous range for i_5
+     registered, so vrp_visit_assignment thinks that the range for
+     i_14 has not changed.  Therefore, the predicate 'if (i_14 == 1)'
+     is not visited again, which stops propagation from visiting
+     statements in the THEN clause of that if().
+
+     To properly fix this we would need to keep the previous range
+     value for the names in the equivalence set.  This way we would've
+     discovered that from one visit to the other i_5 changed from
+     range [8, 8] to VR_VARYING.
+
+     However, fixing this apparent limitation may not be worth the
+     additional checking.  Testing on several code bases (GCC, DLV,
+     MICO, TRAMP3D and SPEC2000) showed that doing this results in
+     4 more predicates folded in SPEC.  */
+
+  bool sop;
+  /* use_equiv_p == false: per the comment above, equivalence ranges
+     may not be final during propagation.  */
+  val = vrp_evaluate_conditional_warnv_with_ops (gimple_cond_code (stmt),
+                                                 gimple_cond_lhs (stmt),
+                                                 gimple_cond_rhs (stmt),
+                                                 false, &sop, NULL);
+  if (val)
+    *taken_edge_p = find_taken_edge (gimple_bb (stmt), val);
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "\nPredicate evaluates to: ");
+      if (val == NULL_TREE)
+        fprintf (dump_file, "DON'T KNOW\n");
+      else
+        print_generic_stmt (dump_file, val);
+    }
+}
+
+/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
+   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX, MIN_IDX2 and
+   MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 < MIN_IDX1.
+   Returns true if the default label is not needed.  */
+
+static bool
+find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
+                        size_t *max_idx1, size_t *min_idx2,
+                        size_t *max_idx2)
+{
+  size_t i, j, k, l;
+  unsigned int n = gimple_switch_num_labels (stmt);
+  bool take_default;
+  tree case_low, case_high;
+  tree min = vr->min, max = vr->max;
+
+  gcc_checking_assert (vr->type == VR_RANGE || vr->type == VR_ANTI_RANGE);
+
+  take_default = !find_case_label_range (stmt, min, max, &i, &j);
+
+  /* Set second range to empty.  */
+  *min_idx2 = 1;
+  *max_idx2 = 0;
+
+  if (vr->type == VR_RANGE)
+    {
+      *min_idx1 = i;
+      *max_idx1 = j;
+      return !take_default;
+    }
+
+  /* Anti-range case: [MIN, MAX] is the excluded span.  Start from all
+     case labels, then carve out those fully covered by it.  */
+  *min_idx1 = 1;
+  *max_idx1 = n - 1;
+
+  if (i > j)
+    return false;
+
+  /* Make sure all the values of case labels [i , j] are contained in
+     range [MIN, MAX].  */
+  case_low = CASE_LOW (gimple_switch_label (stmt, i));
+  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
+  if (tree_int_cst_compare (case_low, min) < 0)
+    i += 1;
+  if (case_high != NULL_TREE
+      && tree_int_cst_compare (max, case_high) < 0)
+    j -= 1;
+
+  if (i > j)
+    return false;
+
+  /* If the range spans case labels [i, j], the corresponding anti-range spans
+     the labels [1, i - 1] and [j + 1, n - 1].  */
+  k = j + 1;
+  l = n - 1;
+  if (k > l)
+    {
+      k = 1;
+      l = 0;
+    }
+
+  j = i - 1;
+  i = 1;
+  /* If the first span is empty, promote the second span to be the
+     first, leaving the second empty.  */
+  if (i > j)
+    {
+      i = k;
+      j = l;
+      k = 1;
+      l = 0;
+    }
+
+  *min_idx1 = i;
+  *max_idx1 = j;
+  *min_idx2 = k;
+  *max_idx2 = l;
+  return false;
+}
+
+/* Visit switch statement STMT.  If we can determine which edge
+   will be taken out of STMT's basic block, record it in
+   *TAKEN_EDGE_P.  Otherwise, *TAKEN_EDGE_P is set to NULL.  */
+
+void
+vr_values::vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
+{
+  tree op, val;
+  value_range *vr;
+  size_t i = 0, j = 0, k, l;
+  bool take_default;
+
+  *taken_edge_p = NULL;
+  op = gimple_switch_index (stmt);
+  if (TREE_CODE (op) != SSA_NAME)
+    return;
+
+  vr = get_value_range (op);
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "\nVisiting switch expression with operand ");
+      print_generic_expr (dump_file, op);
+      fprintf (dump_file, " with known range ");
+      dump_value_range (dump_file, vr);
+      fprintf (dump_file, "\n");
+    }
+
+  if ((vr->type != VR_RANGE
+       && vr->type != VR_ANTI_RANGE)
+      || symbolic_range_p (vr))
+    return;
+
+  /* Find the single edge that is taken from the switch expression.  */
+  take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
+
+  /* Check if the range spans no CASE_LABEL.  If so, we only reach the default
+     label */
+  if (j < i)
+    {
+      gcc_assert (take_default);
+      val = gimple_switch_default_label (stmt);
+    }
+  else
+    {
+      /* Check if labels with index i to j and maybe the default label
+         are all reaching the same label.  */
+
+      val = gimple_switch_label (stmt, i);
+      if (take_default
+          && CASE_LABEL (gimple_switch_default_label (stmt))
+             != CASE_LABEL (val))
+        {
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            fprintf (dump_file, "  not a single destination for this "
+                     "range\n");
+          return;
+        }
+      for (++i; i <= j; ++i)
+        {
+          if (CASE_LABEL (gimple_switch_label (stmt, i)) != CASE_LABEL (val))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, "  not a single destination for this "
+                         "range\n");
+              return;
+            }
+        }
+      /* Also check the second label span [k, l] (non-empty only in the
+         anti-range case).  */
+      for (; k <= l; ++k)
+        {
+          if (CASE_LABEL (gimple_switch_label (stmt, k)) != CASE_LABEL (val))
+            {
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                fprintf (dump_file, "  not a single destination for this "
+                         "range\n");
+              return;
+            }
+        }
+    }
+
+  *taken_edge_p = find_edge (gimple_bb (stmt),
+                             label_to_block (CASE_LABEL (val)));
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "  will take edge to ");
+      print_generic_stmt (dump_file, CASE_LABEL (val));
+    }
+}
+
+
+/* Evaluate statement STMT.  If the statement produces a useful range,
+   set VR and corresponding OUTPUT_P.
+
+   If STMT is a conditional branch and we can determine its truth
+   value, the taken edge is recorded in *TAKEN_EDGE_P.  */
+
+void
+vr_values::extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
+                                    tree *output_p, value_range *vr)
+{
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "\nVisiting statement:\n");
+      print_gimple_stmt (dump_file, stmt, 0, dump_flags);
+    }
+
+  /* Statements not interesting for VRP are expected to be
+     block-enders only; anything else here is a caller bug.  */
+  if (!stmt_interesting_for_vrp (stmt))
+    gcc_assert (stmt_ends_bb_p (stmt));
+  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
+    vrp_visit_assignment_or_call (stmt, output_p, vr);
+  else if (gimple_code (stmt) == GIMPLE_COND)
+    vrp_visit_cond_stmt (as_a <gcond *> (stmt), taken_edge_p);
+  else if (gimple_code (stmt) == GIMPLE_SWITCH)
+    vrp_visit_switch_stmt (as_a <gswitch *> (stmt), taken_edge_p);
+}
+
+/* Visit all arguments for PHI node PHI that flow through executable
+   edges.  If a valid value range can be derived from all the incoming
+   value ranges, set a new range in VR_RESULT.  */
+
+void
+vr_values::extract_range_from_phi_node (gphi *phi, value_range *vr_result)
+{
+  size_t i;
+  tree lhs = PHI_RESULT (phi);
+  value_range *lhs_vr = get_value_range (lhs);
+  bool first = true;
+  int edges, old_edges;
+  struct loop *l;
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "\nVisiting PHI node: ");
+      print_gimple_stmt (dump_file, phi, 0, dump_flags);
+    }
+
+  bool may_simulate_backedge_again = false;
+  edges = 0;
+  for (i = 0; i < gimple_phi_num_args (phi); i++)
+    {
+      edge e = gimple_phi_arg_edge (phi, i);
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        {
+          fprintf (dump_file,
+                   " Argument #%d (%d -> %d %sexecutable)\n",
+                   (int) i, e->src->index, e->dest->index,
+                   (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
+        }
+
+      /* Only arguments flowing in over executable edges contribute
+         to the result range.  */
+      if (e->flags & EDGE_EXECUTABLE)
+        {
+          tree arg = PHI_ARG_DEF (phi, i);
+          value_range vr_arg;
+
+          ++edges;
+
+          if (TREE_CODE (arg) == SSA_NAME)
+            {
+              /* See if we are eventually going to change one of the args.  */
+              gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
+              if (! gimple_nop_p (def_stmt)
+                  && prop_simulate_again_p (def_stmt)
+                  && e->flags & EDGE_DFS_BACK)
+                may_simulate_backedge_again = true;
+
+              vr_arg = *(get_value_range (arg));
+              /* Do not allow equivalences or symbolic ranges to leak in from
+                 backedges.  That creates invalid equivalencies.
+                 See PR53465 and PR54767.  */
+              if (e->flags & EDGE_DFS_BACK)
+                {
+                  if (vr_arg.type == VR_RANGE
+                      || vr_arg.type == VR_ANTI_RANGE)
+                    {
+                      vr_arg.equiv = NULL;
+                      if (symbolic_range_p (&vr_arg))
+                        {
+                          vr_arg.type = VR_VARYING;
+                          vr_arg.min = NULL_TREE;
+                          vr_arg.max = NULL_TREE;
+                        }
+                    }
+                }
+              else
+                {
+                  /* If the non-backedge arguments range is VR_VARYING then
+                     we can still try recording a simple equivalence.  */
+                  if (vr_arg.type == VR_VARYING)
+                    {
+                      vr_arg.type = VR_RANGE;
+                      vr_arg.min = arg;
+                      vr_arg.max = arg;
+                      vr_arg.equiv = NULL;
+                    }
+                }
+            }
+          else
+            {
+              /* Constant argument: build a singleton range for it,
+                 stripping any overflow flag first.  */
+              if (TREE_OVERFLOW_P (arg))
+                arg = drop_tree_overflow (arg);
+
+              vr_arg.type = VR_RANGE;
+              vr_arg.min = arg;
+              vr_arg.max = arg;
+              vr_arg.equiv = NULL;
+            }
+
+          if (dump_file && (dump_flags & TDF_DETAILS))
+            {
+              fprintf (dump_file, "\t");
+              print_generic_expr (dump_file, arg, dump_flags);
+              fprintf (dump_file, ": ");
+              dump_value_range (dump_file, &vr_arg);
+              fprintf (dump_file, "\n");
+            }
+
+          /* Fold this argument's range into the accumulated result
+             (union semantics via vrp_meet).  */
+          if (first)
+            copy_value_range (vr_result, &vr_arg);
+          else
+            vrp_meet (vr_result, &vr_arg);
+          first = false;
+
+          /* Once VARYING, further arguments cannot refine the range.  */
+          if (vr_result->type == VR_VARYING)
+            break;
+        }
+    }
+
+  if (vr_result->type == VR_VARYING)
+    goto varying;
+  else if (vr_result->type == VR_UNDEFINED)
+    goto update_range;
+
+  old_edges = vr_phi_edge_counts[SSA_NAME_VERSION (lhs)];
+  vr_phi_edge_counts[SSA_NAME_VERSION (lhs)] = edges;
+
+  /* To prevent infinite iterations in the algorithm, derive ranges
+     when the new value is slightly bigger or smaller than the
+     previous one.  We don't do this if we have seen a new executable
+     edge; this helps us avoid an infinity for conditionals
+     which are not in a loop.  If the old value-range was VR_UNDEFINED
+     use the updated range and iterate one more time.  If we will not
+     simulate this PHI again via the backedge allow us to iterate.  */
+  if (edges > 0
+      && gimple_phi_num_args (phi) > 1
+      && edges == old_edges
+      && lhs_vr->type != VR_UNDEFINED
+      && may_simulate_backedge_again)
+    {
+      /* Compare old and new ranges, fall back to varying if the
+         values are not comparable.  */
+      int cmp_min = compare_values (lhs_vr->min, vr_result->min);
+      if (cmp_min == -2)
+        goto varying;
+      int cmp_max = compare_values (lhs_vr->max, vr_result->max);
+      if (cmp_max == -2)
+        goto varying;
+
+      /* For non VR_RANGE or for pointers fall back to varying if
+         the range changed.  */
+      if ((lhs_vr->type != VR_RANGE || vr_result->type != VR_RANGE
+           || POINTER_TYPE_P (TREE_TYPE (lhs)))
+          && (cmp_min != 0 || cmp_max != 0))
+        goto varying;
+
+      /* If the new minimum is larger than the previous one
+         retain the old value.  If the new minimum value is smaller
+         than the previous one and not -INF go all the way to -INF + 1.
+         In the first case, to avoid infinite bouncing between different
+         minimums, and in the other case to avoid iterating millions of
+         times to reach -INF.  Going to -INF + 1 also lets the following
+         iteration compute whether there will be any overflow, at the
+         expense of one additional iteration.  */
+      if (cmp_min < 0)
+        vr_result->min = lhs_vr->min;
+      else if (cmp_min > 0
+               && !vrp_val_is_min (vr_result->min))
+        vr_result->min
+          = int_const_binop (PLUS_EXPR,
+                             vrp_val_min (TREE_TYPE (vr_result->min)),
+                             build_int_cst (TREE_TYPE (vr_result->min), 1));
+
+      /* Similarly for the maximum value.
+         NOTE(review): the type of vr_result->min is used below; this
+         presumably relies on min and max sharing a type — confirm.  */
+      if (cmp_max > 0)
+        vr_result->max = lhs_vr->max;
+      else if (cmp_max < 0
+               && !vrp_val_is_max (vr_result->max))
+        vr_result->max
+          = int_const_binop (MINUS_EXPR,
+                             vrp_val_max (TREE_TYPE (vr_result->min)),
+                             build_int_cst (TREE_TYPE (vr_result->min), 1));
+
+      /* If we dropped either bound to +-INF then if this is a loop
+         PHI node SCEV may know more about its value-range.  */
+      if (cmp_min > 0 || cmp_min < 0
+          || cmp_max < 0 || cmp_max > 0)
+        goto scev_check;
+
+      goto infinite_check;
+    }
+
+  goto update_range;
+
+varying:
+  set_value_range_to_varying (vr_result);
+
+scev_check:
+  /* If this is a loop PHI node SCEV may know more about its value-range.
+     scev_check can be reached from two paths, one is a fall through from above
+     "varying" label, the other is direct goto from code block which tries to
+     avoid infinite simulation.  */
+  if ((l = loop_containing_stmt (phi))
+      && l->header == gimple_bb (phi))
+    adjust_range_with_scev (vr_result, l, phi, lhs);
+
+infinite_check:
+  /* If we will end up with a (-INF, +INF) range, set it to
+     VARYING.  Same if the previous max value was invalid for
+     the type and we end up with vr_result.min > vr_result.max.  */
+  if ((vr_result->type == VR_RANGE || vr_result->type == VR_ANTI_RANGE)
+      && !((vrp_val_is_max (vr_result->max) && vrp_val_is_min (vr_result->min))
+           || compare_values (vr_result->min, vr_result->max) > 0))
+    ;
+  else
+    set_value_range_to_varying (vr_result);
+
+  /* If the new range is different than the previous value, keep
+     iterating.  */
+update_range:
+  return;
+}
+
+/* Simplify boolean operations if the source is known
+   to be already a boolean.  Returns true if STMT was changed.  */
+bool
+vr_values::simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi,
+                                            gimple *stmt)
+{
+  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+  tree lhs, op0, op1;
+  bool need_conversion;
+
+  /* We handle only !=/== case here.  */
+  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);
+
+  /* Both operands must be known to take only the values 0 or 1.  */
+  op0 = gimple_assign_rhs1 (stmt);
+  if (!op_with_boolean_value_range_p (op0))
+    return false;
+
+  op1 = gimple_assign_rhs2 (stmt);
+  if (!op_with_boolean_value_range_p (op1))
+    return false;
+
+  /* Reduce number of cases to handle to NE_EXPR.  As there is no
+     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
+  if (rhs_code == EQ_EXPR)
+    {
+      if (TREE_CODE (op1) == INTEGER_CST)
+        op1 = int_const_binop (BIT_XOR_EXPR, op1,
+                               build_int_cst (TREE_TYPE (op1), 1));
+      else
+        return false;
+    }
+
+  lhs = gimple_assign_lhs (stmt);
+  need_conversion
+    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));
+
+  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
+  if (need_conversion
+      && !TYPE_UNSIGNED (TREE_TYPE (op0))
+      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
+      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
+    return false;
+
+  /* For A != 0 we can substitute A itself.  */
+  if (integer_zerop (op1))
+    gimple_assign_set_rhs_with_ops (gsi,
+                                    need_conversion
+                                    ? NOP_EXPR : TREE_CODE (op0), op0);
+  /* For A != B we substitute A ^ B.  Either with conversion.  */
+  else if (need_conversion)
+    {
+      tree tem = make_ssa_name (TREE_TYPE (op0));
+      gassign *newop
+        = gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
+      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
+      /* Record that the XOR of two booleans is itself in [0, 1].  */
+      if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
+          && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
+        set_range_info (tem, VR_RANGE,
+                        wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
+                        wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
+      gimple_assign_set_rhs_with_ops (gsi, NOP_EXPR, tem);
+    }
+  /* Or without.  */
+  else
+    gimple_assign_set_rhs_with_ops (gsi, BIT_XOR_EXPR, op0, op1);
+  update_stmt (gsi_stmt (*gsi));
+  fold_stmt (gsi, follow_single_use_edges);
+
+  return true;
+}
+
+/* Simplify a division or modulo operator to a right shift or bitwise and
+   if the first operand is unsigned or is greater than zero and the second
+   operand is an exact power of two.  For TRUNC_MOD_EXPR op0 % op1 with
+   constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
+   optimize it into just op0 if op0's range is known to be a subset of
+   [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
+   modulo.  Returns true if STMT was changed.  */
+
+bool
+vr_values::simplify_div_or_mod_using_ranges (gimple_stmt_iterator *gsi,
+                                             gimple *stmt)
+{
+  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+  tree val = NULL;
+  tree op0 = gimple_assign_rhs1 (stmt);
+  tree op1 = gimple_assign_rhs2 (stmt);
+  tree op0min = NULL_TREE, op0max = NULL_TREE;
+  tree op1min = op1;
+  value_range *vr = NULL;
+
+  /* Determine constant bounds for op0, either from the constant
+     itself or from its recorded value range.  */
+  if (TREE_CODE (op0) == INTEGER_CST)
+    {
+      op0min = op0;
+      op0max = op0;
+    }
+  else
+    {
+      vr = get_value_range (op0);
+      if (range_int_cst_p (vr))
+        {
+          op0min = vr->min;
+          op0max = vr->max;
+        }
+    }
+
+  /* For modulo by an SSA name, use the lower bound of its range as
+     the effective divisor minimum.  */
+  if (rhs_code == TRUNC_MOD_EXPR
+      && TREE_CODE (op1) == SSA_NAME)
+    {
+      value_range *vr1 = get_value_range (op1);
+      if (range_int_cst_p (vr1))
+        op1min = vr1->min;
+    }
+  if (rhs_code == TRUNC_MOD_EXPR
+      && TREE_CODE (op1min) == INTEGER_CST
+      && tree_int_cst_sgn (op1min) == 1
+      && op0max
+      && tree_int_cst_lt (op0max, op1min))
+    {
+      if (TYPE_UNSIGNED (TREE_TYPE (op0))
+          || tree_int_cst_sgn (op0min) >= 0
+          || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
+                              op0min))
+        {
+          /* If op0 already has the range op0 % op1 has,
+             then TRUNC_MOD_EXPR won't change anything.  */
+          gimple_assign_set_rhs_from_tree (gsi, op0);
+          return true;
+        }
+    }
+
+  if (TREE_CODE (op0) != SSA_NAME)
+    return false;
+
+  if (!integer_pow2p (op1))
+    {
+      /* X % -Y can be only optimized into X % Y either if
+         X is not INT_MIN, or Y is not -1.  Fold it now, as after
+         remove_range_assertions the range info might be not available
+         anymore.  */
+      if (rhs_code == TRUNC_MOD_EXPR
+          && fold_stmt (gsi, follow_single_use_edges))
+        return true;
+      return false;
+    }
+
+  /* The shift/mask transform is valid if op0 is known non-negative:
+     trivially true for unsigned types, otherwise check the range.  */
+  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
+    val = integer_one_node;
+  else
+    {
+      bool sop = false;
+
+      val = compare_range_with_value (GE_EXPR, vr, integer_zero_node, &sop);
+
+      if (val
+          && sop
+          && integer_onep (val)
+          && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+        {
+          location_t location;
+
+          if (!gimple_has_location (stmt))
+            location = input_location;
+          else
+            location = gimple_location (stmt);
+          warning_at (location, OPT_Wstrict_overflow,
+                      "assuming signed overflow does not occur when "
+                      "simplifying %</%> or %<%%%> to %<>>%> or %<&%>");
+        }
+    }
+
+  if (val && integer_onep (val))
+    {
+      tree t;
+
+      if (rhs_code == TRUNC_DIV_EXPR)
+        {
+          /* x / 2**N  ->  x >> N.  */
+          t = build_int_cst (integer_type_node, tree_log2 (op1));
+          gimple_assign_set_rhs_code (stmt, RSHIFT_EXPR);
+          gimple_assign_set_rhs1 (stmt, op0);
+          gimple_assign_set_rhs2 (stmt, t);
+        }
+      else
+        {
+          /* x % 2**N  ->  x & (2**N - 1).  */
+          t = build_int_cst (TREE_TYPE (op1), 1);
+          t = int_const_binop (MINUS_EXPR, op1, t);
+          t = fold_convert (TREE_TYPE (op0), t);
+
+          gimple_assign_set_rhs_code (stmt, BIT_AND_EXPR);
+          gimple_assign_set_rhs1 (stmt, op0);
+          gimple_assign_set_rhs2 (stmt, t);
+        }
+
+      update_stmt (stmt);
+      fold_stmt (gsi, follow_single_use_edges);
+      return true;
+    }
+
+  return false;
+}
+
+/* Simplify a min or max if the ranges of the two operands are
+   disjoint.  Return true if we do simplify.  */
+
+bool
+vr_values::simplify_min_or_max_using_ranges (gimple_stmt_iterator *gsi,
+                                             gimple *stmt)
+{
+  tree op0 = gimple_assign_rhs1 (stmt);
+  tree op1 = gimple_assign_rhs2 (stmt);
+  bool sop = false;
+  tree val;
+
+  /* Try to decide op0 <= op1; if that is inconclusive, try the strict
+     comparison op0 < op1.  Either answer determines the min/max.  */
+  val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+         (LE_EXPR, op0, op1, &sop));
+  if (!val)
+    {
+      sop = false;
+      val = (vrp_evaluate_conditional_warnv_with_ops_using_ranges
+             (LT_EXPR, op0, op1, &sop));
+    }
+
+  if (val)
+    {
+      if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+        {
+          location_t location;
+
+          if (!gimple_has_location (stmt))
+            location = input_location;
+          else
+            location = gimple_location (stmt);
+          warning_at (location, OPT_Wstrict_overflow,
+                      "assuming signed overflow does not occur when "
+                      "simplifying %<min/max (X,Y)%> to %<X%> or %<Y%>");
+        }
+
+      /* VAL == TRUE -> OP0 < or <= op1
+         VAL == FALSE -> OP0 > or >= op1.  */
+      tree res = ((gimple_assign_rhs_code (stmt) == MAX_EXPR)
+                  == integer_zerop (val)) ? op0 : op1;
+      gimple_assign_set_rhs_from_tree (gsi, res);
+      return true;
+    }
+
+  return false;
+}
+
+/* If the operand to an ABS_EXPR is >= 0, then eliminate the
+   ABS_EXPR.  If the operand is <= 0, then simplify the
+   ABS_EXPR into a NEGATE_EXPR.  Returns true if STMT was changed.  */
+
+bool
+vr_values::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+{
+  tree op = gimple_assign_rhs1 (stmt);
+  value_range *vr = get_value_range (op);
+
+  if (vr)
+    {
+      tree val = NULL;
+      bool sop = false;
+
+      val = compare_range_with_value (LE_EXPR, vr, integer_zero_node, &sop);
+      if (!val)
+        {
+          /* The range is neither <= 0 nor > 0.  Now see if it is
+             either < 0 or >= 0.  */
+          sop = false;
+          val = compare_range_with_value (LT_EXPR, vr, integer_zero_node,
+                                          &sop);
+        }
+
+      if (val)
+        {
+          if (sop && issue_strict_overflow_warning (WARN_STRICT_OVERFLOW_MISC))
+            {
+              location_t location;
+
+              if (!gimple_has_location (stmt))
+                location = input_location;
+              else
+                location = gimple_location (stmt);
+              warning_at (location, OPT_Wstrict_overflow,
+                          "assuming signed overflow does not occur when "
+                          "simplifying %<abs (X)%> to %<X%> or %<-X%>");
+            }
+
+          gimple_assign_set_rhs1 (stmt, op);
+          /* integer_zerop (val) means the comparison was false, i.e. the
+             operand is non-negative: the abs becomes a plain copy, which
+             is expressed by an SSA_NAME rhs code.  Otherwise negate.  */
+          if (integer_zerop (val))
+            gimple_assign_set_rhs_code (stmt, SSA_NAME);
+          else
+            gimple_assign_set_rhs_code (stmt, NEGATE_EXPR);
+          update_stmt (stmt);
+          fold_stmt (gsi, follow_single_use_edges);
+          return true;
+        }
+    }
+
+  return false;
+}
+
+/* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR.
+   If all the bits that are being cleared by & are already
+   known to be zero from VR, or all the bits that are being
+   set by | are already known to be one from VR, the bit
+   operation is redundant.  Returns true if STMT was changed.  */
+
+bool
+vr_values::simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi,
+                                          gimple *stmt)
+{
+  tree op0 = gimple_assign_rhs1 (stmt);
+  tree op1 = gimple_assign_rhs2 (stmt);
+  tree op = NULL_TREE;
+  value_range vr0 = VR_INITIALIZER;
+  value_range vr1 = VR_INITIALIZER;
+  wide_int may_be_nonzero0, may_be_nonzero1;
+  wide_int must_be_nonzero0, must_be_nonzero1;
+  wide_int mask;
+
+  /* Obtain a value range for each operand; bail out for operands
+     that are neither SSA names nor invariants.  */
+  if (TREE_CODE (op0) == SSA_NAME)
+    vr0 = *(get_value_range (op0));
+  else if (is_gimple_min_invariant (op0))
+    set_value_range_to_value (&vr0, op0, NULL);
+  else
+    return false;
+
+  if (TREE_CODE (op1) == SSA_NAME)
+    vr1 = *(get_value_range (op1));
+  else if (is_gimple_min_invariant (op1))
+    set_value_range_to_value (&vr1, op1, NULL);
+  else
+    return false;
+
+  /* Derive may-be-nonzero / must-be-nonzero bit masks from the ranges.  */
+  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
+                                  &must_be_nonzero0))
+    return false;
+  if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
+                                  &must_be_nonzero1))
+    return false;
+
+  switch (gimple_assign_rhs_code (stmt))
+    {
+    case BIT_AND_EXPR:
+      /* If every bit possibly set in one operand is certainly set in
+         the other, the AND leaves that operand unchanged.  */
+      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+      if (mask == 0)
+        {
+          op = op0;
+          break;
+        }
+      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+      if (mask == 0)
+        {
+          op = op1;
+          break;
+        }
+      break;
+    case BIT_IOR_EXPR:
+      /* Dually, the OR is redundant when one operand's possible bits
+         are all already set in the other.  */
+      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
+      if (mask == 0)
+        {
+          op = op1;
+          break;
+        }
+      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
+      if (mask == 0)
+        {
+          op = op0;
+          break;
+        }
+      break;
+    default:
+      gcc_unreachable ();
+    }
+
+  if (op == NULL_TREE)
+    return false;
+
+  gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op);
+  update_stmt (gsi_stmt (*gsi));
+  return true;
+}
+
+/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
+   a known value range VR.
+
+   If there is one and only one value which will satisfy the
+   conditional, then return that value.  Else return NULL.  */
+
+static tree
+test_for_singularity (enum tree_code cond_code, tree op0,
+                      tree op1, value_range *vr)
+{
+  tree min = NULL;
+  tree max = NULL;
+
+  /* Extract minimum/maximum values which satisfy the conditional as it was
+     written.  */
+  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
+    {
+      min = TYPE_MIN_VALUE (TREE_TYPE (op0));
+
+      max = op1;
+      if (cond_code == LT_EXPR)
+        {
+          tree one = build_int_cst (TREE_TYPE (op0), 1);
+          max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
+          /* Signal to compare_values_warnv this expr doesn't overflow.  */
+          if (EXPR_P (max))
+            TREE_NO_WARNING (max) = 1;
+        }
+    }
+  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
+    {
+      max = TYPE_MAX_VALUE (TREE_TYPE (op0));
+
+      min = op1;
+      if (cond_code == GT_EXPR)
+        {
+          tree one = build_int_cst (TREE_TYPE (op0), 1);
+          min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
+          /* Signal to compare_values_warnv this expr doesn't overflow.  */
+          if (EXPR_P (min))
+            TREE_NO_WARNING (min) = 1;
+        }
+    }
+
+  /* Now refine the minimum and maximum values using any
+     value range information we have for op0.  */
+  if (min && max)
+    {
+      if (compare_values (vr->min, min) == 1)
+        min = vr->min;
+      if (compare_values (vr->max, max) == -1)
+        max = vr->max;
+
+      /* If the new min/max values have converged to a single value,
+         then there is only one value which can satisfy the condition,
+         return that value.  */
+      if (operand_equal_p (min, max, 0) && is_gimple_min_invariant (min))
+        return min;
+    }
+  return NULL;
+}
+
+/* Return whether the value range *VR fits in an integer type specified
+   by DEST_PRECISION and DEST_SGN.  */
+
+static bool
+range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
+{
+  tree src_type;
+  unsigned src_precision;
+  widest_int tem;
+  signop src_sgn;
+
+  /* We can only handle integral and pointer types.  */
+  src_type = TREE_TYPE (vr->min);
+  if (!INTEGRAL_TYPE_P (src_type)
+      && !POINTER_TYPE_P (src_type))
+    return false;
+
+  /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
+     and so is an identity transform.  */
+  src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
+  src_sgn = TYPE_SIGN (src_type);
+  if ((src_precision < dest_precision
+       && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
+      || (src_precision == dest_precision && src_sgn == dest_sgn))
+    return true;
+
+  /* Now we can only handle ranges with constant bounds.  */
+  if (vr->type != VR_RANGE
+      || TREE_CODE (vr->min) != INTEGER_CST
+      || TREE_CODE (vr->max) != INTEGER_CST)
+    return false;
+
+  /* For sign changes, the MSB of the wide_int has to be clear.
+     An unsigned value with its MSB set cannot be represented by
+     a signed wide_int, while a negative value cannot be represented
+     by an unsigned wide_int.  */
+  if (src_sgn != dest_sgn
+      && (wi::lts_p (wi::to_wide (vr->min), 0)
+          || wi::lts_p (wi::to_wide (vr->max), 0)))
+    return false;
+
+  /* Then we can perform the conversion on both ends and compare
+     the result for equality.  */
+  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
+  if (tem != wi::to_widest (vr->min))
+    return false;
+  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
+  if (tem != wi::to_widest (vr->max))
+    return false;
+
+  return true;
+}
+
+/* Simplify a conditional using a relational operator to an equality
+   test if the range information indicates only one value can satisfy
+   the original conditional.  Returns true if STMT was changed.  */
+
+bool
+vr_values::simplify_cond_using_ranges_1 (gcond *stmt)
+{
+  tree op0 = gimple_cond_lhs (stmt);
+  tree op1 = gimple_cond_rhs (stmt);
+  enum tree_code cond_code = gimple_cond_code (stmt);
+
+  if (cond_code != NE_EXPR
+      && cond_code != EQ_EXPR
+      && TREE_CODE (op0) == SSA_NAME
+      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+      && is_gimple_min_invariant (op1))
+    {
+      value_range *vr = get_value_range (op0);
+
+      /* If we have range information for OP0, then we might be
+         able to simplify this conditional.  */
+      if (vr->type == VR_RANGE)
+        {
+          /* First try: exactly one value satisfies the condition,
+             so rewrite as OP0 == value.  */
+          tree new_tree = test_for_singularity (cond_code, op0, op1, vr);
+          if (new_tree)
+            {
+              if (dump_file)
+                {
+                  fprintf (dump_file, "Simplified relational ");
+                  print_gimple_stmt (dump_file, stmt, 0);
+                  fprintf (dump_file, " into ");
+                }
+
+              gimple_cond_set_code (stmt, EQ_EXPR);
+              gimple_cond_set_lhs (stmt, op0);
+              gimple_cond_set_rhs (stmt, new_tree);
+
+              update_stmt (stmt);
+
+              if (dump_file)
+                {
+                  print_gimple_stmt (dump_file, stmt, 0);
+                  fprintf (dump_file, "\n");
+                }
+
+              return true;
+            }
+
+          /* Try again after inverting the condition.  We only deal
+             with integral types here, so no need to worry about
+             issues with inverting FP comparisons.  */
+          new_tree = test_for_singularity
+                       (invert_tree_comparison (cond_code, false),
+                        op0, op1, vr);
+          if (new_tree)
+            {
+              if (dump_file)
+                {
+                  fprintf (dump_file, "Simplified relational ");
+                  print_gimple_stmt (dump_file, stmt, 0);
+                  fprintf (dump_file, " into ");
+                }
+
+              gimple_cond_set_code (stmt, NE_EXPR);
+              gimple_cond_set_lhs (stmt, op0);
+              gimple_cond_set_rhs (stmt, new_tree);
+
+              update_stmt (stmt);
+
+              if (dump_file)
+                {
+                  print_gimple_stmt (dump_file, stmt, 0);
+                  fprintf (dump_file, "\n");
+                }
+
+              return true;
+            }
+        }
+    }
+  return false;
+}
+
+/* STMT is a conditional at the end of a basic block.
+
+   If the conditional is of the form SSA_NAME op constant and the SSA_NAME
+   was set via a type conversion, try to replace the SSA_NAME with the RHS
+   of the type conversion.  Doing so makes the conversion dead which helps
+   subsequent passes.  */
+
+void
+vr_values::simplify_cond_using_ranges_2 (gcond *stmt)
+{
+  tree op0 = gimple_cond_lhs (stmt);
+  tree op1 = gimple_cond_rhs (stmt);
+
+  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
+     see if OP0 was set by a type conversion where the source of
+     the conversion is another SSA_NAME with a range that fits
+     into the range of OP0's type.
+
+     If so, the conversion is redundant as the earlier SSA_NAME can be
+     used for the comparison directly if we just massage the constant in the
+     comparison.  */
+  if (TREE_CODE (op0) == SSA_NAME
+      && TREE_CODE (op1) == INTEGER_CST)
+    {
+      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
+      tree innerop;
+
+      if (!is_gimple_assign (def_stmt)
+          || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
+        return;
+
+      innerop = gimple_assign_rhs1 (def_stmt);
+
+      if (TREE_CODE (innerop) == SSA_NAME
+          && !POINTER_TYPE_P (TREE_TYPE (innerop))
+          && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
+          && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
+        {
+          value_range *vr = get_value_range (innerop);
+
+          /* The rewrite is only valid when INNEROP's values survive the
+             conversion unchanged and the constant is representable in
+             INNEROP's type.  */
+          if (range_int_cst_p (vr)
+              && range_fits_type_p (vr,
+                                    TYPE_PRECISION (TREE_TYPE (op0)),
+                                    TYPE_SIGN (TREE_TYPE (op0)))
+              && int_fits_type_p (op1, TREE_TYPE (innerop)))
+            {
+              tree newconst = fold_convert (TREE_TYPE (innerop), op1);
+              gimple_cond_set_lhs (stmt, innerop);
+              gimple_cond_set_rhs (stmt, newconst);
+              update_stmt (stmt);
+              if (dump_file && (dump_flags & TDF_DETAILS))
+                {
+                  fprintf (dump_file, "Folded into: ");
+                  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
+                  fprintf (dump_file, "\n");
+                }
+            }
+        }
+    }
+}
+
+/* Simplify a switch statement using the value range of the switch
+   argument.  Queues edge removals and the rewritten label vector in
+   to_remove_edges / to_update_switch_stmts rather than mutating the
+   CFG immediately; always returns false.  */
+
+bool
+vr_values::simplify_switch_using_ranges (gswitch *stmt)
+{
+  tree op = gimple_switch_index (stmt);
+  value_range *vr = NULL;
+  bool take_default;
+  edge e;
+  edge_iterator ei;
+  size_t i = 0, j = 0, n, n2;
+  tree vec2;
+  switch_update su;
+  size_t k = 1, l = 0;
+
+  if (TREE_CODE (op) == SSA_NAME)
+    {
+      vr = get_value_range (op);
+
+      /* We can only handle integer ranges.  */
+      if ((vr->type != VR_RANGE
+           && vr->type != VR_ANTI_RANGE)
+          || symbolic_range_p (vr))
+        return false;
+
+      /* Find case label for min/max of the value range.  */
+      take_default = !find_case_label_ranges (stmt, vr, &i, &j, &k, &l);
+    }
+  else if (TREE_CODE (op) == INTEGER_CST)
+    {
+      take_default = !find_case_label_index (stmt, 1, op, &i);
+      if (take_default)
+        {
+          /* Empty range [1, 0]: only the default label is reachable.  */
+          i = 1;
+          j = 0;
+        }
+      else
+        {
+          j = i;
+        }
+    }
+  else
+    return false;
+
+  n = gimple_switch_num_labels (stmt);
+
+  /* We can truncate the case label ranges that partially overlap with OP's
+     value range.  */
+  size_t min_idx = 1, max_idx = 0;
+  if (vr != NULL)
+    find_case_label_range (stmt, vr->min, vr->max, &min_idx, &max_idx);
+  if (min_idx <= max_idx)
+    {
+      tree min_label = gimple_switch_label (stmt, min_idx);
+      tree max_label = gimple_switch_label (stmt, max_idx);
+
+      /* Avoid changing the type of the case labels when truncating.  */
+      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
+      tree vr_min = fold_convert (case_label_type, vr->min);
+      tree vr_max = fold_convert (case_label_type, vr->max);
+
+      if (vr->type == VR_RANGE)
+        {
+          /* If OP's value range is [2,8] and the low label range is
+             0 ... 3, truncate the label's range to 2 .. 3.  */
+          if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+              && CASE_HIGH (min_label) != NULL_TREE
+              && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
+            CASE_LOW (min_label) = vr_min;
+
+          /* If OP's value range is [2,8] and the high label range is
+             7 ... 10, truncate the label's range to 7 .. 8.  */
+          if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
+              && CASE_HIGH (max_label) != NULL_TREE
+              && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
+            CASE_HIGH (max_label) = vr_max;
+        }
+      else if (vr->type == VR_ANTI_RANGE)
+        {
+          tree one_cst = build_one_cst (case_label_type);
+
+          if (min_label == max_label)
+            {
+              /* If OP's value range is ~[7,8] and the label's range is
+                 7 ... 10, truncate the label's range to 9 ... 10.  */
+              if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) == 0
+                  && CASE_HIGH (min_label) != NULL_TREE
+                  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) > 0)
+                CASE_LOW (min_label)
+                  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
+
+              /* If OP's value range is ~[7,8] and the label's range is
+                 5 ... 8, truncate the label's range to 5 ... 6.  */
+              if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+                  && CASE_HIGH (min_label) != NULL_TREE
+                  && tree_int_cst_compare (CASE_HIGH (min_label), vr_max) == 0)
+                CASE_HIGH (min_label)
+                  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
+            }
+          else
+            {
+              /* If OP's value range is ~[2,8] and the low label range is
+                 0 ... 3, truncate the label's range to 0 ... 1.  */
+              if (tree_int_cst_compare (CASE_LOW (min_label), vr_min) < 0
+                  && CASE_HIGH (min_label) != NULL_TREE
+                  && tree_int_cst_compare (CASE_HIGH (min_label), vr_min) >= 0)
+                CASE_HIGH (min_label)
+                  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
+
+              /* If OP's value range is ~[2,8] and the high label range is
+                 7 ... 10, truncate the label's range to 9 ... 10.  */
+              if (tree_int_cst_compare (CASE_LOW (max_label), vr_max) <= 0
+                  && CASE_HIGH (max_label) != NULL_TREE
+                  && tree_int_cst_compare (CASE_HIGH (max_label), vr_max) > 0)
+                CASE_LOW (max_label)
+                  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
+            }
+        }
+
+      /* Canonicalize singleton case ranges.  */
+      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
+        CASE_HIGH (min_label) = NULL_TREE;
+      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
+        CASE_HIGH (max_label) = NULL_TREE;
+    }
+
+  /* We can also eliminate case labels that lie completely outside OP's value
+     range.  */
+
+  /* Bail out if this is just all edges taken.  */
+  if (i == 1
+      && j == n - 1
+      && take_default)
+    return false;
+
+  /* Build a new vector of taken case labels.  */
+  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
+  n2 = 0;
+
+  /* Add the default edge, if necessary.  */
+  if (take_default)
+    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (stmt);
+
+  for (; i <= j; ++i, ++n2)
+    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, i);
+
+  for (; k <= l; ++k, ++n2)
+    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (stmt, k);
+
+  /* Mark needed edges.  */
+  for (i = 0; i < n2; ++i)
+    {
+      e = find_edge (gimple_bb (stmt),
+                     label_to_block (CASE_LABEL (TREE_VEC_ELT (vec2, i))));
+      e->aux = (void *)-1;
+    }
+
+  /* Queue not needed edges for later removal.  */
+  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
+    {
+      if (e->aux == (void *)-1)
+        {
+          /* Edge was marked needed above; just clear the mark.  */
+          e->aux = NULL;
+          continue;
+        }
+
+      if (dump_file && (dump_flags & TDF_DETAILS))
+        {
+          fprintf (dump_file, "removing unreachable case label\n");
+        }
+      to_remove_edges.safe_push (e);
+      e->flags &= ~EDGE_EXECUTABLE;
+    }
+
+  /* And queue an update for the stmt.  */
+  su.stmt = stmt;
+  su.vec = vec2;
+  to_update_switch_stmts.safe_push (su);
+  return false;
+}
+
+/* Simplify an integral conversion from an SSA name in STMT.  STMT is a
+   conversion whose rhs was itself produced by a conversion; drop the
+   middle conversion when doing so cannot change the final value.
+   Returns true if STMT was changed.  */
+
+static bool
+simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
+{
+  tree innerop, middleop, finaltype;
+  gimple *def_stmt;
+  signop inner_sgn, middle_sgn, final_sgn;
+  unsigned inner_prec, middle_prec, final_prec;
+  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
+
+  finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
+  if (!INTEGRAL_TYPE_P (finaltype))
+    return false;
+  middleop = gimple_assign_rhs1 (stmt);
+  def_stmt = SSA_NAME_DEF_STMT (middleop);
+  if (!is_gimple_assign (def_stmt)
+      || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
+    return false;
+  innerop = gimple_assign_rhs1 (def_stmt);
+  if (TREE_CODE (innerop) != SSA_NAME
+      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop))
+    return false;
+
+  /* Get the value-range of the inner operand.  Use get_range_info in
+     case innerop was created during substitute-and-fold.  */
+  wide_int imin, imax;
+  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))
+      || get_range_info (innerop, &imin, &imax) != VR_RANGE)
+    return false;
+  innermin = widest_int::from (imin, TYPE_SIGN (TREE_TYPE (innerop)));
+  innermax = widest_int::from (imax, TYPE_SIGN (TREE_TYPE (innerop)));
+
+  /* Simulate the conversion chain to check if the result is equal if
+     the middle conversion is removed.  */
+  inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
+  middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
+  final_prec = TYPE_PRECISION (finaltype);
+
+  /* If the first conversion is not injective, the second must not
+     be widening.  */
+  if (wi::gtu_p (innermax - innermin,
+                 wi::mask <widest_int> (middle_prec, false))
+      && middle_prec < final_prec)
+    return false;
+  /* We also want a medium value so that we can track the effect that
+     narrowing conversions with sign change have.  */
+  inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
+  if (inner_sgn == UNSIGNED)
+    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
+  else
+    innermed = 0;
+  /* Only keep the medium value when it actually lies strictly between
+     the bounds; otherwise collapse it onto the minimum.  */
+  if (wi::cmp (innermin, innermed, inner_sgn) >= 0
+      || wi::cmp (innermed, innermax, inner_sgn) >= 0)
+    innermed = innermin;
+
+  middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
+  middlemin = wi::ext (innermin, middle_prec, middle_sgn);
+  middlemed = wi::ext (innermed, middle_prec, middle_sgn);
+  middlemax = wi::ext (innermax, middle_prec, middle_sgn);
+
+  /* Require that the final conversion applied to both the original
+     and the intermediate range produces the same result.  */
+  final_sgn = TYPE_SIGN (finaltype);
+  if (wi::ext (middlemin, final_prec, final_sgn)
+      != wi::ext (innermin, final_prec, final_sgn)
+      || wi::ext (middlemed, final_prec, final_sgn)
+      != wi::ext (innermed, final_prec, final_sgn)
+      || wi::ext (middlemax, final_prec, final_sgn)
+      != wi::ext (innermax, final_prec, final_sgn))
+    return false;
+
+  gimple_assign_set_rhs1 (stmt, innerop);
+  fold_stmt (gsi, follow_single_use_edges);
+  return true;
+}
+
+/* Simplify a conversion from integral SSA name to float in STMT.
+
+   If the value range of the integer input proves it fits a signed
+   type of the same precision, or a narrower signed mode that the
+   target can convert to the float mode directly, insert a
+   truncation/sign-change before STMT so expansion can use the
+   cheaper signed conversion.  Return true if STMT was changed.  */
+
+bool
+vr_values::simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi,
+						   gimple *stmt)
+{
+  tree rhs1 = gimple_assign_rhs1 (stmt);
+  value_range *vr = get_value_range (rhs1);
+  scalar_float_mode fltmode
+    = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
+  scalar_int_mode mode;
+  tree tem;
+  gassign *conv;
+
+  /* We can only handle constant ranges.  */
+  if (vr->type != VR_RANGE
+      || TREE_CODE (vr->min) != INTEGER_CST
+      || TREE_CODE (vr->max) != INTEGER_CST)
+    return false;
+
+  /* First check if we can use a signed type in place of an unsigned.
+     This works when the target has a signed int->float pattern for
+     the input mode and the range fits the same-precision signed
+     type.  */
+  scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
+  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
+      && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
+      && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
+    mode = rhs_mode;
+  /* If we can do the conversion in the current input mode do nothing.  */
+  else if (can_float_p (fltmode, rhs_mode,
+			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
+    return false;
+  /* Otherwise search for a mode we can use, starting from the narrowest
+     integer mode available.  */
+  else
+    {
+      mode = NARROWEST_INT_MODE;
+      for (;;)
+	{
+	  /* If we cannot do a signed conversion to float from mode
+	     or if the value-range does not fit in the signed type
+	     try with a wider mode.  */
+	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
+	      && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
+	    break;
+
+	  /* But do not widen the input.  Instead leave that to the
+	     optabs expansion code.  */
+	  if (!GET_MODE_WIDER_MODE (mode).exists (&mode)
+	      || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
+	    return false;
+	}
+    }
+
+  /* It works, insert a truncation or sign-change before the
+     float conversion.  The 0 asks for a signed type of MODE's
+     precision.  */
+  tem = make_ssa_name (build_nonstandard_integer_type
+			 (GET_MODE_PRECISION (mode), 0));
+  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
+  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
+  gimple_assign_set_rhs1 (stmt, tem);
+  fold_stmt (gsi, follow_single_use_edges);
+
+  return true;
+}
+
+/* Simplify an internal fn call using ranges if possible.
+
+   Handles the IFN_UBSAN_CHECK_{ADD,SUB,MUL} and
+   IFN_{ADD,SUB,MUL}_OVERFLOW internal functions.  When the operand
+   ranges prove the overflow status at compile time, replace the call
+   with plain arithmetic: for ubsan checks only when no overflow can
+   occur (the check must otherwise stay); for *_OVERFLOW in either
+   case, producing a COMPLEX_EXPR whose second component is the now
+   constant overflow flag.  Return true if STMT was replaced.  */
+
+bool
+vr_values::simplify_internal_call_using_ranges (gimple_stmt_iterator *gsi,
+						gimple *stmt)
+{
+  enum tree_code subcode;
+  bool is_ubsan = false;
+  bool ovf = false;
+  /* Map the internal function to the tree arithmetic code it checks.  */
+  switch (gimple_call_internal_fn (stmt))
+    {
+    case IFN_UBSAN_CHECK_ADD:
+      subcode = PLUS_EXPR;
+      is_ubsan = true;
+      break;
+    case IFN_UBSAN_CHECK_SUB:
+      subcode = MINUS_EXPR;
+      is_ubsan = true;
+      break;
+    case IFN_UBSAN_CHECK_MUL:
+      subcode = MULT_EXPR;
+      is_ubsan = true;
+      break;
+    case IFN_ADD_OVERFLOW:
+      subcode = PLUS_EXPR;
+      break;
+    case IFN_SUB_OVERFLOW:
+      subcode = MINUS_EXPR;
+      break;
+    case IFN_MUL_OVERFLOW:
+      subcode = MULT_EXPR;
+      break;
+    default:
+      return false;
+    }
+
+  tree op0 = gimple_call_arg (stmt, 0);
+  tree op1 = gimple_call_arg (stmt, 1);
+  tree type;
+  if (is_ubsan)
+    {
+      /* Ubsan checks compute in the operand type; vector variants
+	 are not handled here.  */
+      type = TREE_TYPE (op0);
+      if (VECTOR_TYPE_P (type))
+	return false;
+    }
+  else if (gimple_call_lhs (stmt) == NULL_TREE)
+    return false;
+  else
+    /* The *_OVERFLOW lhs is a complex pair; TYPE is its element
+       type (see the COMPLEX_EXPR built below).  */
+    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
+  /* Bail out if the overflow status cannot be determined from the
+     ranges, or if a ubsan check could actually overflow.  */
+  if (!check_for_binary_op_overflow (subcode, type, op0, op1, &ovf)
+      || (is_ubsan && ovf))
+    return false;
+
+  gimple *g;
+  location_t loc = gimple_location (stmt);
+  if (is_ubsan)
+    g = gimple_build_assign (gimple_call_lhs (stmt), subcode, op0, op1);
+  else
+    {
+      /* Compute in an unsigned type when overflow is known to occur
+	 or the operand types do not match TYPE, so the arithmetic
+	 itself is well-defined; convert back afterwards.  */
+      int prec = TYPE_PRECISION (type);
+      tree utype = type;
+      if (ovf
+	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
+	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
+	utype = build_nonstandard_integer_type (prec, 1);
+      if (TREE_CODE (op0) == INTEGER_CST)
+	op0 = fold_convert (utype, op0);
+      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
+	{
+	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op0);
+	  gimple_set_location (g, loc);
+	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
+	  op0 = gimple_assign_lhs (g);
+	}
+      if (TREE_CODE (op1) == INTEGER_CST)
+	op1 = fold_convert (utype, op1);
+      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
+	{
+	  g = gimple_build_assign (make_ssa_name (utype), NOP_EXPR, op1);
+	  gimple_set_location (g, loc);
+	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
+	  op1 = gimple_assign_lhs (g);
+	}
+      g = gimple_build_assign (make_ssa_name (utype), subcode, op0, op1);
+      gimple_set_location (g, loc);
+      gsi_insert_before (gsi, g, GSI_SAME_STMT);
+      if (utype != type)
+	{
+	  g = gimple_build_assign (make_ssa_name (type), NOP_EXPR,
+				   gimple_assign_lhs (g));
+	  gimple_set_location (g, loc);
+	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
+	}
+      /* Rebuild the complex result with the constant overflow flag.  */
+      g = gimple_build_assign (gimple_call_lhs (stmt), COMPLEX_EXPR,
+			       gimple_assign_lhs (g),
+			       build_int_cst (type, ovf));
+    }
+  gimple_set_location (g, loc);
+  gsi_replace (gsi, g, false);
+  return true;
+}
+
+/* Return true if VAR is a two-valued variable.  Set a and b with the
+   two-values when it is true.  Return false otherwise.  */
+
+bool
+vr_values::two_valued_val_range_p (tree var, tree *a, tree *b)
+{
+  value_range *vr = get_value_range (var);
+
+  /* Only a range or anti-range with constant integral bounds can
+     describe exactly two values.  */
+  if (vr->type != VR_RANGE && vr->type != VR_ANTI_RANGE)
+    return false;
+  if (TREE_CODE (vr->min) != INTEGER_CST
+      || TREE_CODE (vr->max) != INTEGER_CST)
+    return false;
+
+  if (vr->type == VR_RANGE)
+    {
+      /* [MIN, MIN + 1] contains exactly MIN and MIN + 1.  */
+      if (wi::to_wide (vr->max) - wi::to_wide (vr->min) != 1)
+	return false;
+      *a = vr->min;
+      *b = vr->max;
+      return true;
+    }
+
+  /* ~[TYPE_MIN + 1, TYPE_MAX - 1] leaves only TYPE_MIN and TYPE_MAX.  */
+  tree type_min = vrp_val_min (TREE_TYPE (var));
+  tree type_max = vrp_val_max (TREE_TYPE (var));
+  if (wi::to_wide (vr->min) - wi::to_wide (type_min) != 1
+      || wi::to_wide (type_max) - wi::to_wide (vr->max) != 1)
+    return false;
+  *a = type_min;
+  *b = type_max;
+  return true;
+}
+
+/* Simplify STMT using ranges if possible.  Dispatches to the
+   per-operation simplifiers based on the statement kind and its rhs
+   code.  Return true if STMT was simplified in place.  */
+
+bool
+vr_values::simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+{
+  gimple *stmt = gsi_stmt (*gsi);
+  if (is_gimple_assign (stmt))
+    {
+      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
+      tree rhs1 = gimple_assign_rhs1 (stmt);
+      tree rhs2 = gimple_assign_rhs2 (stmt);
+      tree lhs = gimple_assign_lhs (stmt);
+      tree val1 = NULL_TREE, val2 = NULL_TREE;
+      use_operand_p use_p;
+      gimple *use_stmt;
+
+      /* Convert:
+	 LHS = CST BINOP VAR
+	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
+	 To:
+	 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)
+
+	 Also handles:
+	 LHS = VAR BINOP CST
+	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
+	 To:
+	 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */
+
+      if (TREE_CODE_CLASS (rhs_code) == tcc_binary
+	  && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+	  && ((TREE_CODE (rhs1) == INTEGER_CST
+	       && TREE_CODE (rhs2) == SSA_NAME)
+	      || (TREE_CODE (rhs2) == INTEGER_CST
+		  && TREE_CODE (rhs1) == SSA_NAME))
+	  && single_imm_use (lhs, &use_p, &use_stmt)
+	  && gimple_code (use_stmt) == GIMPLE_COND)
+
+	{
+	  tree new_rhs1 = NULL_TREE;
+	  tree new_rhs2 = NULL_TREE;
+	  tree cmp_var = NULL_TREE;
+
+	  if (TREE_CODE (rhs2) == SSA_NAME
+	      && two_valued_val_range_p (rhs2, &val1, &val2))
+	    {
+	      /* Optimize RHS1 OP [VAL1, VAL2].  */
+	      new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
+	      new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
+	      cmp_var = rhs2;
+	    }
+	  else if (TREE_CODE (rhs1) == SSA_NAME
+		   && two_valued_val_range_p (rhs1, &val1, &val2))
+	    {
+	      /* Optimize [VAL1, VAL2] OP RHS2.  */
+	      new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
+	      new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
+	      cmp_var = rhs1;
+	    }
+
+	  /* If we could not find two-vals or the optimization is invalid as
+	     in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
+	  if (new_rhs1 && new_rhs2)
+	    {
+	      tree cond = build2 (EQ_EXPR, boolean_type_node, cmp_var, val1);
+	      gimple_assign_set_rhs_with_ops (gsi,
+					      COND_EXPR, cond,
+					      new_rhs1,
+					      new_rhs2);
+	      update_stmt (gsi_stmt (*gsi));
+	      fold_stmt (gsi, follow_single_use_edges);
+	      return true;
+	    }
+	}
+
+      switch (rhs_code)
+	{
+	case EQ_EXPR:
+	case NE_EXPR:
+	  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
+	     if the RHS is zero or one, and the LHS are known to be boolean
+	     values.  */
+	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_truth_ops_using_ranges (gsi, stmt);
+	  break;
+
+      /* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
+	 and BIT_AND_EXPR respectively if the first operand is greater
+	 than zero and the second operand is an exact power of two.
+	 Also optimize TRUNC_MOD_EXPR away if the second operand is
+	 constant and the first operand already has the right value
+	 range.  */
+	case TRUNC_DIV_EXPR:
+	case TRUNC_MOD_EXPR:
+	  if ((TREE_CODE (rhs1) == SSA_NAME
+	       || TREE_CODE (rhs1) == INTEGER_CST)
+	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_div_or_mod_using_ranges (gsi, stmt);
+	  break;
+
+      /* Transform ABS (X) into X or -X as appropriate.  */
+	case ABS_EXPR:
+	  if (TREE_CODE (rhs1) == SSA_NAME
+	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_abs_using_ranges (gsi, stmt);
+	  break;
+
+	case BIT_AND_EXPR:
+	case BIT_IOR_EXPR:
+	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
+	     if all the bits being cleared are already cleared or
+	     all the bits being set are already set.  */
+	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_bit_ops_using_ranges (gsi, stmt);
+	  break;
+
+	CASE_CONVERT:
+	  if (TREE_CODE (rhs1) == SSA_NAME
+	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_conversion_using_ranges (gsi, stmt);
+	  break;
+
+	case FLOAT_EXPR:
+	  if (TREE_CODE (rhs1) == SSA_NAME
+	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
+	    return simplify_float_conversion_using_ranges (gsi, stmt);
+	  break;
+
+	case MIN_EXPR:
+	case MAX_EXPR:
+	  /* Fold MIN/MAX to one operand when the range decides it.  */
+	  return simplify_min_or_max_using_ranges (gsi, stmt);
+
+	default:
+	  break;
+	}
+    }
+  else if (gimple_code (stmt) == GIMPLE_COND)
+    return simplify_cond_using_ranges_1 (as_a <gcond *> (stmt));
+  else if (gimple_code (stmt) == GIMPLE_SWITCH)
+    return simplify_switch_using_ranges (as_a <gswitch *> (stmt));
+  else if (is_gimple_call (stmt)
+	   && gimple_call_internal_p (stmt))
+    return simplify_internal_call_using_ranges (gsi, stmt);
+
+  return false;
+}
+
+/* Record VR as the value range of VAR.  SSA names whose version is
+   beyond the size of the VR_VALUE table are silently ignored.  */
+
+void
+vr_values::set_vr_value (tree var, value_range *vr)
+{
+  unsigned ver = SSA_NAME_VERSION (var);
+  if (ver < num_vr_values)
+    vr_value[ver] = vr;
+}
+