--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2 -fno-ipa-icf -fdump-tree-vrp1" } */
+
+extern void arf (unsigned x, unsigned y);
+extern void baz (unsigned x, unsigned y);
+
+unsigned
+f1 (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (b < a)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+
+unsigned
+f1r (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (a < b)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f1n (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(b < a))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f1nr (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(a < b))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+
+unsigned
+f1o (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (a < b)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f1ro (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (b < a)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f1no (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(a < b))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f1nro (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(b < a))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+
+unsigned
+f2 (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (b <= a)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f2r (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (a <= b)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f2n (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(b <= a))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f2nr (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(a <= b))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+
+unsigned
+f2o (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (a <= b)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f2ro (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (b <= a)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f2no (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(a <= b))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f2nro (unsigned a, unsigned b)
+{
+ b = a + 1;
+ if (!(b <= a))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+
+unsigned
+f3 (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (b < a)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f3r (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (a < b)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f3n (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(b < a))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f3nr (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(a < b))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+
+unsigned
+f3o (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (a < b)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f3ro (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (b < a)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f3no (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(a < b))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f3nro (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(b < a))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+
+unsigned
+f4 (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (b <= a)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f4r (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (a <= b)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f4n (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(b <= a))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f4nr (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(a <= b))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+
+unsigned
+f4o (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (a <= b)
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+unsigned
+f4ro (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (b <= a)
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f4no (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(a <= b))
+ {
+ baz (a, b);
+ return 42;
+ }
+ arf (a, b);
+ return b;
+}
+
+unsigned
+f4nro (unsigned a, unsigned b)
+{
+ b = a - 1;
+ if (!(b <= a))
+ {
+ arf (a, b);
+ return 42;
+ }
+ baz (a, b);
+ return b;
+}
+
+/* All calls to baz should still reference a and b as arguments. */
+/* { dg-final { scan-tree-dump-times "baz \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\)" 32 "vrp1"} } */
+
+
+/* All calls to arf should have constant arguments. */
+/* { dg-final { scan-tree-dump-times "arf \\(\[0-9\]+, \[0-9\]+\\)" 32 "vrp1"} } */
else if (vr1.type == VR_UNDEFINED)
set_value_range_to_varying (&vr1);
+ /* We get imprecise results from ranges_from_anti_range when
+ code is EXACT_DIV_EXPR. We could mask out bits in the resulting
+ range, but then we would also need to hack up vrp_meet. It's
+ just easier to special-case EXACT_DIV_EXPR when vr0 is ~[0,0]. */
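+ /* For example, for a pointer difference (p - q) exact-divided by
+ the element size, vr0 = ~[0,0] (p != q) should give a nonzero
+ result, but splitting ~[0,0] into [MIN,-1] U [1,MAX] and dividing
+ each half re-introduces zero into the unioned range.
+ (Illustrative note, not part of the original patch.) */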
+ if (code == EXACT_DIV_EXPR
+ && vr0.type == VR_ANTI_RANGE
+ && vr0.min == vr0.max
+ && integer_zerop (vr0.min))
+ {
+ set_value_range_to_nonnull (vr, expr_type);
+ return;
+ }
+
/* Now canonicalize anti-ranges to ranges when they are not symbolic
and express ~[] op X as ([]' op X) U ([]'' op X). */
if (vr0.type == VR_ANTI_RANGE
extract_range_from_binary_expr_1 (vr, code, expr_type, &n_vr0, &vr1);
}
+
+ /* If we didn't derive a range for MINUS_EXPR, and
+ op1's range is ~[op0,op0] or vice-versa, then we
+ can derive a non-null range. This happens often for
+ pointer subtraction. */
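+ /* E.g., if op1's recorded range is ~[op0,op0] (op1 != op0), then
+ op0 - op1 can never be zero, so the result gets range ~[0,0].
+ (Illustrative note, not part of the original patch.) */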
+ if (vr->type == VR_VARYING
+ && code == MINUS_EXPR
+ && TREE_CODE (op0) == SSA_NAME
+ && ((vr0.type == VR_ANTI_RANGE
+ && vr0.min == op1
+ && vr0.min == vr0.max)
+ || (vr1.type == VR_ANTI_RANGE
+ && vr1.min == op0
+ && vr1.min == vr1.max)))
+ set_value_range_to_nonnull (vr, TREE_TYPE (op0));
}
/* Extract range information from a unary operation CODE based on
return val ^ sgnbit;
}
+/* Helper for overflow_comparison_p
+
+ OP0 CODE OP1 is a comparison. Examine the comparison and potentially
+ OP1's defining statement to see if it ultimately has the form
+ OP0 CODE (OP0 PLUS INTEGER_CST)
+
+ If so, return TRUE indicating this is an overflow test and store into
+ *NEW_CST an updated constant that can be used in a narrowed range test.
+
+ REVERSED indicates whether the comparison was originally:
+
+ OP1 CODE' OP0.
+
+ This affects how we build the updated constant. */
+
+static bool
+overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
+ bool follow_assert_exprs, bool reversed, tree *new_cst)
+{
+ /* See if this is a relational operation between two SSA_NAMEs with
+ unsigned, overflow-wrapping values. If so, check it more deeply. */
+ if ((code == LT_EXPR || code == LE_EXPR
+ || code == GE_EXPR || code == GT_EXPR)
+ && TREE_CODE (op0) == SSA_NAME
+ && TREE_CODE (op1) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (op0))
+ && TYPE_UNSIGNED (TREE_TYPE (op0))
+ && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
+ {
+ gimple *op1_def = SSA_NAME_DEF_STMT (op1);
+
+ /* If requested, follow any ASSERT_EXPRs backwards for OP1. */
+ if (follow_assert_exprs)
+ {
+ while (gimple_assign_single_p (op1_def)
+ && TREE_CODE (gimple_assign_rhs1 (op1_def)) == ASSERT_EXPR)
+ {
+ op1 = TREE_OPERAND (gimple_assign_rhs1 (op1_def), 0);
+ if (TREE_CODE (op1) != SSA_NAME)
+ break;
+ op1_def = SSA_NAME_DEF_STMT (op1);
+ }
+ }
+
+ /* Now look at the defining statement of OP1 to see if it adds a
+ nonzero constant to another operand (a subtraction shows up here
+ as the addition of a negative constant). */
+ if (op1_def
+ && is_gimple_assign (op1_def)
+ && gimple_assign_rhs_code (op1_def) == PLUS_EXPR
+ && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
+ && !integer_zerop (gimple_assign_rhs2 (op1_def)))
+ {
+ tree target = gimple_assign_rhs1 (op1_def);
+
+ /* If requested, follow ASSERT_EXPRs backwards for op0 looking
+ for one where TARGET appears on the RHS. */
+ if (follow_assert_exprs)
+ {
+ /* Now see if that "other operand" is op0, following the chain
+ of ASSERT_EXPRs if necessary. */
+ gimple *op0_def = SSA_NAME_DEF_STMT (op0);
+ while (op0 != target
+ && gimple_assign_single_p (op0_def)
+ && TREE_CODE (gimple_assign_rhs1 (op0_def)) == ASSERT_EXPR)
+ {
+ op0 = TREE_OPERAND (gimple_assign_rhs1 (op0_def), 0);
+ if (TREE_CODE (op0) != SSA_NAME)
+ break;
+ op0_def = SSA_NAME_DEF_STMT (op0);
+ }
+ }
+
+ /* If we did not find our target SSA_NAME, then this is not
+ an overflow test. */
+ if (op0 != target)
+ return false;
+
+ tree type = TREE_TYPE (op0);
+ wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
+ tree inc = gimple_assign_rhs2 (op1_def);
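+ /* For example, with a 32-bit unsigned type and OP1 = OP0 + 1
+ (INC = 1): in the canonical orientation the constant is
+ MAX - 1 = 0xfffffffe, and OP0 > 0xfffffffe exactly when the
+ addition overflowed; in the reversed orientation the constant is
+ MAX + INC, which for INC = 1 wraps to zero, so the sum ends up
+ compared against zero. (Illustrative note, not part of the
+ original patch.) */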
+ if (reversed)
+ *new_cst = wide_int_to_tree (type, max + inc);
+ else
+ *new_cst = wide_int_to_tree (type, max - inc);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
+ OP1's defining statement to see if it ultimately has the form
+ OP0 CODE (OP0 PLUS INTEGER_CST)
+
+ If so, return TRUE indicating this is an overflow test and store into
+ *NEW_CST an updated constant that can be used in a narrowed range test.
+
+ These statements are left as-is in the IL to facilitate discovery of
+ {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
+ the alternate range representation is often useful within VRP. */
+
+static bool
+overflow_comparison_p (tree_code code, tree name, tree val,
+ bool use_equiv_p, tree *new_cst)
+{
+ if (overflow_comparison_p_1 (code, name, val, use_equiv_p, false, new_cst))
+ return true;
+ return overflow_comparison_p_1 (swap_tree_comparison (code), val, name,
+ use_equiv_p, true, new_cst);
+}
+
+
/* Try to register an edge assertion for SSA name NAME on edge E for
the condition COND contributing to the conditional jump pointed to by BSI.
Invert the condition COND if INVERT is true. */
/* Only register an ASSERT_EXPR if NAME was found in the sub-graph
reachable from E. */
if (live_on_edge (e, name))
- register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
+ {
+ tree x;
+ if (overflow_comparison_p (comp_code, name, val, false, &x))
+ {
+ enum tree_code new_code
+ = ((comp_code == GT_EXPR || comp_code == GE_EXPR)
+ ? GT_EXPR : LE_EXPR);
+ register_new_assert_for (name, name, new_code, x, NULL, e, bsi);
+ }
+ register_new_assert_for (name, name, comp_code, val, NULL, e, bsi);
+ }
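+ /* E.g., for b = a + 1; if (b < a), the true edge additionally
+ gets the assertion b <= 0 (i.e. b == 0) for NAME b, alongside
+ the original b < a assertion. (Illustrative note, not part of
+ the original patch.) */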
/* In the case of NAME <= CST and NAME being defined as
NAME = (unsigned) NAME2 + CST2 we can assert NAME2 >= -CST2
&& !POINTER_TYPE_P (TREE_TYPE (op0)))
return NULL_TREE;
+ /* If OP0 CODE OP1 is an overflow comparison and it can be expressed
+ as a simple equality test, then prefer that form for evaluation.
+
+ An overflow test which collapses to an equality test can always be
+ expressed as a comparison of one argument against zero. Overflow
+ occurs when the chosen argument is zero and does not occur if the
+ chosen argument is not zero. */
+ tree x;
+ if (overflow_comparison_p (code, op0, op1, use_equiv_p, &x))
+ {
+ wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
+ /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
+ B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
+ B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
+ B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
+ if (integer_zerop (x))
+ {
+ op1 = x;
+ code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
+ }
+ /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
+ B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
+ B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
+ B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
+ else if (wi::eq_p (x, max - 1))
+ {
+ op0 = op1;
+ op1 = wide_int_to_tree (TREE_TYPE (op0), 0);
+ code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
+ }
+ }
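+ /* E.g., with 32-bit unsigned A and B = A + 1, the test A < B
+ yields x = 0xfffffffe (max - 1), so the comparison is rewritten
+ to B != 0. (Illustrative note, not part of the original patch.) */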
+
if ((ret = vrp_evaluate_conditional_warnv_with_ops_using_ranges
(code, op0, op1, strict_overflow_p)))
return ret;
else if (vrp_val_is_min (vr1min)
&& vrp_val_is_max (vr1max))
;
+ /* Choose the anti-range if it is ~[0,0]; that range is special
+ enough to special-case when vr1's range is relatively wide. */
+ else if (*vr0min == *vr0max
+ && integer_zerop (*vr0min)
+ && (TYPE_PRECISION (TREE_TYPE (*vr0min))
+ == TYPE_PRECISION (ptr_type_node))
+ && TREE_CODE (vr1max) == INTEGER_CST
+ && TREE_CODE (vr1min) == INTEGER_CST
+ && (wi::clz (wi::sub (vr1max, vr1min))
+ < TYPE_PRECISION (TREE_TYPE (*vr0min)) / 2))
+ ;
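+ /* E.g., unioning ~[0,0] with [1, 0x7fffffff] in a 32-bit
+ pointer-width type keeps ~[0,0]: the wide range conveys little
+ information, while the nonzero (nonnull) property is valuable.
+ (Illustrative note, not part of the original patch.) */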
/* Else choose the range. */
else
{