* fold-const.c (int_const_binop_1): Abstract...
(wide_int_binop): ...wide int code here.
(poly_int_binop): ...poly int code here.
	Abstract the rest of int_const_binop_1 into int_const_binop.
* fold-const.h (wide_int_binop): New.
* tree-vrp.c (vrp_int_const_binop): Call wide_int_binop.
Remove useless PLUS/MINUS_EXPR case.
(zero_nonzero_bits_from_vr): Move wide int code...
(zero_nonzero_bits_from_bounds): ...here.
(extract_range_from_binary_expr_1): Move mask optimization code...
(range_easy_mask_min_max): ...here.
* tree-vrp.h (zero_nonzero_bits_from_bounds): New.
(range_easy_mask_min_max): New.
From-SVN: r262676
+2018-07-16 Aldy Hernandez <aldyh@redhat.com>
+
+ * fold-const.c (int_const_binop_1): Abstract...
+ (wide_int_binop): ...wide int code here.
+ (poly_int_binop): ...poly int code here.
+ Abstract the rest of int_const_binop_1 into int_const_binop.
+ * fold-const.h (wide_int_binop): New.
+ * tree-vrp.c (vrp_int_const_binop): Call wide_int_binop.
+ Remove useless PLUS/MINUS_EXPR case.
+ (zero_nonzero_bits_from_vr): Move wide int code...
+ (zero_nonzero_bits_from_bounds): ...here.
+ (extract_range_from_binary_expr_1): Move mask optimization code...
+ (range_easy_mask_min_max): ...here.
+ * tree-vrp.h (zero_nonzero_bits_from_bounds): New.
+ (range_easy_mask_min_max): New.
+
2018-07-15 Jeff Law <law@redhat.com>
PR target/85993
&& TYPE_MODE (type1) == TYPE_MODE (type2);
}
-/* Subroutine of int_const_binop_1 that handles two INTEGER_CSTs. */
+/* Combine two wide ints ARG1 and ARG2 under operation CODE to produce
+ a new constant in RES. Return FALSE if we don't know how to
+ evaluate CODE at compile-time. */
-static tree
-int_const_binop_2 (enum tree_code code, const_tree parg1, const_tree parg2,
- int overflowable)
+bool
+wide_int_binop (wide_int &res,
+ enum tree_code code, const wide_int &arg1, const wide_int &arg2,
+ signop sign, wi::overflow_type *overflow)
{
- wide_int res;
- tree t;
- tree type = TREE_TYPE (parg1);
- signop sign = TYPE_SIGN (type);
- wi::overflow_type overflow = wi::OVF_NONE;
-
- wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
- wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
-
+ wide_int tmp;
+ *overflow = wi::OVF_NONE;
switch (code)
{
case BIT_IOR_EXPR:
case LSHIFT_EXPR:
if (wi::neg_p (arg2))
{
- arg2 = -arg2;
+ tmp = -arg2;
if (code == RSHIFT_EXPR)
code = LSHIFT_EXPR;
else
code = RSHIFT_EXPR;
}
+ else
+ tmp = arg2;
if (code == RSHIFT_EXPR)
/* It's unclear from the C standard whether shifts can overflow.
The following code ignores overflow; perhaps a C standard
interpretation ruling is needed. */
- res = wi::rshift (arg1, arg2, sign);
+ res = wi::rshift (arg1, tmp, sign);
else
- res = wi::lshift (arg1, arg2);
+ res = wi::lshift (arg1, tmp);
break;
case RROTATE_EXPR:
case LROTATE_EXPR:
if (wi::neg_p (arg2))
{
- arg2 = -arg2;
+ tmp = -arg2;
if (code == RROTATE_EXPR)
code = LROTATE_EXPR;
else
code = RROTATE_EXPR;
}
+ else
+ tmp = arg2;
if (code == RROTATE_EXPR)
- res = wi::rrotate (arg1, arg2);
+ res = wi::rrotate (arg1, tmp);
else
- res = wi::lrotate (arg1, arg2);
+ res = wi::lrotate (arg1, tmp);
break;
case PLUS_EXPR:
- res = wi::add (arg1, arg2, sign, &overflow);
+ res = wi::add (arg1, arg2, sign, overflow);
break;
case MINUS_EXPR:
- res = wi::sub (arg1, arg2, sign, &overflow);
+ res = wi::sub (arg1, arg2, sign, overflow);
break;
case MULT_EXPR:
- res = wi::mul (arg1, arg2, sign, &overflow);
+ res = wi::mul (arg1, arg2, sign, overflow);
break;
case MULT_HIGHPART_EXPR:
case TRUNC_DIV_EXPR:
case EXACT_DIV_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::div_trunc (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::div_trunc (arg1, arg2, sign, overflow);
break;
case FLOOR_DIV_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::div_floor (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::div_floor (arg1, arg2, sign, overflow);
break;
case CEIL_DIV_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::div_ceil (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::div_ceil (arg1, arg2, sign, overflow);
break;
case ROUND_DIV_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::div_round (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::div_round (arg1, arg2, sign, overflow);
break;
case TRUNC_MOD_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::mod_trunc (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::mod_trunc (arg1, arg2, sign, overflow);
break;
case FLOOR_MOD_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::mod_floor (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::mod_floor (arg1, arg2, sign, overflow);
break;
case CEIL_MOD_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::mod_ceil (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::mod_ceil (arg1, arg2, sign, overflow);
break;
case ROUND_MOD_EXPR:
if (arg2 == 0)
- return NULL_TREE;
- res = wi::mod_round (arg1, arg2, sign, &overflow);
+ return false;
+ res = wi::mod_round (arg1, arg2, sign, overflow);
break;
case MIN_EXPR:
break;
default:
- return NULL_TREE;
+ return false;
}
-
- t = force_fit_type (type, res, overflowable,
- (((sign == SIGNED || overflowable == -1)
- && overflow)
- | TREE_OVERFLOW (parg1) | TREE_OVERFLOW (parg2)));
-
- return t;
+ return true;
}
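For orientation, a minimal caller sketch of the new entry point (not part of the patch; the values, the 32-bit precision and the assert are invented for illustration):

   /* Sketch only: fold 6 + 7 at 32-bit signed precision.  */
   wide_int a = wi::shwi (6, 32);
   wide_int b = wi::shwi (7, 32);
   wide_int res;
   wi::overflow_type ovf;
   if (wide_int_binop (res, PLUS_EXPR, a, b, SIGNED, &ovf))
     gcc_assert (wi::eq_p (res, wi::shwi (13, 32)) && ovf == wi::OVF_NONE);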
-/* Combine two integer constants PARG1 and PARG2 under operation CODE
- to produce a new constant. Return NULL_TREE if we don't know how
+/* Combine two poly ints ARG1 and ARG2 under operation CODE to
+ produce a new constant in RES. Return FALSE if we don't know how
to evaluate CODE at compile-time. */
-static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
- int overflowable)
+static bool
+poly_int_binop (poly_wide_int &res, enum tree_code code,
+ const_tree arg1, const_tree arg2,
+ signop sign, wi::overflow_type *overflow)
{
- if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST)
- return int_const_binop_2 (code, arg1, arg2, overflowable);
-
gcc_assert (NUM_POLY_INT_COEFFS != 1);
-
- if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
+ gcc_assert (poly_int_tree_p (arg1) && poly_int_tree_p (arg2));
+ switch (code)
{
- poly_wide_int res;
- wi::overflow_type overflow;
- tree type = TREE_TYPE (arg1);
- signop sign = TYPE_SIGN (type);
- switch (code)
- {
- case PLUS_EXPR:
- res = wi::add (wi::to_poly_wide (arg1),
- wi::to_poly_wide (arg2), sign, &overflow);
- break;
+ case PLUS_EXPR:
+ res = wi::add (wi::to_poly_wide (arg1),
+ wi::to_poly_wide (arg2), sign, overflow);
+ break;
- case MINUS_EXPR:
- res = wi::sub (wi::to_poly_wide (arg1),
- wi::to_poly_wide (arg2), sign, &overflow);
- break;
+ case MINUS_EXPR:
+ res = wi::sub (wi::to_poly_wide (arg1),
+ wi::to_poly_wide (arg2), sign, overflow);
+ break;
- case MULT_EXPR:
- if (TREE_CODE (arg2) == INTEGER_CST)
- res = wi::mul (wi::to_poly_wide (arg1),
- wi::to_wide (arg2), sign, &overflow);
- else if (TREE_CODE (arg1) == INTEGER_CST)
- res = wi::mul (wi::to_poly_wide (arg2),
- wi::to_wide (arg1), sign, &overflow);
- else
- return NULL_TREE;
- break;
+ case MULT_EXPR:
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ res = wi::mul (wi::to_poly_wide (arg1),
+ wi::to_wide (arg2), sign, overflow);
+ else if (TREE_CODE (arg1) == INTEGER_CST)
+ res = wi::mul (wi::to_poly_wide (arg2),
+ wi::to_wide (arg1), sign, overflow);
+ else
+	return false;
+ break;
- case LSHIFT_EXPR:
- if (TREE_CODE (arg2) == INTEGER_CST)
- res = wi::to_poly_wide (arg1) << wi::to_wide (arg2);
- else
- return NULL_TREE;
- break;
+ case LSHIFT_EXPR:
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ res = wi::to_poly_wide (arg1) << wi::to_wide (arg2);
+ else
+ return false;
+ break;
- case BIT_IOR_EXPR:
- if (TREE_CODE (arg2) != INTEGER_CST
- || !can_ior_p (wi::to_poly_wide (arg1), wi::to_wide (arg2),
- &res))
- return NULL_TREE;
- break;
+ case BIT_IOR_EXPR:
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || !can_ior_p (wi::to_poly_wide (arg1), wi::to_wide (arg2),
+ &res))
+ return false;
+ break;
- default:
- return NULL_TREE;
- }
- return force_fit_type (type, res, overflowable,
- (((sign == SIGNED || overflowable == -1)
- && overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)));
+ default:
+ return false;
}
-
- return NULL_TREE;
+ return true;
}
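As a quick illustration of the poly int path (not from the patch): poly_int addition is coefficient-wise, so on a target with NUM_POLY_INT_COEFFS > 1 a PLUS_EXPR of the poly constants 4 + 4x and 2 (that is, 2 + 0x) evaluates to 6 + 4x, where x is the runtime length multiplier.  Callers reach this through int_const_binop below, since poly_int_binop itself is static.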
+/* Combine two integer constants ARG1 and ARG2 under operation CODE to
+   produce a new constant.  Return NULL_TREE if we don't know how to
+   evaluate CODE at compile-time.  OVERFLOWABLE is passed on to
+   force_fit_type when building the result.  */
+
tree
-int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2)
+int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2,
+ int overflowable)
{
- return int_const_binop_1 (code, arg1, arg2, 1);
+ bool success = false;
+ poly_wide_int poly_res;
+ tree type = TREE_TYPE (arg1);
+ signop sign = TYPE_SIGN (type);
+ wi::overflow_type overflow = wi::OVF_NONE;
+
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST)
+ {
+ wide_int warg1 = wi::to_wide (arg1), res;
+ wide_int warg2 = wi::to_wide (arg2, TYPE_PRECISION (type));
+ success = wide_int_binop (res, code, warg1, warg2, sign, &overflow);
+ poly_res = res;
+ }
+ else if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
+ success = poly_int_binop (poly_res, code, arg1, arg2, sign, &overflow);
+ if (success)
+ return force_fit_type (type, poly_res, overflowable,
+ (((sign == SIGNED || overflowable == -1)
+ && overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)));
+ return NULL_TREE;
}
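A usage sketch of the combined entry point (illustrative only; the constants and variable names are made up):

   /* Sketch only: fold 10 * 4 on two INTEGER_CSTs.  The defaulted
      OVERFLOWABLE argument keeps existing callers source-compatible;
      passing -1 requests the "always note overflow" behavior used by
      the sizetype caller further below.  */
   tree a = build_int_cst (integer_type_node, 10);
   tree b = build_int_cst (integer_type_node, 4);
   tree r = int_const_binop (MULT_EXPR, a, b);      /* OVERFLOWABLE == 1.  */
   tree r2 = int_const_binop (MULT_EXPR, a, b, -1); /* Explicit OVERFLOWABLE.  */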
/* Return true if binary operation OP distributes over addition in operand
/* Handle general case of two integer constants. For sizetype
constant calculations we always want to know about overflow,
even in the unsigned case. */
- tree res = int_const_binop_1 (code, arg0, arg1, -1);
+ tree res = int_const_binop (code, arg0, arg1, -1);
if (res != NULL_TREE)
return res;
}
tree, enum tree_code, tree, tree,
tree, enum tree_code, tree, tree, tree *);
extern tree fold_read_from_constant_string (tree);
-extern tree int_const_binop (enum tree_code, const_tree, const_tree);
+extern bool wide_int_binop (wide_int &res, enum tree_code,
+ const wide_int &arg1, const wide_int &arg2,
+ signop, wi::overflow_type *);
+extern tree int_const_binop (enum tree_code, const_tree, const_tree, int = 1);
#define build_fold_addr_expr(T)\
build_fold_addr_expr_loc (UNKNOWN_LOCATION, (T))
extern tree build_fold_addr_expr_loc (location_t, tree);
return NULL_TREE;
}
-/* Wrapper around int_const_binop. Return true if we can compute the
- result; i.e. if the operation doesn't overflow or if the overflow is
- undefined. In the latter case (if the operation overflows and
- overflow is undefined), then adjust the result to be -INF or +INF
- depending on CODE, VAL1 and VAL2. Return the value in *RES.
+/* Wrapper around wide_int_binop that adjusts for overflow.
+
+ Return true if we can compute the result; i.e. if the operation
+ doesn't overflow or if the overflow is undefined. In the latter
+ case (if the operation overflows and overflow is undefined), then
+ adjust the result to be -INF or +INF depending on CODE, VAL1 and
+ VAL2. Return the value in *RES.
Return false for division by zero, for which the result is
indeterminate. */
{
wi::overflow_type overflow = wi::OVF_NONE;
signop sign = TYPE_SIGN (TREE_TYPE (val1));
+ wide_int w1 = wi::to_wide (val1);
+ wide_int w2 = wi::to_wide (val2);
switch (code)
{
case RSHIFT_EXPR:
case LSHIFT_EXPR:
- {
- wide_int wval2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
- if (wi::neg_p (wval2))
- {
- wval2 = -wval2;
- if (code == RSHIFT_EXPR)
- code = LSHIFT_EXPR;
- else
- code = RSHIFT_EXPR;
- }
-
- if (code == RSHIFT_EXPR)
- /* It's unclear from the C standard whether shifts can overflow.
- The following code ignores overflow; perhaps a C standard
- interpretation ruling is needed. */
- *res = wi::rshift (wi::to_wide (val1), wval2, sign);
- else
- *res = wi::lshift (wi::to_wide (val1), wval2);
- break;
- }
-
+ w2 = wi::to_wide (val2, TYPE_PRECISION (TREE_TYPE (val1)));
+ /* FALLTHRU */
case MULT_EXPR:
- *res = wi::mul (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
case TRUNC_DIV_EXPR:
case EXACT_DIV_EXPR:
- if (val2 == 0)
- return false;
- else
- *res = wi::div_trunc (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
case FLOOR_DIV_EXPR:
- if (val2 == 0)
- return false;
- *res = wi::div_floor (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
case CEIL_DIV_EXPR:
- if (val2 == 0)
- return false;
- *res = wi::div_ceil (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
- break;
-
case ROUND_DIV_EXPR:
- if (val2 == 0)
+ if (!wide_int_binop (*res, code, w1, w2, sign, &overflow))
return false;
- *res = wi::div_round (wi::to_wide (val1),
- wi::to_wide (val2), sign, &overflow);
break;
default:
gcc_unreachable ();
}
+ /* If the operation overflowed return -INF or +INF depending on the
+ operation and the combination of signs of the operands. */
if (overflow
&& TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1)))
{
- /* If the operation overflowed return -INF or +INF depending
- on the operation and the combination of signs of the operands. */
- int sgn1 = tree_int_cst_sgn (val1);
- int sgn2 = tree_int_cst_sgn (val2);
+ int sign1 = tree_int_cst_sgn (val1);
+ int sign2 = tree_int_cst_sgn (val2);
/* Notice that we only need to handle the restricted set of
operations handled by extract_range_from_binary_expr.
/* For multiplication, the sign of the overflow is given
by the comparison of the signs of the operands. */
- if ((code == MULT_EXPR && sgn1 == sgn2)
- /* For addition, the operands must be of the same sign
- to yield an overflow. Its sign is therefore that
- of one of the operands, for example the first. */
- || (code == PLUS_EXPR && sgn1 >= 0)
- /* For subtraction, operands must be of
- different signs to yield an overflow. Its sign is
- therefore that of the first operand or the opposite of
- that of the second operand. A first operand of 0 counts
- as positive here, for the corner case 0 - (-INF), which
- overflows, but must yield +INF. */
- || (code == MINUS_EXPR && sgn1 >= 0)
+ if ((code == MULT_EXPR && sign1 == sign2)
/* For division, the only case is -INF / -1 = +INF. */
|| code == TRUNC_DIV_EXPR
|| code == FLOOR_DIV_EXPR
|| code == CEIL_DIV_EXPR
|| code == EXACT_DIV_EXPR
|| code == ROUND_DIV_EXPR)
- *res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)),
- TYPE_SIGN (TREE_TYPE (val1)));
+ *res = wi::max_value (TYPE_PRECISION (TREE_TYPE (val1)), sign);
else
- *res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)),
- TYPE_SIGN (TREE_TYPE (val1)));
+ *res = wi::min_value (TYPE_PRECISION (TREE_TYPE (val1)), sign);
return true;
}
return !overflow;
}
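A worked illustration of the saturation above (not from the patch): with a signed type whose overflow is undefined, a MULT_EXPR whose product overflows with both operands positive has sign1 == sign2, so *res becomes wi::max_value for the type's precision and the range code treats the result as +INF; with operands of opposite sign the same overflow saturates to wi::min_value, i.e. -INF.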
-
-/* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
- bitmask if some bit is unset, it means for all numbers in the range
+/* For range [LB, UB] compute two wide_int bitmasks. In *MAY_BE_NONZERO
+ bitmask, if some bit is unset, it means for all numbers in the range
the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
- bitmask if some bit is set, it means for all numbers in the range
+ bitmask, if some bit is set, it means for all numbers in the range
the bit is 1, otherwise it might be 0 or 1. */
-bool
-zero_nonzero_bits_from_vr (const tree expr_type,
- value_range *vr,
- wide_int *may_be_nonzero,
- wide_int *must_be_nonzero)
+void
+zero_nonzero_bits_from_bounds (signop sign,
+ const wide_int &lb, const wide_int &ub,
+ wide_int *may_be_nonzero,
+ wide_int *must_be_nonzero)
{
- *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
- *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
- if (!range_int_cst_p (vr))
- return false;
+ *may_be_nonzero = wi::minus_one (lb.get_precision ());
+ *must_be_nonzero = wi::zero (lb.get_precision ());
- if (range_int_cst_singleton_p (vr))
+ if (wi::eq_p (lb, ub))
{
- *may_be_nonzero = wi::to_wide (vr->min);
+ *may_be_nonzero = lb;
*must_be_nonzero = *may_be_nonzero;
}
- else if (tree_int_cst_sgn (vr->min) >= 0
- || tree_int_cst_sgn (vr->max) < 0)
+ else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
{
- wide_int xor_mask = wi::to_wide (vr->min) ^ wi::to_wide (vr->max);
- *may_be_nonzero = wi::to_wide (vr->min) | wi::to_wide (vr->max);
- *must_be_nonzero = wi::to_wide (vr->min) & wi::to_wide (vr->max);
+ wide_int xor_mask = lb ^ ub;
+ *may_be_nonzero = lb | ub;
+ *must_be_nonzero = lb & ub;
if (xor_mask != 0)
{
wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
*must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
}
}
+}
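A small worked example (illustrative only): for the bounds [4, 7], lb | ub == 0b111 and lb & ub == 0b100; the xor mask 0b011 marks the varying bits, and the bits below its highest set bit are cleared from *must_be_nonzero.  The result is *may_be_nonzero == 0b111 and *must_be_nonzero == 0b100: bit 2 is set in every value of the range, while bits 1 and 0 may be either.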
+/* Like zero_nonzero_bits_from_bounds, but use the range in value_range VR. */
+
+bool
+zero_nonzero_bits_from_vr (const tree expr_type,
+ value_range *vr,
+ wide_int *may_be_nonzero,
+ wide_int *must_be_nonzero)
+{
+ if (!range_int_cst_p (vr))
+ {
+ *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
+ *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
+ return false;
+ }
+
+ zero_nonzero_bits_from_bounds (TYPE_SIGN (expr_type),
+ wi::to_wide (vr->min), wi::to_wide (vr->max),
+ may_be_nonzero, must_be_nonzero);
return true;
}
wide_int_to_tree (type, max), NULL);
}
+/* For op & or | attempt to optimize:
+
+ [LB, UB] op Z
+ into:
+ [LB op Z, UB op Z]
+
+   if Z is a constant which (for op | its bitwise not) has n
+   consecutive least significant bits cleared followed by m consecutive
+   1 bits set immediately above it and either m + n == precision, or
+   (LB >> (m + n)) == (UB >> (m + n)).
+
+ The least significant n bits of all the values in the range are
+ cleared or set, the m bits above it are preserved and any bits
+ above these are required to be the same for all values in the
+ range.
+
+ Return TRUE if the min and max can simply be folded. */
+
+bool
+range_easy_mask_min_max (tree_code code,
+ const wide_int &lb, const wide_int &ub,
+ const wide_int &mask)
+{
+ wide_int w = mask;
+ int m = 0, n = 0;
+ if (code == BIT_IOR_EXPR)
+ w = ~w;
+ if (wi::eq_p (w, 0))
+ n = w.get_precision ();
+ else
+ {
+ n = wi::ctz (w);
+ w = ~(w | wi::mask (n, false, w.get_precision ()));
+ if (wi::eq_p (w, 0))
+ m = w.get_precision () - n;
+ else
+ m = wi::ctz (w) - n;
+ }
+ wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
+ if ((new_mask & lb) == (new_mask & ub))
+ return true;
+
+ return false;
+}
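A worked example of the test (numbers invented for illustration): with 8-bit precision, code == BIT_AND_EXPR and mask == 0xf8 give n == 3 and m == 5, so m + n equals the precision, new_mask is zero and the function returns true for any bounds.  A range such as [0x12, 0x17] masked with 0xf8 can then be folded to [0x12 & 0xf8, 0x17 & 0xf8] == [0x10, 0x10], since every value in the range shares the bits above bit 2.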
+
/* If BOUND will include a symbolic bound, adjust it accordingly,
otherwise leave it as is.
vr1p = &vr0;
}
/* For op & or | attempt to optimize:
- [x, y] op z into [x op z, y op z]
- if z is a constant which (for op | its bitwise not) has n
- consecutive least significant bits cleared followed by m 1
- consecutive bits set immediately above it and either
- m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
- The least significant n bits of all the values in the range are
- cleared or set, the m bits above it are preserved and any bits
- above these are required to be the same for all values in the
- range. */
- if (vr0p && range_int_cst_p (vr0p))
+ [x, y] op z into [x op z, y op z]. */
+ if (vr0p && range_int_cst_p (vr0p)
+ && range_easy_mask_min_max (code, wi::to_wide (vr0p->min),
+ wi::to_wide (vr0p->max),
+ wi::to_wide (vr1p->min)))
{
- wide_int w = wi::to_wide (vr1p->min);
- int m = 0, n = 0;
- if (code == BIT_IOR_EXPR)
- w = ~w;
- if (wi::eq_p (w, 0))
- n = TYPE_PRECISION (expr_type);
- else
- {
- n = wi::ctz (w);
- w = ~(w | wi::mask (n, false, w.get_precision ()));
- if (wi::eq_p (w, 0))
- m = TYPE_PRECISION (expr_type) - n;
- else
- m = wi::ctz (w) - n;
- }
- wide_int mask = wi::mask (m + n, true, w.get_precision ());
- if ((mask & wi::to_wide (vr0p->min))
- == (mask & wi::to_wide (vr0p->max)))
- {
- min = int_const_binop (code, vr0p->min, vr1p->min);
- max = int_const_binop (code, vr0p->max, vr1p->min);
- }
+ min = int_const_binop (code, vr0p->min, vr1p->min);
+ max = int_const_binop (code, vr0p->max, vr1p->min);
}
}
extern int operand_less_p (tree, tree);
extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
+extern void zero_nonzero_bits_from_bounds (signop, const wide_int &,
+					    const wide_int &, wide_int *,
+					    wide_int *);
extern bool zero_nonzero_bits_from_vr (const tree, value_range *,
wide_int *, wide_int *);
+extern bool range_easy_mask_min_max (tree_code,
+ const wide_int &lb, const wide_int &ub,
+ const wide_int &mask);
extern bool overflow_comparison_p (tree_code, tree, tree, bool, tree *);
extern bool range_int_cst_singleton_p (value_range *);
extern int value_inside_range (tree, tree, tree);