+2018-07-07 Aldy Hernandez <aldyh@redhat.com>
+
+ * tree-vrp.c (vrp_int_const_binop): Change overflow type to
+ overflow_type.
+	(combine_bound): Use wide-int overflow calculation instead of
+	rolling our own.
+	(set_value_range_with_overflow): Change the overflow arguments
+	min_ovf and max_ovf to overflow_type.
+ * calls.c (maybe_warn_alloc_args_overflow): Change overflow type to
+ overflow_type.
+ * fold-const.c (int_const_binop_2): Same.
+ (extract_muldiv_1): Same.
+ (fold_div_compare): Same.
+ (fold_abs_const): Same.
+ * match.pd: Same.
+ * poly-int.h (add): Same.
+ (sub): Same.
+ (neg): Same.
+ (mul): Same.
+ * predict.c (predict_iv_comparison): Same.
+ * profile-count.c (slow_safe_scale_64bit): Same.
+ * simplify-rtx.c (simplify_const_binary_operation): Same.
+ * tree-chrec.c (tree_fold_binomial): Same.
+ * tree-data-ref.c (split_constant_offset_1): Same.
+ * tree-if-conv.c (idx_within_array_bound): Same.
+ * tree-scalar-evolution.c (iv_can_overflow_p): Same.
+ * tree-ssa-phiopt.c (minmax_replacement): Same.
+ * tree-vect-loop.c (is_nonwrapping_integer_induction): Same.
+ * tree-vect-stmts.c (vect_truncate_gather_scatter_offset): Same.
+ * vr-values.c (vr_values::adjust_range_with_scev): Same.
+ * wide-int.cc (wi::add_large): Same.
+ (wi::mul_internal): Same.
+ (wi::sub_large): Same.
+ (wi::divmod_internal): Same.
+	* wide-int.h: Change overflow type to overflow_type for neg, add,
+	sub, mul, smul, umul, div_trunc, div_floor, div_ceil, div_round,
+	mod_trunc, mod_floor, mod_ceil, mod_round, add_large, sub_large,
+	mul_internal, divmod_internal.
+ (overflow_type): New enum.
+ (accumulate_overflow): New.
+
2018-07-06 Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
* tree-ssa-phiopt.c (cond_removal_in_popcount_pattern): New.
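For reviewers: the short program below is an illustrative sketch, not part
of the patch.  It mimics the semantics of the new wi::overflow_type with
plain C++ so the tri-state-plus-unknown encoding is easy to see.  The local
enum and checked_add are invented for this example; wi::add itself detects
the wrap from the truncated bit pattern rather than by widening as done here.

    /* Illustrative sketch only; not GCC code.  */
    #include <cassert>
    #include <cstdint>
    #include <limits>

    enum overflow_type {
      OVF_NONE = 0,
      OVF_UNDERFLOW = -1,   /* Wrapped below the type's minimum.  */
      OVF_OVERFLOW = 1,     /* Wrapped above the type's maximum.  */
      OVF_UNKNOWN = 2       /* Overflowed, direction not tracked.  */
    };

    /* Wrapping 32-bit signed addition that also reports the direction
       of any overflow, the way wi::add does after this patch.  */
    static int32_t
    checked_add (int32_t x, int32_t y, overflow_type *ovf)
    {
      int64_t wide = (int64_t) x + y;
      if (wide > std::numeric_limits<int32_t>::max ())
        *ovf = OVF_OVERFLOW;
      else if (wide < std::numeric_limits<int32_t>::min ())
        *ovf = OVF_UNDERFLOW;
      else
        *ovf = OVF_NONE;
      return (int32_t) (uint32_t) wide;  /* Low 32 bits: the wrapped value.  */
    }

    int
    main ()
    {
      overflow_type ovf;
      checked_add (std::numeric_limits<int32_t>::max (), 1, &ovf);
      assert (ovf == OVF_OVERFLOW);
      checked_add (std::numeric_limits<int32_t>::min (), -1, &ovf);
      assert (ovf == OVF_UNDERFLOW);
      checked_add (1, 2, &ovf);
      assert (ovf == OVF_NONE);
      return 0;
    }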
wide_int x = wi::to_wide (argrange[0][0], szprec);
wide_int y = wi::to_wide (argrange[1][0], szprec);
- bool vflow;
+ wi::overflow_type vflow;
wide_int prod = wi::umul (x, y, &vflow);
if (vflow)
+2018-07-07 Aldy Hernandez <aldyh@redhat.com>
+
+ * decl.c (build_enumerator): Change overflow type to overflow_type.
+ * init.c (build_new_1): Same.
+
2018-07-05 Nathan Sidwell <nathan@acm.org>
* cp/decl.c (decls_match): Check SYSTEM_IMPLICIT_EXTERN_C not
if (TYPE_VALUES (enumtype))
{
tree prev_value;
- bool overflowed;
/* C++03 7.2/4: If no initializer is specified for the first
enumerator, the type is an unspecified integral
value = error_mark_node;
else
{
+ wi::overflow_type overflowed;
tree type = TREE_TYPE (prev_value);
signop sgn = TYPE_SIGN (type);
widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn,
incremented enumerator value is too large for %<long%>"));
}
if (type == NULL_TREE)
- overflowed = true;
+ overflowed = wi::OVF_UNKNOWN;
else
value = wide_int_to_tree (type, wi);
}
tree inner_nelts_cst = maybe_constant_value (inner_nelts);
if (TREE_CODE (inner_nelts_cst) == INTEGER_CST)
{
- bool overflow;
+ wi::overflow_type overflow;
offset_int result = wi::mul (wi::to_offset (inner_nelts_cst),
inner_nelts_count, SIGNED, &overflow);
if (overflow)
maximum object size and is safe even if we choose not to use
a cookie after all. */
max_size -= wi::to_offset (cookie_size);
- bool overflow;
+ wi::overflow_type overflow;
inner_size = wi::mul (wi::to_offset (size), inner_nelts_count, SIGNED,
&overflow);
if (overflow || wi::gtu_p (inner_size, max_size))
tree t;
tree type = TREE_TYPE (parg1);
signop sign = TYPE_SIGN (type);
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
wi::tree_to_wide_ref arg1 = wi::to_wide (parg1);
wide_int arg2 = wi::to_wide (parg2, TYPE_PRECISION (type));
if (poly_int_tree_p (arg1) && poly_int_tree_p (arg2))
{
poly_wide_int res;
- bool overflow;
+ wi::overflow_type overflow;
tree type = TREE_TYPE (arg1);
signop sign = TYPE_SIGN (type);
switch (code)
if (tcode == code)
{
bool overflow_p = false;
- bool overflow_mul_p;
+ wi::overflow_type overflow_mul;
signop sign = TYPE_SIGN (ctype);
unsigned prec = TYPE_PRECISION (ctype);
wide_int mul = wi::mul (wi::to_wide (op1, prec),
wi::to_wide (c, prec),
- sign, &overflow_mul_p);
+ sign, &overflow_mul);
overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1);
- if (overflow_mul_p
+ if (overflow_mul
&& ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED))
overflow_p = true;
if (!overflow_p)
{
tree prod, tmp, type = TREE_TYPE (c1);
signop sign = TYPE_SIGN (type);
- bool overflow;
+ wi::overflow_type overflow;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, c1, c2); */
else
wi_offset = wi::to_poly_wide (offset);
- bool overflow;
+ wi::overflow_type overflow;
poly_wide_int units = wi::shwi (bits_to_bytes_round_down (bitpos),
precision);
poly_wide_int total = wi::add (wi_offset, units, UNSIGNED, &overflow);
default:
if (poly_int_tree_p (arg0))
{
- bool overflow;
+ wi::overflow_type overflow;
poly_wide_int res = wi::neg (wi::to_poly_wide (arg0), &overflow);
t = force_fit_type (type, res, 1,
(overflow && ! TYPE_UNSIGNED (type))
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
wide_int val = wi::to_wide (arg0);
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
if (!wi::neg_p (val, TYPE_SIGN (TREE_TYPE (arg0))))
;
(simplify
(div (div @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
- bool overflow_p;
+ wi::overflow_type overflow;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
- TYPE_SIGN (type), &overflow_p);
+ TYPE_SIGN (type), &overflow);
}
- (if (!overflow_p)
+ (if (!overflow)
(div @0 { wide_int_to_tree (type, mul); })
(if (TYPE_UNSIGNED (type)
|| mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
(simplify
(mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
(with {
- bool overflow_p;
+ wi::overflow_type overflow;
wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
- TYPE_SIGN (type), &overflow_p);
+ TYPE_SIGN (type), &overflow);
}
/* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
otherwise undefined overflow implies that @0 must be zero. */
- (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
+ (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
(mult @0 { wide_int_to_tree (type, mul); }))))
/* Optimize A / A to 1.0 if we don't care about
&& (cmp == LT_EXPR || cmp == GE_EXPR)))
(with
{
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
enum tree_code code, cmp_code = cmp;
wide_int real_c1;
wide_int c1 = wi::to_wide (@1);
(if (TREE_CODE (@1) == INTEGER_CST)
(with
{
- bool ovf;
+ wi::overflow_type ovf;
wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
(if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
(with
{
- bool ovf;
+ wi::overflow_type ovf;
wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
TYPE_SIGN (TREE_TYPE (@1)), &ovf);
}
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
add (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
- signop sgn, bool *overflow)
+ signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
POLY_SET_COEFF (C, r, 0, wi::add (a.coeffs[0], b.coeffs[0], sgn, overflow));
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
+ wi::overflow_type suboverflow;
POLY_SET_COEFF (C, r, i, wi::add (a.coeffs[i], b.coeffs[i], sgn,
&suboverflow));
- *overflow |= suboverflow;
+ wi::accumulate_overflow (*overflow, suboverflow);
}
return r;
}
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
sub (const poly_int_pod<N, Ca> &a, const poly_int_pod<N, Cb> &b,
- signop sgn, bool *overflow)
+ signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
POLY_SET_COEFF (C, r, 0, wi::sub (a.coeffs[0], b.coeffs[0], sgn, overflow));
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
+ wi::overflow_type suboverflow;
POLY_SET_COEFF (C, r, i, wi::sub (a.coeffs[i], b.coeffs[i], sgn,
&suboverflow));
- *overflow |= suboverflow;
+ wi::accumulate_overflow (*overflow, suboverflow);
}
return r;
}
template<unsigned int N, typename Ca>
inline poly_int<N, WI_UNARY_RESULT (Ca)>
-neg (const poly_int_pod<N, Ca> &a, bool *overflow)
+neg (const poly_int_pod<N, Ca> &a, wi::overflow_type *overflow)
{
typedef WI_UNARY_RESULT (Ca) C;
poly_int<N, C> r;
POLY_SET_COEFF (C, r, 0, wi::neg (a.coeffs[0], overflow));
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
+ wi::overflow_type suboverflow;
POLY_SET_COEFF (C, r, i, wi::neg (a.coeffs[i], &suboverflow));
- *overflow |= suboverflow;
+ wi::accumulate_overflow (*overflow, suboverflow);
}
return r;
}
template<unsigned int N, typename Ca, typename Cb>
inline poly_int<N, WI_BINARY_RESULT (Ca, Cb)>
mul (const poly_int_pod<N, Ca> &a, const Cb &b,
- signop sgn, bool *overflow)
+ signop sgn, wi::overflow_type *overflow)
{
typedef WI_BINARY_RESULT (Ca, Cb) C;
poly_int<N, C> r;
POLY_SET_COEFF (C, r, 0, wi::mul (a.coeffs[0], b, sgn, overflow));
for (unsigned int i = 1; i < N; i++)
{
- bool suboverflow;
+ wi::overflow_type suboverflow;
POLY_SET_COEFF (C, r, i, wi::mul (a.coeffs[i], b, sgn, &suboverflow));
- *overflow |= suboverflow;
+ wi::accumulate_overflow (*overflow, suboverflow);
}
return r;
}
&& tree_fits_shwi_p (compare_base))
{
int probability;
- bool overflow, overall_overflow = false;
+ wi::overflow_type overflow;
+ bool overall_overflow = false;
widest_int compare_count, tem;
/* (loop_bound - base) / compare_step */
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
FIXED_WIDE_INT (128) tmp = a;
- bool overflow;
+ wi::overflow_type overflow;
tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
gcc_checking_assert (!overflow);
if (wi::fits_uhwi_p (tmp))
&& CONST_SCALAR_INT_P (op1))
{
wide_int result;
- bool overflow;
+ wi::overflow_type overflow;
rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
typedef poly_int<N, wide_int> T;
typedef poly_helper<T> ph;
- bool overflow;
+ wi::overflow_type overflow;
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::uhwi (15, 4),
wi::uhwi (4, 4),
wi::uhwi (2, 4)),
ph::make (wi::uhwi (0, 4),
wi::uhwi (4, 4),
wi::uhwi (2, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::uhwi (30, 5),
wi::uhwi (6, 5),
wi::uhwi (11, 5)),
ph::make (wi::uhwi (31, 5),
wi::uhwi (0, 5),
wi::uhwi (30, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::uhwi (1, 6),
wi::uhwi (63, 6),
wi::uhwi (50, 6)),
ph::make (wi::uhwi (62, 6),
wi::uhwi (63, 6),
wi::uhwi (36, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::shwi (7, 4),
wi::shwi (7, 4),
ph::make (wi::shwi (-8, 4),
wi::shwi (7, 4),
wi::shwi (-8, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::shwi (-1, 5),
wi::shwi (6, 5),
wi::shwi (11, 5)),
ph::make (wi::shwi (14, 5),
wi::shwi (-15, 5),
wi::shwi (-4, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::add (ph::make (wi::shwi (4, 6),
wi::shwi (0, 6),
wi::shwi (-1, 6)),
ph::make (wi::shwi (-28, 6),
wi::shwi (-32, 6),
wi::shwi (31, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
}
/* Test wi::sub for poly_int<N, wide_int>. */
typedef poly_int<N, wide_int> T;
typedef poly_helper<T> ph;
- bool overflow;
+ wi::overflow_type overflow;
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::uhwi (0, 4),
wi::uhwi (4, 4),
wi::uhwi (2, 4)),
ph::make (wi::uhwi (15, 4),
wi::uhwi (4, 4),
wi::uhwi (2, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::uhwi (30, 5),
wi::uhwi (29, 5),
wi::uhwi (11, 5)),
ph::make (wi::uhwi (29, 5),
wi::uhwi (30, 5),
wi::uhwi (2, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::uhwi (0, 6),
wi::uhwi (63, 6),
wi::uhwi (0, 6)),
ph::make (wi::uhwi (0, 6),
wi::uhwi (63, 6),
wi::uhwi (12, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::shwi (-8, 4),
wi::shwi (5, 4),
ph::make (wi::shwi (7, 4),
wi::shwi (5, 4),
wi::shwi (-7, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::shwi (-1, 5),
wi::shwi (-7, 5),
wi::shwi (0, 5)),
ph::make (wi::shwi (-16, 5),
wi::shwi (14, 5),
wi::shwi (15, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::sub (ph::make (wi::shwi (-32, 6),
wi::shwi (-1, 6),
wi::shwi (0, 6)),
ph::make (wi::shwi (0, 6),
wi::shwi (31, 6),
wi::shwi (-32, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
}
/* Test wi::mul for poly_int<N, wide_int>. */
typedef poly_int<N, wide_int> T;
typedef poly_helper<T> ph;
- bool overflow;
+ wi::overflow_type overflow;
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::uhwi (4, 4),
wi::uhwi (3, 4),
wi::uhwi (2, 4)), 4,
ph::make (wi::uhwi (0, 4),
wi::uhwi (12, 4),
wi::uhwi (8, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::uhwi (15, 5),
wi::uhwi (31, 5),
wi::uhwi (7, 5)), 2,
ph::make (wi::uhwi (30, 5),
wi::uhwi (30, 5),
wi::uhwi (14, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::uhwi (1, 6),
wi::uhwi (0, 6),
wi::uhwi (2, 6)), 63,
ph::make (wi::uhwi (63, 6),
wi::uhwi (0, 6),
wi::uhwi (62, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::shwi (-1, 4),
wi::shwi (1, 4),
ph::make (wi::shwi (-8, 4),
wi::shwi (-8, 4),
wi::shwi (0, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::shwi (2, 5),
wi::shwi (-3, 5),
wi::shwi (1, 5)), 6,
ph::make (wi::shwi (12, 5),
wi::shwi (14, 5),
wi::shwi (6, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::mul (ph::make (wi::shwi (5, 6),
wi::shwi (-6, 6),
wi::shwi (7, 6)), -5,
ph::make (wi::shwi (-25, 6),
wi::shwi (30, 6),
wi::shwi (29, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
}
/* Test wi::neg for poly_int<N, wide_int>. */
typedef poly_int<N, wide_int> T;
typedef poly_helper<T> ph;
- bool overflow;
+ wi::overflow_type overflow;
ASSERT_KNOWN_EQ (wi::neg (ph::make (wi::shwi (-8, 4),
wi::shwi (7, 4),
wi::shwi (-7, 4)), &overflow),
ph::make (wi::shwi (-8, 4),
wi::shwi (-7, 4),
wi::shwi (7, 4)));
- ASSERT_TRUE (overflow);
+  ASSERT_TRUE ((bool) overflow);
ASSERT_KNOWN_EQ (wi::neg (ph::make (wi::shwi (-15, 5),
wi::shwi (-16, 5),
wi::shwi (15, 5)), &overflow),
ph::make (wi::shwi (15, 5),
wi::shwi (-16, 5),
wi::shwi (-15, 5)));
- ASSERT_EQ (overflow, N >= 2);
+  ASSERT_EQ ((bool) overflow, N >= 2);
ASSERT_KNOWN_EQ (wi::neg (ph::make (wi::shwi (-28, 6),
wi::shwi (30, 6),
wi::shwi (-32, 6)), &overflow),
ph::make (wi::shwi (28, 6),
wi::shwi (-30, 6),
wi::shwi (-32, 6)));
- ASSERT_EQ (overflow, N == 3);
+  ASSERT_EQ ((bool) overflow, N == 3);
}
/* Test poly_int<N, C> for things that only make sense when C is an
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
- bool overflow;
+ wi::overflow_type overflow;
unsigned int i;
/* Handle the most frequent cases. */
is known to be [A + TMP_OFF, B + TMP_OFF], with all
operations done in ITYPE. The addition must overflow
at both ends of the range or at neither. */
- bool overflow[2];
+ wi::overflow_type overflow[2];
unsigned int prec = TYPE_PRECISION (itype);
wide_int woff = wi::to_wide (tmp_off, prec);
wide_int op0_min = wi::add (var_min, woff, sgn, &overflow[0]);
wi::add (var_max, woff, sgn, &overflow[1]);
- if (overflow[0] != overflow[1])
+ if ((overflow[0] != wi::OVF_NONE) != (overflow[1] != wi::OVF_NONE))
return false;
/* Calculate (ssizetype) OP0 - (ssizetype) TMP_VAR. */
static bool
idx_within_array_bound (tree ref, tree *idx, void *dta)
{
- bool overflow;
+ wi::overflow_type overflow;
widest_int niter, valid_niter, delta, wi_step;
tree ev, init, step;
tree low, high;
&& wi::le_p (base_max, type_max, sgn));
  /* Account for the possible increment in the last iteration.  */
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
nit = wi::add (nit, 1, SIGNED, &overflow);
if (overflow)
return true;
the type. */
if (sgn == UNSIGNED || !wi::neg_p (step_max))
{
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
if (wi::gtu_p (wi::mul (step_max, nit2, UNSIGNED, &overflow),
type_max - base_max)
|| overflow)
/* If step can be negative, check that nit*(-step) <= base_min-type_min. */
if (sgn == SIGNED && wi::neg_p (step_min))
{
- bool overflow = false, overflow2 = false;
+ wi::overflow_type overflow, overflow2;
+ overflow = overflow2 = wi::OVF_NONE;
if (wi::gtu_p (wi::mul (wi::neg (step_min, &overflow2),
nit2, UNSIGNED, &overflow),
base_min - type_min)
enum tree_code code;
tree type, ev, base, e;
wide_int extreme;
- bool folded_casts, overflow;
+ bool folded_casts;
iv->base = NULL_TREE;
iv->step = NULL_TREE;
code = GT_EXPR;
extreme = wi::max_value (type);
}
- overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
extreme = wi::sub (extreme, wi::to_wide (iv->step),
TYPE_SIGN (type), &overflow);
if (overflow)
{
if (cmp == LT_EXPR)
{
- bool overflow;
+ wi::overflow_type overflow;
wide_int alt = wi::sub (wi::to_wide (larger), 1,
TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
}
else
{
- bool overflow;
+ wi::overflow_type overflow;
wide_int alt = wi::add (wi::to_wide (larger), 1,
TYPE_SIGN (TREE_TYPE (larger)),
&overflow);
Likewise larger >= CST is equivalent to larger > CST-1. */
if (TREE_CODE (smaller) == INTEGER_CST)
{
+ wi::overflow_type overflow;
if (cmp == GT_EXPR)
{
- bool overflow;
wide_int alt = wi::add (wi::to_wide (smaller), 1,
TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
}
else
{
- bool overflow;
wide_int alt = wi::sub (wi::to_wide (smaller), 1,
TYPE_SIGN (TREE_TYPE (smaller)),
&overflow);
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
widest_int ni, max_loop_value, lhs_max;
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
/* Make sure the loop is integer based. */
if (TREE_CODE (base) != INTEGER_CST
/* Try scales of 1 and the element size. */
int scales[] = { 1, vect_get_scalar_dr_size (dr) };
- bool overflow_p = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
for (int i = 0; i < 2; ++i)
{
int scale = scales[i];
/* See whether we can calculate (COUNT - 1) * STEP / SCALE
in OFFSET_BITS bits. */
- widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
- if (overflow_p)
+ widest_int range = wi::mul (count, factor, SIGNED, &overflow);
+ if (overflow)
continue;
signop sign = range >= 0 ? UNSIGNED : SIGNED;
if (wi::min_precision (range, sign) > element_bits)
{
- overflow_p = true;
+ overflow = wi::OVF_UNKNOWN;
continue;
}
return true;
}
- if (overflow_p && dump_enabled_p ())
+ if (overflow && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"truncating gather/scatter offset to %d bits"
" might change its value.\n", element_bits);
static bool
vrp_int_const_binop (enum tree_code code, tree val1, tree val2, wide_int *res)
{
- bool overflow = false;
+ wi::overflow_type overflow = wi::OVF_NONE;
signop sign = TYPE_SIGN (TREE_TYPE (val1));
switch (code)
if over/underflow occurred. */
static void
-combine_bound (enum tree_code code, wide_int &wi, int &ovf,
+combine_bound (enum tree_code code, wide_int &wi, wi::overflow_type &ovf,
tree type, tree op0, tree op1)
{
bool minus_p = (code == MINUS_EXPR);
if (op0 && op1)
{
if (minus_p)
- {
- wi = wi::to_wide (op0) - wi::to_wide (op1);
-
- /* Check for overflow. */
- if (wi::cmp (0, wi::to_wide (op1), sgn)
- != wi::cmp (wi, wi::to_wide (op0), sgn))
- ovf = wi::cmp (wi::to_wide (op0),
- wi::to_wide (op1), sgn);
- }
+ wi = wi::sub (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
else
- {
- wi = wi::to_wide (op0) + wi::to_wide (op1);
-
- /* Check for overflow. */
- if (wi::cmp (wi::to_wide (op1), 0, sgn)
- != wi::cmp (wi, wi::to_wide (op0), sgn))
- ovf = wi::cmp (wi::to_wide (op0), wi, sgn);
- }
+ wi = wi::add (wi::to_wide (op0), wi::to_wide (op1), sgn, &ovf);
}
else if (op0)
wi = wi::to_wide (op0);
else if (op1)
{
if (minus_p)
- {
- wi = -wi::to_wide (op1);
-
- /* Check for overflow. */
- if (sgn == SIGNED
- && wi::neg_p (wi::to_wide (op1))
- && wi::neg_p (wi))
- ovf = 1;
- else if (sgn == UNSIGNED && wi::to_wide (op1) != 0)
- ovf = -1;
- }
+ wi = wi::neg (wi::to_wide (op1), &ovf);
else
wi = wi::to_wide (op1);
}
set_value_range_with_overflow (value_range &vr,
tree type,
const wide_int &wmin, const wide_int &wmax,
- int min_ovf, int max_ovf)
+ wi::overflow_type min_ovf,
+ wi::overflow_type max_ovf)
{
const signop sgn = TYPE_SIGN (type);
const unsigned int prec = TYPE_PRECISION (type);
range kind and bounds appropriately. */
wide_int tmin = wide_int::from (wmin, prec, sgn);
wide_int tmax = wide_int::from (wmax, prec, sgn);
- if (min_ovf == max_ovf)
+ if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
{
/* No overflow or both overflow or underflow. The
range kind stays VR_RANGE. */
vr.min = wide_int_to_tree (type, tmin);
vr.max = wide_int_to_tree (type, tmax);
}
- else if ((min_ovf == -1 && max_ovf == 0)
- || (max_ovf == 1 && min_ovf == 0))
+ else if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
+ || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
{
/* Min underflow or max overflow. The range kind
changes to VR_ANTI_RANGE. */
value. */
wide_int type_min = wi::min_value (prec, sgn);
wide_int type_max = wi::max_value (prec, sgn);
- if (min_ovf == -1)
+ if (min_ovf == wi::OVF_UNDERFLOW)
vr.min = wide_int_to_tree (type, type_min);
- else if (min_ovf == 1)
+ else if (min_ovf == wi::OVF_OVERFLOW)
vr.min = wide_int_to_tree (type, type_max);
else
vr.min = wide_int_to_tree (type, wmin);
- if (max_ovf == -1)
+ if (max_ovf == wi::OVF_UNDERFLOW)
vr.max = wide_int_to_tree (type, type_min);
- else if (max_ovf == 1)
+ else if (max_ovf == wi::OVF_OVERFLOW)
vr.max = wide_int_to_tree (type, type_max);
else
vr.max = wide_int_to_tree (type, wmax);
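The hunk above encodes a small decision table over the two bound overflows.
It is restated standalone below as an illustrative sketch (not part of the
patch): the local enum mirrors wi::overflow_type, "range"/"anti-range" stand
in for GCC's VR_RANGE/VR_ANTI_RANGE, and the "saturate" fallback is assumed
to correspond to the type_min/type_max clamping visible above.

    /* Illustrative sketch only; not GCC code.  */
    #include <cassert>
    #include <cstring>

    enum overflow_type { OVF_NONE = 0, OVF_UNDERFLOW = -1,
                         OVF_OVERFLOW = 1, OVF_UNKNOWN = 2 };

    static const char *
    classify (overflow_type min_ovf, overflow_type max_ovf)
    {
      /* Neither bound overflowed, or both did: the wrapped bounds
         still delimit an ordinary range.  */
      if ((min_ovf != OVF_NONE) == (max_ovf != OVF_NONE))
        return "range";
      /* Exactly one bound wrapped past an end of the type, in the
         natural direction: the skipped values are excluded.  */
      if ((min_ovf == OVF_UNDERFLOW && max_ovf == OVF_NONE)
          || (max_ovf == OVF_OVERFLOW && min_ovf == OVF_NONE))
        return "anti-range";
      /* Otherwise clamp the offending bound to the type's extreme
         value, as the code above does.  */
      return "saturate";
    }

    int
    main ()
    {
      assert (!std::strcmp (classify (OVF_NONE, OVF_NONE), "range"));
      assert (!std::strcmp (classify (OVF_UNDERFLOW, OVF_NONE), "anti-range"));
      assert (!std::strcmp (classify (OVF_NONE, OVF_OVERFLOW), "anti-range"));
      assert (!std::strcmp (classify (OVF_OVERFLOW, OVF_NONE), "saturate"));
      return 0;
    }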
&& neg_max_op0 == (minus_p ? neg_max_op1 : !neg_max_op1))))
{
wide_int wmin, wmax;
- int min_ovf = 0;
- int max_ovf = 0;
+ wi::overflow_type min_ovf = wi::OVF_NONE;
+ wi::overflow_type max_ovf = wi::OVF_NONE;
/* Build the bounds. */
combine_bound (code, wmin, min_ovf, expr_type, min_op0, min_op1);
/* If we have overflow for the constant part and the resulting
range will be symbolic, drop to VR_VARYING. */
- if ((min_ovf && sym_min_op0 != sym_min_op1)
- || (max_ovf && sym_max_op0 != sym_max_op1))
+      if (((bool) min_ovf && sym_min_op0 != sym_min_op1)
+	  || ((bool) max_ovf && sym_max_op0 != sym_max_op1))
{
set_value_range_to_varying (vr);
return;
{
value_range maxvr = VR_INITIALIZER;
signop sgn = TYPE_SIGN (TREE_TYPE (step));
- bool overflow;
+ wi::overflow_type overflow;
widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
&overflow);
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int op0len, const HOST_WIDE_INT *op1,
unsigned int op1len, unsigned int prec,
- signop sgn, bool *overflow)
+ signop sgn, wi::overflow_type *overflow)
{
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
val[len] = mask0 + mask1 + carry;
len++;
if (overflow)
- *overflow = (sgn == UNSIGNED && carry);
+ *overflow
+ = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
}
else if (overflow)
{
if (sgn == SIGNED)
{
unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
- *overflow = (HOST_WIDE_INT) (x << shift) < 0;
+ if ((HOST_WIDE_INT) (x << shift) < 0)
+ {
+ if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
+ *overflow = wi::OVF_UNDERFLOW;
+ else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
+ *overflow = wi::OVF_OVERFLOW;
+ else
+ *overflow = wi::OVF_NONE;
+ }
+ else
+ *overflow = wi::OVF_NONE;
}
else
{
x <<= shift;
o0 <<= shift;
if (old_carry)
- *overflow = (x <= o0);
+ *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
else
- *overflow = (x < o0);
+ *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
}
}
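A worked example of the signed direction test above, shrunk to 8 bits so
the arithmetic is easy to check by hand (illustrative only, not part of
the patch): adding two negative values that wrap below the minimum.

    /* Illustrative sketch only; not GCC code.  */
    #include <cassert>
    #include <cstdint>

    int
    main ()
    {
      /* -100 + -100 = -200, which is below INT8_MIN.  */
      uint8_t o0 = 0x9c, o1 = 0x9c;             /* Top blocks of the operands.  */
      uint8_t result = (uint8_t) (o0 + o1);     /* 0x38, i.e. +56 after the wrap.  */
      /* The sign bit of the result differs from both operands, so an
         overflow happened...  */
      assert (((result ^ o0) & (result ^ o1)) & 0x80);
      /* ...and the operand top compares greater than the result top as
         an unsigned value, so the wrap was downward: OVF_UNDERFLOW.  */
      assert (o0 > result);
      return 0;
    }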
made to see if it overflows. Unfortunately there is no better way
to check for overflow than to do this. If OVERFLOW is nonnull,
record in *OVERFLOW whether the result overflowed. SGN controls
- the signedness and is used to check overflow or if HIGH is set. */
+ the signedness and is used to check overflow or if HIGH is set.
+
+   NOTE: The overflow vs. underflow distinction is not yet implemented
+   for signed multiplication; OVF_UNKNOWN is reported instead.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
unsigned int op1len, const HOST_WIDE_INT *op2val,
unsigned int op2len, unsigned int prec, signop sgn,
- bool *overflow, bool high)
+ wi::overflow_type *overflow, bool high)
{
unsigned HOST_WIDE_INT o0, o1, k, t;
unsigned int i;
just make sure that we never attempt to set it. */
bool needs_overflow = (overflow != 0);
if (needs_overflow)
- *overflow = false;
+ *overflow = wi::OVF_NONE;
wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
unsigned HOST_WIDE_INT upper;
umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
if (needs_overflow)
- *overflow = (upper != 0);
+ /* Unsigned overflow can only be +OVERFLOW. */
+ *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
if (high)
val[0] = upper;
return 1;
if (sgn == SIGNED)
{
if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
- *overflow = true;
+ /* FIXME: Signed overflow type is not implemented yet. */
+ *overflow = OVF_UNKNOWN;
}
else
{
if ((r >> prec) != 0)
- *overflow = true;
+ /* Unsigned overflow can only be +OVERFLOW. */
+ *overflow = OVF_OVERFLOW;
}
}
val[0] = high ? r >> prec : r;
for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
if (((HOST_WIDE_INT)(r[i] & mask)) != top)
- *overflow = true;
+ /* FIXME: Signed overflow type is not implemented yet. */
+ *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
}
int r_offset = high ? half_blocks_needed : 0;
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int op0len, const HOST_WIDE_INT *op1,
unsigned int op1len, unsigned int prec,
- signop sgn, bool *overflow)
+ signop sgn, wi::overflow_type *overflow)
{
unsigned HOST_WIDE_INT o0 = 0;
unsigned HOST_WIDE_INT o1 = 0;
val[len] = mask0 - mask1 - borrow;
len++;
if (overflow)
- *overflow = (sgn == UNSIGNED && borrow);
+ *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
}
else if (overflow)
{
if (sgn == SIGNED)
{
unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
- *overflow = (HOST_WIDE_INT) (x << shift) < 0;
+ if ((HOST_WIDE_INT) (x << shift) < 0)
+ {
+ if (o0 > o1)
+ *overflow = OVF_UNDERFLOW;
+ else if (o0 < o1)
+ *overflow = OVF_OVERFLOW;
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = OVF_NONE;
}
else
{
x <<= shift;
o0 <<= shift;
if (old_borrow)
- *overflow = (x >= o0);
+ *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
else
- *overflow = (x > o0);
+ *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
}
}
unsigned int dividend_len, unsigned int dividend_prec,
const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
unsigned int divisor_prec, signop sgn,
- bool *oflow)
+ wi::overflow_type *oflow)
{
unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
*remainder_len = 1;
remainder[0] = 0;
}
- if (oflow != 0)
- *oflow = true;
+ if (oflow)
+ *oflow = OVF_OVERFLOW;
if (quotient)
for (unsigned int i = 0; i < dividend_len; ++i)
quotient[i] = dividend_val[i];
}
if (oflow)
- *oflow = false;
+ *oflow = OVF_NONE;
/* Do it on the host if you can. */
if (sgn == SIGNED
{
int prec = precs[i];
int offset = offsets[j];
- bool overflow;
+ wi::overflow_type overflow;
wide_int sum, diff;
sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
UNSIGNED, &overflow);
ASSERT_EQ (sum, -offset);
- ASSERT_EQ (overflow, offset == 0);
+ ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
UNSIGNED, &overflow);
ASSERT_EQ (sum, -offset);
- ASSERT_EQ (overflow, offset == 0);
+ ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);
diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
wi::max_value (prec, UNSIGNED),
UNSIGNED, &overflow);
ASSERT_EQ (diff, -offset);
- ASSERT_EQ (overflow, offset != 0);
+ ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);
diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
wi::max_value (prec, UNSIGNED) - 1,
UNSIGNED, &overflow);
ASSERT_EQ (diff, 1 - offset);
- ASSERT_EQ (overflow, offset > 1);
+ ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
}
}
namespace wi
{
+  /* Operations that calculate overflow do so even for
+     TYPE_OVERFLOW_WRAPS types.  For example, adding 1 to the maximum
+     value of an unsigned int wraps to 0 and is well defined in C/C++,
+     but wi::add will still set the overflow argument in case the
+     wraparound matters for further analysis.
+
+     For operations that report overflow, these are the possible
+     kinds of overflow.  */
+ enum overflow_type {
+ OVF_NONE = 0,
+ OVF_UNDERFLOW = -1,
+ OVF_OVERFLOW = 1,
+    /* There was an overflow, but whether the value wrapped above the
+       maximum (overflow) or below the minimum (underflow) is unknown.  */
+ OVF_UNKNOWN = 2
+ };
+
/* Classifies an integer based on its precision. */
enum precision_type {
/* The integer has both a precision and defined signedness. This allows
UNARY_FUNCTION bit_not (const T &);
UNARY_FUNCTION neg (const T &);
- UNARY_FUNCTION neg (const T &, bool *);
+ UNARY_FUNCTION neg (const T &, overflow_type *);
UNARY_FUNCTION abs (const T &);
UNARY_FUNCTION ext (const T &, unsigned int, signop);
UNARY_FUNCTION sext (const T &, unsigned int);
BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
BINARY_FUNCTION add (const T1 &, const T2 &);
- BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *);
BINARY_FUNCTION sub (const T1 &, const T2 &);
- BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *);
BINARY_FUNCTION mul (const T1 &, const T2 &);
- BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
- BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
- BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *);
BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
- BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
- BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
- BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
- BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
WI_BINARY_RESULT (T1, T2) *);
BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
- BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
- BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
- BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
- BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop,
+ overflow_type * = 0);
template <typename T1, typename T2>
bool multiple_of_p (const T1 &, const T2 &, signop);
template <typename T>
unsigned int min_precision (const T &, signop);
+
+ static inline void accumulate_overflow (overflow_type &, overflow_type);
}
namespace wi
const HOST_WIDE_INT *, unsigned int, unsigned int);
unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int,
- signop, bool *);
+ signop, overflow_type *);
unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int,
- signop, bool *);
+ signop, overflow_type *);
unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int, const HOST_WIDE_INT *,
- unsigned int, unsigned int, signop, bool *,
- bool);
+ unsigned int, unsigned int, signop,
+ overflow_type *, bool);
unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int, unsigned int,
const HOST_WIDE_INT *,
unsigned int, unsigned int,
- signop, bool *);
+ signop, overflow_type *);
}
/* Return the number of bits that integer X can hold. */
return sub (0, x);
}
-/* Return -x. Indicate in *OVERFLOW if X is the minimum signed value. */
+/* Return -x. Indicate in *OVERFLOW if performing the negation would
+ cause an overflow. */
template <typename T>
inline WI_UNARY_RESULT (T)
-wi::neg (const T &x, bool *overflow)
+wi::neg (const T &x, overflow_type *overflow)
{
- *overflow = only_sign_bit_p (x);
+ *overflow = only_sign_bit_p (x) ? OVF_OVERFLOW : OVF_NONE;
return sub (0, x);
}
and indicate in *OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
unsigned HOST_WIDE_INT yl = yi.ulow ();
unsigned HOST_WIDE_INT resultl = xl + yl;
if (sgn == SIGNED)
- *overflow = (((resultl ^ xl) & (resultl ^ yl))
- >> (precision - 1)) & 1;
+ {
+ if ((((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1)
+ {
+ if (xl > resultl)
+ *overflow = OVF_UNDERFLOW;
+ else if (xl < resultl)
+ *overflow = OVF_OVERFLOW;
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = OVF_NONE;
+ }
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
- < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ < (xl << (HOST_BITS_PER_WIDE_INT - precision)))
+ ? OVF_OVERFLOW : OVF_NONE;
val[0] = resultl;
result.set_len (1);
}
and indicate in *OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
unsigned HOST_WIDE_INT yl = yi.ulow ();
unsigned HOST_WIDE_INT resultl = xl - yl;
if (sgn == SIGNED)
- *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
+ {
+ if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
+ {
+ if (xl > yl)
+ *overflow = OVF_UNDERFLOW;
+ else if (xl < yl)
+ *overflow = OVF_OVERFLOW;
+ else
+ *overflow = OVF_NONE;
+ }
+ else
+ *overflow = OVF_NONE;
+ }
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
- > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)))
+ ? OVF_UNDERFLOW : OVF_NONE;
val[0] = resultl;
result.set_len (1);
}
and indicate in *OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
unsigned int precision = get_precision (result);
*OVERFLOW whether the operation overflowed. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::smul (const T1 &x, const T2 &y, bool *overflow)
+wi::smul (const T1 &x, const T2 &y, overflow_type *overflow)
{
return mul (x, y, SIGNED, overflow);
}
/* Return X * Y, treating both X and Y as unsigned values. Indicate in
- *OVERFLOW whether the operation overflowed. */
+ *OVERFLOW if the result overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::umul (const T1 &x, const T2 &y, bool *overflow)
+wi::umul (const T1 &x, const T2 &y, overflow_type *overflow)
{
return mul (x, y, UNSIGNED, overflow);
}
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
unsigned int precision = get_precision (quotient);
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
in *OVERFLOW if the result overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
unsigned int precision = get_precision (remainder);
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
given by SGN. Indicate in *OVERFLOW if the division overflows. */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
-wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
return shifted_mask <T> (bit, 1, false);
}
+/* Accumulate a set of overflows into OVERFLOW. */
+
+static inline void
+wi::accumulate_overflow (wi::overflow_type &overflow,
+ wi::overflow_type suboverflow)
+{
+ if (!suboverflow)
+ return;
+ if (!overflow)
+ overflow = suboverflow;
+ else if (overflow != suboverflow)
+ overflow = wi::OVF_UNKNOWN;
+}
+
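The following standalone check (illustrative only, not part of the patch)
restates accumulate_overflow's merging rule with a local copy of the enum:
the first overflow seen is kept, and conflicting directions collapse to
OVF_UNKNOWN.

    /* Illustrative sketch only; not GCC code.  */
    #include <cassert>

    enum overflow_type { OVF_NONE = 0, OVF_UNDERFLOW = -1,
                         OVF_OVERFLOW = 1, OVF_UNKNOWN = 2 };

    static void
    accumulate (overflow_type &acc, overflow_type sub)
    {
      if (sub == OVF_NONE)
        return;                 /* Nothing new to record.  */
      if (acc == OVF_NONE)
        acc = sub;              /* First overflow seen.  */
      else if (acc != sub)
        acc = OVF_UNKNOWN;      /* Mixed directions.  */
    }

    int
    main ()
    {
      overflow_type acc = OVF_NONE;
      accumulate (acc, OVF_NONE);
      assert (acc == OVF_NONE);
      accumulate (acc, OVF_OVERFLOW);
      assert (acc == OVF_OVERFLOW);
      accumulate (acc, OVF_OVERFLOW);
      assert (acc == OVF_OVERFLOW);
      accumulate (acc, OVF_UNDERFLOW);
      assert (acc == OVF_UNKNOWN);
      return 0;
    }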
#endif /* WIDE_INT_H */