+2017-10-09  Richard Sandiford  <richard.sandiford@linaro.org>
+
+ * wide-int.h (WI_BINARY_OPERATOR_RESULT): New macro.
+ (WI_BINARY_PREDICATE_RESULT): Likewise.
+ (wi::binary_traits::operator_result): New type.
+ (wi::binary_traits::predicate_result): Likewise.
+ (generic_wide_int::operator~, unary generic_wide_int::operator-)
+ (generic_wide_int::operator==, generic_wide_int::operator!=)
+ (generic_wide_int::operator&, generic_wide_int::and_not)
+ (generic_wide_int::operator|, generic_wide_int::or_not)
+ (generic_wide_int::operator^, generic_wide_int::operator+)
+ (binary generic_wide_int::operator-, generic_wide_int::operator*):
+ Delete.
+ (operator~, unary operator-, operator==, operator!=, operator&)
+ (operator|, operator^, operator+, binary operator-, operator*): New
+ functions.
+ * expr.c (get_inner_reference): Use wi::bit_and_not.
+ * fold-const.c (fold_binary_loc): Likewise.
+ * ipa-prop.c (ipa_compute_jump_functions_for_edge): Likewise.
+ * tree-ssa-ccp.c (get_value_from_alignment): Likewise.
+ (bit_value_binop): Likewise.
+ * tree-ssa-math-opts.c (find_bswap_or_nop_load): Likewise.
+ * tree-vrp.c (zero_nonzero_bits_from_vr): Likewise.
+ (extract_range_from_binary_expr_1): Likewise.
+ (masked_increment): Likewise.
+ (simplify_bit_ops_using_ranges): Likewise.
+
2017-10-09  Martin Jambor  <mjambor@suse.cz>

	PR hsa/82416
if (wi::neg_p (bit_offset) || !wi::fits_shwi_p (bit_offset))
{
offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
- offset_int tem = bit_offset.and_not (mask);
+ offset_int tem = wi::bit_and_not (bit_offset, mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
TYPE_PRECISION (TREE_TYPE (arg1)));
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if (msk.and_not (c1 | c2) == 0)
+ if (wi::bit_and_not (msk, c1 | c2) == 0)
{
tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
return fold_build2_loc (loc, BIT_IOR_EXPR, type, tem, arg1);
mode which allows further optimizations. */
c1 &= msk;
c2 &= msk;
- wide_int c3 = c1.and_not (c2);
+ wide_int c3 = wi::bit_and_not (c1, c2);
for (w = BITS_PER_UNIT; w <= width; w <<= 1)
{
wide_int mask = wi::mask (w, false,
TYPE_PRECISION (type));
- if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0)
+ if (((c1 | c2) & mask) == mask
+ && wi::bit_and_not (c1, mask) == 0)
{
c3 = mask;
break;
unsigned align;
get_pointer_alignment_1 (arg, &align, &bitpos);
- widest_int mask
- = wi::mask<widest_int>(TYPE_PRECISION (TREE_TYPE (arg)), false)
- .and_not (align / BITS_PER_UNIT - 1);
+ widest_int mask = wi::bit_and_not
+ (wi::mask<widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false),
+ align / BITS_PER_UNIT - 1);
widest_int value = bitpos / BITS_PER_UNIT;
ipa_set_jfunc_bits (jfunc, value, mask);
}
gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
get_pointer_alignment_1 (expr, &align, &bitpos);
- val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
- ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
- : -1).and_not (align / BITS_PER_UNIT - 1);
+ val.mask = wi::bit_and_not
+ (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
+ ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
+ : -1,
+ align / BITS_PER_UNIT - 1);
val.lattice_val
= wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
if (val.lattice_val == CONSTANT)
case BIT_IOR_EXPR:
/* The mask is constant where there is a known
set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
- *mask = (r1mask | r2mask)
- .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
+ *mask = wi::bit_and_not (r1mask | r2mask,
+ wi::bit_and_not (r1val, r1mask)
+ | wi::bit_and_not (r2val, r2mask));
*val = r1val | r2val;
break;
{
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
- widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+ widest_int lo = (wi::bit_and_not (r1val, r1mask)
+ + wi::bit_and_not (r2val, r2mask));
lo = wi::ext (lo, width, sgn);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
case NE_EXPR:
{
widest_int m = r1mask | r2mask;
- if (r1val.and_not (m) != r2val.and_not (m))
+ if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
{
*mask = 0;
*val = ((code == EQ_EXPR) ? 0 : 1);
/* If we know the most significant bits we know the values
value ranges by means of treating varying bits as zero
or one. Do a cross comparison of the max/min pairs. */
- maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
- minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
+ maxmin = wi::cmp (o1val | o1mask,
+ wi::bit_and_not (o2val, o2mask), sgn);
+ minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
+ o2val | o2mask, sgn);
if (maxmin < 0) /* o1 is less than o2. */
{
*mask = 0;
if (wi::neg_p (bit_offset))
{
offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
- offset_int tem = bit_offset.and_not (mask);
+ offset_int tem = wi::bit_and_not (bit_offset, mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
may_be_nonzero->get_precision ());
*may_be_nonzero = *may_be_nonzero | mask;
- *must_be_nonzero = must_be_nonzero->and_not (mask);
+ *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
}
}
wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
| ~(may_be_nonzero0 | may_be_nonzero1));
wide_int result_one_bits
- = (must_be_nonzero0.and_not (may_be_nonzero1)
- | must_be_nonzero1.and_not (may_be_nonzero0));
+ = (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
+ | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
max = wide_int_to_tree (expr_type, ~result_zero_bits);
min = wide_int_to_tree (expr_type, result_one_bits);
/* If the range has all positive or all negative values the
if ((res & bit) == 0)
continue;
res = bit - 1;
- res = (val + bit).and_not (res);
+ res = wi::bit_and_not (val + bit, res);
res &= mask;
if (wi::gtu_p (res, val))
return res ^ sgnbit;
switch (gimple_assign_rhs_code (stmt))
{
case BIT_AND_EXPR:
- mask = may_be_nonzero0.and_not (must_be_nonzero1);
+ mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
if (mask == 0)
{
op = op0;
break;
}
- mask = may_be_nonzero1.and_not (must_be_nonzero0);
+ mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
if (mask == 0)
{
op = op1;
}
break;
case BIT_IOR_EXPR:
- mask = may_be_nonzero0.and_not (must_be_nonzero1);
+ mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
if (mask == 0)
{
op = op1;
break;
}
- mask = may_be_nonzero1.and_not (must_be_nonzero0);
+ mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
if (mask == 0)
{
op = op0;
#define WI_BINARY_RESULT(T1, T2) \
typename wi::binary_traits <T1, T2>::result_type
+/* Likewise for binary operators, which excludes the case in which neither
+ T1 nor T2 is a wide-int-based type. */
+#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::operator_result
+
/* The type of result produced by T1 << T2. Leads to substitution failure
if the operation isn't supported. Defined purely for brevity. */
#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
typename wi::binary_traits <T1, T2>::signed_shift_result_type
+/* The type of result produced by a sign-agnostic binary predicate on
+ types T1 and T2. This is bool if wide-int operations make sense for
+ T1 and T2 and leads to substitution failure otherwise. */
+#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::predicate_result
+
/* The type of result produced by a signed binary predicate on types T1 and T2.
This is bool if signed comparisons make sense for T1 and T2 and leads to
substitution failure otherwise. */
struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
{
typedef widest_int result_type;
+ /* Don't define operators for this combination. */
};
template <typename T1, typename T2>
struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
{
typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
};
template <typename T1, typename T2>
so as not to confuse gengtype. */
typedef generic_wide_int < fixed_wide_int_storage
<int_traits <T2>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
typedef bool signed_predicate_result;
};
struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
{
typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
};
template <typename T1, typename T2>
so as not to confuse gengtype. */
typedef generic_wide_int < fixed_wide_int_storage
<int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
typedef result_type signed_shift_result_type;
typedef bool signed_predicate_result;
};
template <typename T1, typename T2>
struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
{
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
/* Spelled out explicitly (rather than through FIXED_WIDE_INT)
so as not to confuse gengtype. */
- STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
typedef generic_wide_int < fixed_wide_int_storage
<int_traits <T1>::precision> > result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
typedef result_type signed_shift_result_type;
typedef bool signed_predicate_result;
};
struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
{
typedef wide_int result_type;
+ typedef result_type operator_result;
+ typedef bool predicate_result;
};
}
template <typename T>
generic_wide_int &operator = (const T &);
-#define BINARY_PREDICATE(OP, F) \
- template <typename T> \
- bool OP (const T &c) const { return wi::F (*this, c); }
-
-#define UNARY_OPERATOR(OP, F) \
- WI_UNARY_RESULT (generic_wide_int) OP () const { return wi::F (*this); }
-
-#define BINARY_OPERATOR(OP, F) \
- template <typename T> \
- WI_BINARY_RESULT (generic_wide_int, T) \
- OP (const T &c) const { return wi::F (*this, c); }
-
#define ASSIGNMENT_OPERATOR(OP, F) \
template <typename T> \
generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
#define INCDEC_OPERATOR(OP, DELTA) \
generic_wide_int &OP () { *this += DELTA; return *this; }
- UNARY_OPERATOR (operator ~, bit_not)
- UNARY_OPERATOR (operator -, neg)
- BINARY_PREDICATE (operator ==, eq_p)
- BINARY_PREDICATE (operator !=, ne_p)
- BINARY_OPERATOR (operator &, bit_and)
- BINARY_OPERATOR (and_not, bit_and_not)
- BINARY_OPERATOR (operator |, bit_or)
- BINARY_OPERATOR (or_not, bit_or_not)
- BINARY_OPERATOR (operator ^, bit_xor)
- BINARY_OPERATOR (operator +, add)
- BINARY_OPERATOR (operator -, sub)
- BINARY_OPERATOR (operator *, mul)
ASSIGNMENT_OPERATOR (operator &=, bit_and)
ASSIGNMENT_OPERATOR (operator |=, bit_or)
ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
INCDEC_OPERATOR (operator ++, 1)
INCDEC_OPERATOR (operator --, -1)
-#undef BINARY_PREDICATE
-#undef UNARY_OPERATOR
-#undef BINARY_OPERATOR
#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR
#undef SIGNED_BINARY_PREDICATE
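+/* Non-member operator overloads for wide-int-based types, each forwarding
+   to the corresponding wi:: routine.  The binary forms use
+   WI_BINARY_OPERATOR_RESULT and WI_BINARY_PREDICATE_RESULT so that they
+   lead to substitution failure for type combinations that the wi::
+   routines do not support.  */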
+#define UNARY_OPERATOR(OP, F) \
+ template<typename T> \
+ WI_UNARY_RESULT (generic_wide_int<T>) \
+ OP (const generic_wide_int<T> &x) \
+ { \
+ return wi::F (x); \
+ }
+
+#define BINARY_PREDICATE(OP, F) \
+ template<typename T1, typename T2> \
+ WI_BINARY_PREDICATE_RESULT (T1, T2) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+#define BINARY_OPERATOR(OP, F) \
+ template<typename T1, typename T2> \
+ WI_BINARY_OPERATOR_RESULT (T1, T2) \
+ OP (const T1 &x, const T2 &y) \
+ { \
+ return wi::F (x, y); \
+ }
+
+UNARY_OPERATOR (operator ~, bit_not)
+UNARY_OPERATOR (operator -, neg)
+BINARY_PREDICATE (operator ==, eq_p)
+BINARY_PREDICATE (operator !=, ne_p)
+BINARY_OPERATOR (operator &, bit_and)
+BINARY_OPERATOR (operator |, bit_or)
+BINARY_OPERATOR (operator ^, bit_xor)
+BINARY_OPERATOR (operator +, add)
+BINARY_OPERATOR (operator -, sub)
+BINARY_OPERATOR (operator *, mul)
+
+#undef UNARY_OPERATOR
+#undef BINARY_PREDICATE
+#undef BINARY_OPERATOR
+
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator << (const T1 &x, const T2 &y)