From: Richard Sandiford <richard.sandiford@linaro.org>
Date: Mon, 9 Oct 2017 10:51:45 +0000 (+0000)
Subject: Allow non-wi <op> wi
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=7b27cb4b510c4c0ae8446140929380aba4a9f79a;p=gcc.git

Allow non-wi <op> wi

This patch uses global rather than member operators for wide-int.h,
so that the first operand can be a non-wide-int type.

The patch also removes the and_not and or_not member functions.
It was already inconsistent to have member functions for these two
operations (one of which was never used) and not for other wi:: ones
like udiv.  After the operator change, we'd have the additional
inconsistency that "non-wi & wi" would work but "non-wi.and_not (wi)"
wouldn't.

2017-10-09  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* wide-int.h (WI_BINARY_OPERATOR_RESULT): New macro.
	(WI_BINARY_PREDICATE_RESULT): Likewise.
	(wi::binary_traits::operator_result): New type.
	(wi::binary_traits::predicate_result): Likewise.
	(generic_wide_int::operator~, unary generic_wide_int::operator-)
	(generic_wide_int::operator==, generic_wide_int::operator!=)
	(generic_wide_int::operator&, generic_wide_int::and_not)
	(generic_wide_int::operator|, generic_wide_int::or_not)
	(generic_wide_int::operator^, generic_wide_int::operator+)
	(binary generic_wide_int::operator-, generic_wide_int::operator*):
	Delete.
	(operator~, unary operator-, operator==, operator!=, operator&)
	(operator|, operator^, operator+, binary operator-, operator*): New
	functions.
	* expr.c (get_inner_reference): Use wi::bit_and_not.
	* fold-const.c (fold_binary_loc): Likewise.
	* ipa-prop.c (ipa_compute_jump_functions_for_edge): Likewise.
	* tree-ssa-ccp.c (get_value_from_alignment): Likewise.
	(bit_value_binop): Likewise.
	* tree-ssa-math-opts.c (find_bswap_or_nop_load): Likewise.
	* tree-vrp.c (zero_nonzero_bits_from_vr): Likewise.
	(extract_range_from_binary_expr_1): Likewise.
	(masked_increment): Likewise.
	(simplify_bit_ops_using_ranges): Likewise.

From-SVN: r253539
---
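As a sketch of the idea, using toy names rather than GCC's real classes
(toy_wide_int, to_long and toy_operator_result below are invented
stand-ins for generic_wide_int, the wi:: accessors and
wi::binary_traits <T1, T2>::operator_result): a member operator only
matches when the class type is on the left, while a namespace-scope
operator template matches either operand order, and gating its return
type on a trait keeps the template from hijacking plain integer
arithmetic.  The same trick turns the removed and_not member into a
free bit_and_not function.

    #include <type_traits>
    #include <cassert>

    struct toy_wide_int { long val; };

    /* Defined only if one operand is toy_wide_int; otherwise
       substitution fails and the operator templates below drop out of
       overload resolution, leaving e.g. "1L + 2L" to the built-in
       operator.  This plays the role of operator_result above.  */
    template <typename T1, typename T2>
    using toy_operator_result
      = typename std::enable_if <(std::is_same <T1, toy_wide_int>::value
                                  || std::is_same <T2, toy_wide_int>::value),
                                 toy_wide_int>::type;

    static long to_long (const toy_wide_int &x) { return x.val; }
    static long to_long (long x) { return x; }

    /* Namespace-scope operator: either operand may be the non-wide
       type.  A member toy_wide_int::operator+ would only accept the
       "wi + 1" order.  */
    template <typename T1, typename T2>
    toy_operator_result <T1, T2>
    operator+ (const T1 &x, const T2 &y)
    {
      return { to_long (x) + to_long (y) };
    }

    /* Free function in the style of wi::bit_and_not, replacing the old
       x.and_not (y) member: computes x & ~y.  */
    template <typename T1, typename T2>
    toy_operator_result <T1, T2>
    bit_and_not (const T1 &x, const T2 &y)
    {
      return { to_long (x) & ~to_long (y) };
    }

    int
    main ()
    {
      toy_wide_int x = { 6 };
      assert ((x + 1L).val == 7);      /* worked with member operators too */
      assert ((1L + x).val == 7);      /* needs the namespace-scope form */
      assert (bit_and_not (x, 2L).val == 4);  /* 6 & ~2 */
      assert (1L + 2L == 3);           /* still the built-in operator */
    }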
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5718175a321..0cf8731d2fb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,30 @@
+2017-10-09  Richard Sandiford  <richard.sandiford@linaro.org>
+
+	* wide-int.h (WI_BINARY_OPERATOR_RESULT): New macro.
+	(WI_BINARY_PREDICATE_RESULT): Likewise.
+	(wi::binary_traits::operator_result): New type.
+	(wi::binary_traits::predicate_result): Likewise.
+	(generic_wide_int::operator~, unary generic_wide_int::operator-)
+	(generic_wide_int::operator==, generic_wide_int::operator!=)
+	(generic_wide_int::operator&, generic_wide_int::and_not)
+	(generic_wide_int::operator|, generic_wide_int::or_not)
+	(generic_wide_int::operator^, generic_wide_int::operator+)
+	(binary generic_wide_int::operator-, generic_wide_int::operator*):
+	Delete.
+	(operator~, unary operator-, operator==, operator!=, operator&)
+	(operator|, operator^, operator+, binary operator-, operator*): New
+	functions.
+	* expr.c (get_inner_reference): Use wi::bit_and_not.
+	* fold-const.c (fold_binary_loc): Likewise.
+	* ipa-prop.c (ipa_compute_jump_functions_for_edge): Likewise.
+	* tree-ssa-ccp.c (get_value_from_alignment): Likewise.
+	(bit_value_binop): Likewise.
+	* tree-ssa-math-opts.c (find_bswap_or_nop_load): Likewise.
+	* tree-vrp.c (zero_nonzero_bits_from_vr): Likewise.
+	(extract_range_from_binary_expr_1): Likewise.
+	(masked_increment): Likewise.
+	(simplify_bit_ops_using_ranges): Likewise.
+
 2017-10-09  Martin Jambor  <mjambor@suse.cz>
 
 	PR hsa/82416
diff --git a/gcc/expr.c b/gcc/expr.c
index 134ee731c29..baaef260320 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -7153,7 +7153,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
       if (wi::neg_p (bit_offset) || !wi::fits_shwi_p (bit_offset))
 	{
 	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
-	  offset_int tem = bit_offset.and_not (mask);
+	  offset_int tem = wi::bit_and_not (bit_offset, mask);
 	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
 	     Subtract it to BIT_OFFSET and add it (scaled) to OFFSET.  */
 	  bit_offset -= tem;
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index d8dc56cea6b..aac62f83321 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -9888,7 +9888,7 @@ fold_binary_loc (location_t loc,
 				      TYPE_PRECISION (TREE_TYPE (arg1)));
 
 	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
-	  if (msk.and_not (c1 | c2) == 0)
+	  if (wi::bit_and_not (msk, c1 | c2) == 0)
 	    {
 	      tem = fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
 	      return fold_build2_loc (loc, BIT_IOR_EXPR, type, tem, arg1);
@@ -9899,12 +9899,13 @@ fold_binary_loc (location_t loc,
 	     mode which allows further optimizations.  */
 	  c1 &= msk;
 	  c2 &= msk;
-	  wide_int c3 = c1.and_not (c2);
+	  wide_int c3 = wi::bit_and_not (c1, c2);
 	  for (w = BITS_PER_UNIT; w <= width; w <<= 1)
 	    {
 	      wide_int mask = wi::mask (w, false,
 					TYPE_PRECISION (type));
-	      if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0)
+	      if (((c1 | c2) & mask) == mask
+		  && wi::bit_and_not (c1, mask) == 0)
 		{
 		  c3 = mask;
 		  break;
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 51f62218501..8fbb6435427 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -1931,9 +1931,9 @@ ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
 	  unsigned align;
 
 	  get_pointer_alignment_1 (arg, &align, &bitpos);
-	  widest_int mask
-	    = wi::mask <widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false)
-	      .and_not (align / BITS_PER_UNIT - 1);
+	  widest_int mask = wi::bit_and_not
+	    (wi::mask <widest_int> (TYPE_PRECISION (TREE_TYPE (arg)), false),
+	     align / BITS_PER_UNIT - 1);
 	  widest_int value = bitpos / BITS_PER_UNIT;
 	  ipa_set_jfunc_bits (jfunc, value, mask);
 	}
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 9811640c2a5..df409af2d83 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -569,9 +569,11 @@ get_value_from_alignment (tree expr)
   gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
 
   get_pointer_alignment_1 (expr, &align, &bitpos);
-  val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
-	      ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
-	      : -1).and_not (align / BITS_PER_UNIT - 1);
+  val.mask = wi::bit_and_not
+    (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
+     ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
+     : -1,
+     align / BITS_PER_UNIT - 1);
   val.lattice_val = wi::sext (val.mask, TYPE_PRECISION (type)) == -1
 		    ? VARYING : CONSTANT;
   if (val.lattice_val == CONSTANT)
@@ -1308,8 +1310,9 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
     case BIT_IOR_EXPR:
       /* The mask is constant where there is a known
	 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)).  */
-      *mask = (r1mask | r2mask)
-	      .and_not (r1val.and_not (r1mask) | r2val.and_not (r2mask));
+      *mask = wi::bit_and_not (r1mask | r2mask,
+			       wi::bit_and_not (r1val, r1mask)
+			       | wi::bit_and_not (r2val, r2mask));
       *val = r1val | r2val;
       break;
 
@@ -1395,7 +1398,8 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
 	{
 	  /* Do the addition with unknown bits set to zero, to give carry-ins
 	     of zero wherever possible.  */
-	  widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+	  widest_int lo = (wi::bit_and_not (r1val, r1mask)
+			   + wi::bit_and_not (r2val, r2mask));
 	  lo = wi::ext (lo, width, sgn);
 	  /* Do the addition with unknown bits set to one, to give carry-ins
 	     of one wherever possible.  */
@@ -1447,7 +1451,7 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
     case NE_EXPR:
       {
	widest_int m = r1mask | r2mask;
-	if (r1val.and_not (m) != r2val.and_not (m))
+	if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m))
	  {
	    *mask = 0;
	    *val = ((code == EQ_EXPR) ? 0 : 1);
@@ -1486,8 +1490,10 @@ bit_value_binop (enum tree_code code, signop sgn, int width,
	/* If we know the most significant bits we know the values
	   value ranges by means of treating varying bits as zero
	   or one.  Do a cross comparison of the max/min pairs.  */
-	maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
-	minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
+	maxmin = wi::cmp (o1val | o1mask,
+			  wi::bit_and_not (o2val, o2mask), sgn);
+	minmax = wi::cmp (wi::bit_and_not (o1val, o1mask),
+			  o2val | o2mask, sgn);
	if (maxmin < 0)  /* o1 is less than o2.  */
	  {
	    *mask = 0;
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 818290cf47c..17d62a82e8b 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -2138,7 +2138,7 @@ find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
       if (wi::neg_p (bit_offset))
	{
	  offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
-	  offset_int tem = bit_offset.and_not (mask);
+	  offset_int tem = wi::bit_and_not (bit_offset, mask);
	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
	     Subtract it to BIT_OFFSET and add it (scaled) to OFFSET.  */
	  bit_offset -= tem;
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 3dba3562789..3e8be2688fb 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -1769,7 +1769,7 @@ zero_nonzero_bits_from_vr (const tree expr_type,
	  wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
				    may_be_nonzero->get_precision ());
	  *may_be_nonzero = *may_be_nonzero | mask;
-	  *must_be_nonzero = must_be_nonzero->and_not (mask);
+	  *must_be_nonzero = wi::bit_and_not (*must_be_nonzero, mask);
	}
     }
 
@@ -2975,8 +2975,8 @@ extract_range_from_binary_expr_1 (value_range *vr,
       wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
				   | ~(may_be_nonzero0 | may_be_nonzero1));
       wide_int result_one_bits
-	= (must_be_nonzero0.and_not (may_be_nonzero1)
-	   | must_be_nonzero1.and_not (may_be_nonzero0));
+	= (wi::bit_and_not (must_be_nonzero0, may_be_nonzero1)
+	   | wi::bit_and_not (must_be_nonzero1, may_be_nonzero0));
       max = wide_int_to_tree (expr_type, ~result_zero_bits);
       min = wide_int_to_tree (expr_type, result_one_bits);
       /* If the range has all positive or all negative values the
@@ -4877,7 +4877,7 @@ masked_increment (const wide_int &val_in, const wide_int &mask,
       if ((res & bit) == 0)
	continue;
       res = bit - 1;
-      res = (val + bit).and_not (res);
+      res = wi::bit_and_not (val + bit, res);
       res &= mask;
       if (wi::gtu_p (res, val))
	return res ^ sgnbit;
@@ -9538,13 +9538,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
   switch (gimple_assign_rhs_code (stmt))
     {
     case BIT_AND_EXPR:
-      mask = may_be_nonzero0.and_not (must_be_nonzero1);
+      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
       if (mask == 0)
	{
	  op = op0;
	  break;
	}
-      mask = may_be_nonzero1.and_not (must_be_nonzero0);
+      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
       if (mask == 0)
	{
	  op = op1;
@@ -9552,13 +9552,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt)
	}
       break;
     case BIT_IOR_EXPR:
-      mask = may_be_nonzero0.and_not (must_be_nonzero1);
+      mask = wi::bit_and_not (may_be_nonzero0, must_be_nonzero1);
       if (mask == 0)
	{
	  op = op1;
	  break;
	}
-      mask = may_be_nonzero1.and_not (must_be_nonzero0);
+      mask = wi::bit_and_not (may_be_nonzero1, must_be_nonzero0);
       if (mask == 0)
	{
	  op = op0;
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 61d9aab2a83..56bc5345ba4 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -262,11 +262,22 @@ along with GCC; see the file COPYING3.  If not see
 #define WI_BINARY_RESULT(T1, T2) \
   typename wi::binary_traits <T1, T2>::result_type
 
+/* Likewise for binary operators, which excludes the case in which neither
+   T1 nor T2 is a wide-int-based type.  */
+#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
+  typename wi::binary_traits <T1, T2>::operator_result
+
 /* The type of result produced by T1 << T2.  Leads to substitution failure
    if the operation isn't supported.  Defined purely for brevity.  */
 #define WI_SIGNED_SHIFT_RESULT(T1, T2) \
   typename wi::binary_traits <T1, T2>::signed_shift_result_type
 
+/* The type of result produced by a sign-agnostic binary predicate on
+   types T1 and T2.  This is bool if wide-int operations make sense for
+   T1 and T2 and leads to substitution failure otherwise.  */
+#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
+  typename wi::binary_traits <T1, T2>::predicate_result
+
 /* The type of result produced by a signed binary predicate on types T1 and T2.
    This is bool if signed comparisons make sense for T1 and T2 and leads to
    substitution failure otherwise.  */
@@ -382,12 +393,15 @@ namespace wi
   struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
   {
     typedef widest_int result_type;
+    /* Don't define operators for this combination.  */
   };
 
   template <typename T1, typename T2>
   struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
   {
     typedef wide_int result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
   };
 
   template <typename T1, typename T2>
@@ -397,6 +411,8 @@ namespace wi
        so as not to confuse gengtype.  */
     typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T2>::precision> > result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
     typedef bool signed_predicate_result;
   };
 
@@ -404,6 +420,8 @@ namespace wi
   struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
   {
     typedef wide_int result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
   };
 
   template <typename T1, typename T2>
@@ -413,6 +431,8 @@ namespace wi
       so as not to confuse gengtype.  */
     typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
     typedef result_type signed_shift_result_type;
     typedef bool signed_predicate_result;
   };
@@ -420,11 +440,13 @@ namespace wi
   template <typename T1, typename T2>
   struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
   {
+    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
     /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
-    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
     typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
     typedef result_type signed_shift_result_type;
     typedef bool signed_predicate_result;
   };
@@ -433,6 +455,8 @@ namespace wi
   struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
   {
     typedef wide_int result_type;
+    typedef result_type operator_result;
+    typedef bool predicate_result;
   };
 }
 
@@ -675,18 +699,6 @@ public:
   template <typename T>
   generic_wide_int &operator = (const T &);
 
-#define BINARY_PREDICATE(OP, F) \
-  template <typename T> \
-  bool OP (const T &c) const { return wi::F (*this, c); }
-
-#define UNARY_OPERATOR(OP, F) \
-  WI_UNARY_RESULT (generic_wide_int) OP () const { return wi::F (*this); }
-
-#define BINARY_OPERATOR(OP, F) \
-  template <typename T> \
-    WI_BINARY_RESULT (generic_wide_int, T) \
-    OP (const T &c) const { return wi::F (*this, c); }
-
 #define ASSIGNMENT_OPERATOR(OP, F) \
   template <typename T> \
   generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
@@ -699,18 +711,6 @@ public:
 #define INCDEC_OPERATOR(OP, DELTA) \
   generic_wide_int &OP () { *this += DELTA; return *this; }
 
-  UNARY_OPERATOR (operator ~, bit_not)
-  UNARY_OPERATOR (operator -, neg)
-  BINARY_PREDICATE (operator ==, eq_p)
-  BINARY_PREDICATE (operator !=, ne_p)
-  BINARY_OPERATOR (operator &, bit_and)
-  BINARY_OPERATOR (and_not, bit_and_not)
-  BINARY_OPERATOR (operator |, bit_or)
-  BINARY_OPERATOR (or_not, bit_or_not)
-  BINARY_OPERATOR (operator ^, bit_xor)
-  BINARY_OPERATOR (operator +, add)
-  BINARY_OPERATOR (operator -, sub)
-  BINARY_OPERATOR (operator *, mul)
   ASSIGNMENT_OPERATOR (operator &=, bit_and)
   ASSIGNMENT_OPERATOR (operator |=, bit_or)
   ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
@@ -722,9 +722,6 @@ public:
   INCDEC_OPERATOR (operator ++, 1)
   INCDEC_OPERATOR (operator --, -1)
 
-#undef BINARY_PREDICATE
-#undef UNARY_OPERATOR
-#undef BINARY_OPERATOR
 #undef SHIFT_ASSIGNMENT_OPERATOR
 #undef ASSIGNMENT_OPERATOR
 #undef INCDEC_OPERATOR
@@ -3123,6 +3120,45 @@ SIGNED_BINARY_PREDICATE (operator >=, ges_p)
 
 #undef SIGNED_BINARY_PREDICATE
 
+#define UNARY_OPERATOR(OP, F) \
+  template <typename T> \
+  WI_UNARY_RESULT (generic_wide_int <T>) \
+  OP (const generic_wide_int <T> &x) \
+  { \
+    return wi::F (x); \
+  }
+
+#define BINARY_PREDICATE(OP, F) \
+  template <typename T1, typename T2> \
+  WI_BINARY_PREDICATE_RESULT (T1, T2) \
+  OP (const T1 &x, const T2 &y) \
+  { \
+    return wi::F (x, y); \
+  }
+
+#define BINARY_OPERATOR(OP, F) \
+  template <typename T1, typename T2> \
+  WI_BINARY_OPERATOR_RESULT (T1, T2) \
+  OP (const T1 &x, const T2 &y) \
+  { \
+    return wi::F (x, y); \
+  }
+
+UNARY_OPERATOR (operator ~, bit_not)
+UNARY_OPERATOR (operator -, neg)
+BINARY_PREDICATE (operator ==, eq_p)
+BINARY_PREDICATE (operator !=, ne_p)
+BINARY_OPERATOR (operator &, bit_and)
+BINARY_OPERATOR (operator |, bit_or)
+BINARY_OPERATOR (operator ^, bit_xor)
+BINARY_OPERATOR (operator +, add)
+BINARY_OPERATOR (operator -, sub)
+BINARY_OPERATOR (operator *, mul)
+
+#undef UNARY_OPERATOR
+#undef BINARY_PREDICATE
+#undef BINARY_OPERATOR
+
 template <typename T1, typename T2>
 inline WI_SIGNED_SHIFT_RESULT (T1, T2)
 operator << (const T1 &x, const T2 &y)
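
For reference, the effect at a call site.  This is a hypothetical
example, not code from the patch, and assumes the GCC-internal
wide-int.h from a GCC source tree:

    #include "wide-int.h"  /* GCC-internal header */

    wide_int
    example (const wide_int &x, const wide_int &y)
    {
      wide_int a = x + 2;			/* OK before and after */
      wide_int b = 2 + x;			/* only compiles after this change */
      wide_int c = wi::bit_and_not (x, y);	/* replaces x.and_not (y) */
      return a + b + c;
    }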