From: Uros Bizjak
Date: Wed, 20 Jul 2016 15:47:33 +0000 (+0200)
Subject: cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=dd4786fe817eb2f3336120d1b0c9ae51aeb9f94f;p=gcc.git

cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.

	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
	* combine.c: Use HOST_WIDE_INT_M1U instead of
	~(unsigned HOST_WIDE_INT) 0.
	* double-int.h: Ditto.
	* dse.c: Ditto.
	* dwarf2asm.c: Ditto.
	* expmed.c: Ditto.
	* genmodes.c: Ditto.
	* match.pd: Ditto.
	* read-rtl.c: Ditto.
	* tree-ssa-loop-ivopts.c: Ditto.
	* tree-ssa-loop-prefetch.c: Ditto.
	* tree-vect-generic.c: Ditto.
	* tree-vect-patterns.c: Ditto.
	* tree.c: Ditto.

From-SVN: r238529
---

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 36c13358443..2763af1d4e7 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,21 @@
+2016-07-20  Uros Bizjak
+
+	* cse.c: Use HOST_WIDE_INT_M1 instead of ~(HOST_WIDE_INT) 0.
+	* combine.c: Use HOST_WIDE_INT_M1U instead of
+	~(unsigned HOST_WIDE_INT) 0.
+	* double-int.h: Ditto.
+	* dse.c: Ditto.
+	* dwarf2asm.c: Ditto.
+	* expmed.c: Ditto.
+	* genmodes.c: Ditto.
+	* match.pd: Ditto.
+	* read-rtl.c: Ditto.
+	* tree-ssa-loop-ivopts.c: Ditto.
+	* tree-ssa-loop-prefetch.c: Ditto.
+	* tree-vect-generic.c: Ditto.
+	* tree-vect-patterns.c: Ditto.
+	* tree.c: Ditto.
+
 2016-07-20  Georg-Johann Lay
 
 	* gcc/config/avr.c (avr_legitimize_address) [AVR_TINY]: Force
diff --git a/gcc/combine.c b/gcc/combine.c
index 1e5ee8e9514..1becc3c719d 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -1660,7 +1660,7 @@ update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
     }
 
   /* Don't call nonzero_bits if it cannot change anything.  */
-  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
+  if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
     {
       bits = nonzero_bits (src, nonzero_bits_mode);
       if (reg_equal && bits)
@@ -6541,7 +6541,7 @@ simplify_set (rtx x)
   if (GET_MODE_CLASS (mode) == MODE_INT
       && HWI_COMPUTABLE_MODE_P (mode))
     {
-      src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+      src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
       SUBST (SET_SRC (x), src);
     }
 
@@ -7446,7 +7446,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       else
 	new_rtx = force_to_mode (inner, tmode,
 				 len >= HOST_BITS_PER_WIDE_INT
-				 ? ~(unsigned HOST_WIDE_INT) 0
+				 ? HOST_WIDE_INT_M1U
 				 : (HOST_WIDE_INT_1U << len) - 1,
 				 0);
 
@@ -7635,7 +7635,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
       inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
-			     ? ~(unsigned HOST_WIDE_INT) 0
+			     ? HOST_WIDE_INT_M1U
			     : (((HOST_WIDE_INT_1U << len) - 1)
				<< orig_pos),
			     0);
@@ -8110,7 +8110,7 @@ make_compound_operation (rtx x, enum rtx_code in_code)
	  && subreg_lowpart_p (x))
	{
	  rtx newer
-	    = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
+	    = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
 
	  /* If we have something other than a SUBREG, we might have
	     done an expansion, so rerun ourselves.  */
@@ -8390,7 +8390,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
      do not know, we need to assume that all bits up to the
      highest-order bit in MASK will be needed.  This is how we form
      such a mask.  */
   if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
-    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
+    fuller_mask = HOST_WIDE_INT_M1U;
   else
     fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1)) - 1);
@@ -8733,7 +8733,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
	  if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
-	      nonzero = ~(unsigned HOST_WIDE_INT) 0;
+	      nonzero = HOST_WIDE_INT_M1U;
 
	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
@@ -9496,7 +9496,7 @@ make_field_assignment (rtx x)
			       dest);
   src = force_to_mode (src, mode,
		       GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
-		       ? ~(unsigned HOST_WIDE_INT) 0
+		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1,
		       0);
diff --git a/gcc/cse.c b/gcc/cse.c
index 6a5ccb5f309..61d2d7e3c3f 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -4565,7 +4565,7 @@ cse_insn (rtx_insn *insn)
	      else
		shift = INTVAL (pos);
	      if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-		mask = ~(HOST_WIDE_INT) 0;
+		mask = HOST_WIDE_INT_M1;
	      else
		mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
	      val = (val >> shift) & mask;
@@ -5233,7 +5233,7 @@ cse_insn (rtx_insn *insn)
	      else
		shift = INTVAL (pos);
	      if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
-		mask = ~(HOST_WIDE_INT) 0;
+		mask = HOST_WIDE_INT_M1;
	      else
		mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
	      val &= ~(mask << shift);
diff --git a/gcc/double-int.h b/gcc/double-int.h
index fd84b4bafeb..6f59c1b9717 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -365,7 +365,7 @@ double_int::operator ^ (double_int b) const
 
 void dump_double_int (FILE *, double_int, bool);
 
-#define ALL_ONES (~((unsigned HOST_WIDE_INT) 0))
+#define ALL_ONES HOST_WIDE_INT_M1U
 
 /* The operands of the following comparison functions must be processed
    with double_int_ext, if their precision is less than
diff --git a/gcc/dse.c b/gcc/dse.c
index b300fb77c1f..89c3f94d0f0 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -288,7 +288,7 @@ struct store_info
 static unsigned HOST_WIDE_INT
 lowpart_bitmask (int n)
 {
-  unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT) 0;
+  unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_M1U;
   return mask >> (HOST_BITS_PER_WIDE_INT - n);
 }
diff --git a/gcc/dwarf2asm.c b/gcc/dwarf2asm.c
index bf5ded8c01b..ae81445dc84 100644
--- a/gcc/dwarf2asm.c
+++ b/gcc/dwarf2asm.c
@@ -97,7 +97,7 @@ dw2_asm_output_data (int size, unsigned HOST_WIDE_INT value,
   va_start (ap, comment);
 
   if (size * 8 < HOST_BITS_PER_WIDE_INT)
-    value &= ~(~(unsigned HOST_WIDE_INT) 0 << (size * 8));
+    value &= ~(HOST_WIDE_INT_M1U << (size * 8));
 
   if (op)
     {
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 0b0abbcf283..f776e54809e 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -3513,7 +3513,7 @@ invert_mod2n (unsigned HOST_WIDE_INT x, int n)
   int nbit = 3;
 
   mask = (n == HOST_BITS_PER_WIDE_INT
-	  ? ~(unsigned HOST_WIDE_INT) 0
+	  ? HOST_WIDE_INT_M1U
	  : (HOST_WIDE_INT_1U << n) - 1);
 
   while (nbit < n)
@@ -4423,7 +4423,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
		    || size - 1 >= BITS_PER_WORD)
		  goto fail1;
 
-		ml |= (~(unsigned HOST_WIDE_INT) 0) << (size - 1);
+		ml |= HOST_WIDE_INT_M1U << (size - 1);
		mlr = gen_int_mode (ml, compute_mode);
		extra_cost = (shift_cost (speed, compute_mode, post_shift)
			      + shift_cost (speed, compute_mode, size - 1)
diff --git a/gcc/genmodes.c b/gcc/genmodes.c
index 59faae98244..097cc80f33e 100644
--- a/gcc/genmodes.c
+++ b/gcc/genmodes.c
@@ -1409,7 +1409,7 @@ emit_mode_mask (void)
   puts ("\
 #define MODE_MASK(m) \\\n\
  ((m) >= HOST_BITS_PER_WIDE_INT) \\\n\
-  ? ~(unsigned HOST_WIDE_INT) 0 \\\n\
+  ? HOST_WIDE_INT_M1U \\\n\
   : (HOST_WIDE_INT_1U << (m)) - 1\n");
 
   for_all_modes (c, m)
diff --git a/gcc/match.pd b/gcc/match.pd
index 836f7d8f704..21bf6177cdd 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1487,7 +1487,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
	      is all ones.  */ }
	 }
-       zerobits = ~(unsigned HOST_WIDE_INT) 0;
+       zerobits = HOST_WIDE_INT_M1U;
       if (shiftc < prec)
	 {
	   zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
@@ -1522,7 +1522,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
	   break;
	 }
     (if (prec < HOST_BITS_PER_WIDE_INT
-	  || newmask == ~(unsigned HOST_WIDE_INT) 0)
+	  || newmask == HOST_WIDE_INT_M1U)
      (with
       { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
       (if (!tree_int_cst_equal (newmaskt, @2))
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index dc3a336b45b..a705859a9db 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -711,7 +711,7 @@ atoll (const char *p)
       if (new_wide < tmp_wide)
	{
	  /* Return INT_MAX equiv on overflow.  */
-	  tmp_wide = (~(unsigned HOST_WIDE_INT) 0) >> 1;
+	  tmp_wide = HOST_WIDE_INT_M1U >> 1;
	  break;
	}
       tmp_wide = new_wide;
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index ed6bac990f4..62ba71bd18a 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -4217,7 +4217,7 @@ get_address_cost (bool symbol_present, bool var_present,
     }
 
   bits = GET_MODE_BITSIZE (address_mode);
-  mask = ~(~(unsigned HOST_WIDE_INT) 0 << (bits - 1) << 1);
+  mask = ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
   offset &= mask;
   if ((offset >> (bits - 1) & 1))
     offset |= ~mask;
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index e43ced656f5..26cf0a0078a 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -233,7 +233,7 @@ struct mem_ref_group
 
 /* Assigned to PREFETCH_BEFORE when all iterations are to be prefetched.  */
 
-#define PREFETCH_ALL (~(unsigned HOST_WIDE_INT) 0)
+#define PREFETCH_ALL HOST_WIDE_INT_M1U
 
 /* Do not generate a prefetch if the unroll factor is significantly less
    than what is required by the prefetch.  This is to avoid redundant
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 5c4798ac2bf..9f0ec656bad 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -575,7 +575,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
	  if (ml >= HOST_WIDE_INT_1U << (prec - 1))
	    {
	      this_mode = 4 + (d < 0);
-	      ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+	      ml |= HOST_WIDE_INT_M1U << (prec - 1);
	    }
	  else
	    this_mode = 2 + (d < 0);
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index d78f92d2efb..2457844857d 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -2861,7 +2861,7 @@ vect_recog_divmod_pattern (vec *stmts,
	  if (ml >= HOST_WIDE_INT_1U << (prec - 1))
	    {
	      add = true;
-	      ml |= (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
+	      ml |= HOST_WIDE_INT_M1U << (prec - 1);
	    }
	  if (post_shift >= prec)
	    return NULL;
diff --git a/gcc/tree.c b/gcc/tree.c
index c08ac25b31e..661d385d43c 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -11338,9 +11338,9 @@ int_cst_value (const_tree x)
     {
       bool negative = ((val >> (bits - 1)) & 1) != 0;
       if (negative)
-	val |= (~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1;
+	val |= HOST_WIDE_INT_M1U << (bits - 1) << 1;
       else
-	val &= ~((~(unsigned HOST_WIDE_INT) 0) << (bits - 1) << 1);
+	val &= ~(HOST_WIDE_INT_M1U << (bits - 1) << 1);
     }
 
   return val;
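
Background note: HOST_WIDE_INT_M1 and HOST_WIDE_INT_M1U name the all-ones signed and unsigned HOST_WIDE_INT constants (defined in gcc/hwint.h), so every hunk above swaps a cast-and-complement expression for an equivalent named constant; the change is purely a readability cleanup and alters no generated values. The short standalone C sketch below illustrates the equivalence and the two mask idioms the patch touches. It is not part of the patch: it models HOST_WIDE_INT as long long and uses the made-up names sketch_hwi, sketch_uhwi, SKETCH_HWI_M1 and SKETCH_HWI_M1U, since the real typedef and macro definitions depend on the host configuration.

/* Standalone sketch, not GCC source.  HOST_WIDE_INT is modelled here as
   long long; the real typedef lives in gcc/hwint.h and varies by host.  */
#include <assert.h>
#include <stdio.h>

typedef long long sketch_hwi;            /* stand-in for HOST_WIDE_INT */
typedef unsigned long long sketch_uhwi;  /* stand-in for unsigned HOST_WIDE_INT */

#define SKETCH_HWI_M1  ((sketch_hwi) -1)   /* models HOST_WIDE_INT_M1 */
#define SKETCH_HWI_M1U ((sketch_uhwi) -1)  /* models HOST_WIDE_INT_M1U */

int
main (void)
{
  /* Each hunk above replaces the right-hand spelling with the macro on
     the left; both are the all-ones value of the same type.  */
  assert (SKETCH_HWI_M1 == ~(sketch_hwi) 0);
  assert (SKETCH_HWI_M1U == ~(sketch_uhwi) 0);

  /* Typical uses seen in the patch: build a low-n-bit mask, or set every
     bit from position n upward.  */
  int n = 12;
  sketch_uhwi low_mask = ~(SKETCH_HWI_M1U << n);   /* bits 0 .. n-1 */
  sketch_uhwi high_bits = SKETCH_HWI_M1U << n;     /* bits n and up */
  printf ("low mask  %#llx\nhigh bits %#llx\n", low_mask, high_bits);
  return 0;
}

The practical gain is that the intent, an all-ones HOST_WIDE_INT of the right signedness, is carried by the macro name instead of being re-derived from a cast at each changed site.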