From 146ec50fd5c73a6712d739697cd33567b42c2d3f Mon Sep 17 00:00:00 2001
From: Jason Merrill
Date: Fri, 16 Sep 2016 15:20:44 -0400
Subject: [PATCH] Add inline functions for various bitwise operations.

	* hwint.h (least_bit_hwi, pow2_or_zerop, pow2p_hwi, ctz_or_zero):
	New.
	* hwint.c (exact_log2): Use pow2p_hwi.
	(ctz_hwi, ffs_hwi): Use least_bit_hwi.
	* alias.c (memrefs_conflict_p): Use pow2_or_zerop.
	* builtins.c (get_object_alignment_2, get_object_alignment)
	(get_pointer_alignment, fold_builtin_atomic_always_lock_free): Use
	least_bit_hwi.
	* calls.c (compute_argument_addresses, store_one_arg): Use
	least_bit_hwi.
	* cfgexpand.c (expand_one_stack_var_at): Use least_bit_hwi.
	* combine.c (force_to_mode): Use least_bit_hwi.
	* emit-rtl.c (set_mem_attributes_minus_bitpos, adjust_address_1):
	Use least_bit_hwi.
	* expmed.c (synth_mult, expand_divmod): Use ctz_or_zero, ctz_hwi.
	(init_expmed_one_conv): Use pow2p_hwi.
	* fold-const.c (round_up_loc, round_down_loc): Use pow2_or_zerop.
	(fold_binary_loc): Use pow2p_hwi.
	* function.c (assign_parm_find_stack_rtl): Use least_bit_hwi.
	* gimple-fold.c (gimple_fold_builtin_memory_op): Use pow2p_hwi.
	* gimple-ssa-strength-reduction.c (replace_ref): Use least_bit_hwi.
	* hsa-gen.c (gen_hsa_addr_with_align, hsa_bitmemref_alignment):
	Use least_bit_hwi.
	* ipa-cp.c (ipcp_alignment_lattice::meet_with_1): Use least_bit_hwi.
	* ipa-prop.c (ipa_modify_call_arguments): Use least_bit_hwi.
	* omp-low.c (oacc_loop_fixed_partitions)
	(oacc_loop_auto_partitions): Use least_bit_hwi.
	* rtlanal.c (nonzero_bits1): Use ctz_or_zero.
	* stor-layout.c (place_field): Use least_bit_hwi.
	* tree-pretty-print.c (dump_generic_node): Use pow2p_hwi.
	* tree-sra.c (build_ref_for_offset): Use least_bit_hwi.
	* tree-ssa-ccp.c (ccp_finalize): Use least_bit_hwi.
	* tree-ssa-math-opts.c (bswap_replace): Use least_bit_hwi.
	* tree-ssa-strlen.c (handle_builtin_memcmp): Use pow2p_hwi.
	* tree-vect-data-refs.c (vect_analyze_group_access_1)
	(vect_grouped_store_supported, vect_grouped_load_supported)
	(vect_permute_load_chain, vect_shift_permute_load_chain)
	(vect_transform_grouped_load): Use pow2p_hwi.
	* tree-vect-generic.c (expand_vector_divmod): Use ctz_or_zero.
	* tree-vect-patterns.c (vect_recog_divmod_pattern): Use ctz_or_zero.
	* tree-vect-stmts.c (vectorizable_mask_load_store): Use
	least_bit_hwi.
	* tsan.c (instrument_expr): Use least_bit_hwi.
	* var-tracking.c (negative_power_of_two_p): Use pow2_or_zerop.
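As a quick orientation before the diff itself: the whole patch hangs off the four
small helpers added to gcc/hwint.h.  The following is a minimal standalone sketch
of them, not the GCC sources themselves: plain unsigned long long stands in for
unsigned HOST_WIDE_INT, and a hypothetical my_ffs stands in for GCC's ffs_hwi, so
the new names can be compared directly with the open-coded idioms they replace.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long hwi;	/* stand-in for unsigned HOST_WIDE_INT */

/* Portable stand-in for GCC's ffs_hwi: 1 + index of the least set bit,
   or 0 if X is zero.  */
static inline int
my_ffs (hwi x)
{
  int n = 1;
  if (x == 0)
    return 0;
  while ((x & 1) == 0)
    {
      x >>= 1;
      n++;
    }
  return n;
}

/* X with all but the lowest set bit masked off (old "x & -x" idiom).  */
static inline hwi
least_bit_hwi (hwi x)
{
  return (x & -x);
}

/* True if X is zero or a power of two (old "x == (x & -x)" idiom).  */
static inline bool
pow2_or_zerop (hwi x)
{
  return least_bit_hwi (x) == x;
}

/* True if X is a power of two (old "exact_log2 (x) >= 0" idiom).  */
static inline bool
pow2p_hwi (hwi x)
{
  return x && pow2_or_zerop (x);
}

/* Trailing zero count, or -1 for zero; used where the old code
   computed "floor_log2 (x & -x)".  */
static inline int
ctz_or_zero (hwi x)
{
  return my_ffs (x) - 1;
}

int
main (void)
{
  hwi tests[] = { 0, 1, 48, 64 };
  for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
    {
      hwi x = tests[i];
      printf ("x=%llu least_bit=%llu pow2_or_zero=%d pow2=%d ctz_or_zero=%d\n",
	      x, least_bit_hwi (x), (int) pow2_or_zerop (x),
	      (int) pow2p_hwi (x), ctz_or_zero (x));
    }
  return 0;
}

Note the split the patch relies on: pow2_or_zerop keeps the permissive old test
(true for zero), while pow2p_hwi adds the nonzero check that callers previously
spelled as exact_log2 (x) >= 0 or exact_log2 (x) != -1.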
From-SVN: r240194
---
 gcc/ChangeLog                       | 54 +++++++++++++++++++++++++++++
 gcc/alias.c                         |  4 +--
 gcc/builtins.c                      | 10 +++---
 gcc/calls.c                         |  6 ++--
 gcc/cfgexpand.c                     |  2 +-
 gcc/combine.c                       | 24 ++++++-------
 gcc/cp/ChangeLog                    |  7 ++++
 gcc/cp/class.c                      |  4 +--
 gcc/cp/decl.c                       |  2 +-
 gcc/cp/parser.c                     |  2 +-
 gcc/cse.c                           |  8 ++---
 gcc/emit-rtl.c                      |  4 +--
 gcc/expmed.c                        | 20 +++++------
 gcc/expr.c                          |  2 +-
 gcc/fold-const.c                    |  6 ++--
 gcc/function.c                      |  2 +-
 gcc/gimple-fold.c                   |  2 +-
 gcc/gimple-ssa-strength-reduction.c |  2 +-
 gcc/hsa-gen.c                       |  4 +--
 gcc/hwint.c                         |  6 ++--
 gcc/hwint.h                         | 35 ++++++++++++++++++-
 gcc/ifcvt.c                         |  4 +--
 gcc/ipa-cp.c                        |  2 +-
 gcc/ipa-prop.c                      |  2 +-
 gcc/omp-low.c                       |  4 +--
 gcc/rtlanal.c                       |  4 +--
 gcc/stor-layout.c                   | 12 +++---
 gcc/tree-pretty-print.c             |  2 +-
 gcc/tree-sra.c                      |  2 +-
 gcc/tree-ssa-ccp.c                  |  2 +-
 gcc/tree-ssa-math-opts.c            |  2 +-
 gcc/tree-ssa-strlen.c               |  2 +-
 gcc/tree-vect-data-refs.c           | 14 ++++---
 gcc/tree-vect-generic.c             |  2 +-
 gcc/tree-vect-patterns.c            |  2 +-
 gcc/tree-vect-stmts.c               |  4 +--
 gcc/tsan.c                          |  2 +-
 gcc/var-tracking.c                  |  2 +-
 gcc/varasm.c                        |  2 +-
 39 files changed, 180 insertions(+), 92 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 11f09af1f32..660bd9580b4 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,57 @@
+2016-09-16  Jason Merrill
+
+	* hwint.h (least_bit_hwi, pow2_or_zerop, pow2p_hwi, ctz_or_zero):
+	New.
+	* hwint.c (exact_log2): Use pow2p_hwi.
+	(ctz_hwi, ffs_hwi): Use least_bit_hwi.
+	* alias.c (memrefs_conflict_p): Use pow2_or_zerop.
+	* builtins.c (get_object_alignment_2, get_object_alignment)
+	(get_pointer_alignment, fold_builtin_atomic_always_lock_free): Use
+	least_bit_hwi.
+	* calls.c (compute_argument_addresses, store_one_arg): Use
+	least_bit_hwi.
+	* cfgexpand.c (expand_one_stack_var_at): Use least_bit_hwi.
+	* combine.c (force_to_mode): Use least_bit_hwi.
+	(contains_muldiv, find_split_point, combine_simplify_rtx)
+	(simplify_if_then_else, simplify_set, force_to_mode)
+	(if_then_else_cond, simplify_and_const_int_1)
+	(simplify_compare_const): Use pow2p_hwi.
+	* cse.c (fold_rtx): Use pow2p_hwi.
+	* emit-rtl.c (set_mem_attributes_minus_bitpos, adjust_address_1):
+	Use least_bit_hwi.
+	* expmed.c (synth_mult, expand_divmod): Use ctz_or_zero, ctz_hwi.
+	(init_expmed_one_conv): Use pow2p_hwi.
+	* expr.c (is_aligning_offset): Use pow2p_hwi.
+	* fold-const.c (round_up_loc, round_down_loc): Use pow2_or_zerop.
+	(fold_binary_loc): Use pow2p_hwi.
+	* function.c (assign_parm_find_stack_rtl): Use least_bit_hwi.
+	* gimple-fold.c (gimple_fold_builtin_memory_op): Use pow2p_hwi.
+	* gimple-ssa-strength-reduction.c (replace_ref): Use least_bit_hwi.
+	* hsa-gen.c (gen_hsa_addr_with_align, hsa_bitmemref_alignment):
+	Use least_bit_hwi.
+	* ifcvt.c (noce_try_store_flag_constants): Use pow2p_hwi.
+	* ipa-cp.c (ipcp_alignment_lattice::meet_with_1): Use least_bit_hwi.
+	* ipa-prop.c (ipa_modify_call_arguments): Use least_bit_hwi.
+	* omp-low.c (oacc_loop_fixed_partitions)
+	(oacc_loop_auto_partitions): Use least_bit_hwi.
+	* rtlanal.c (nonzero_bits1): Use ctz_or_zero.
+	* stor-layout.c (place_field): Use least_bit_hwi.
+	* tree-pretty-print.c (dump_generic_node): Use pow2p_hwi.
+	* tree-sra.c (build_ref_for_offset): Use least_bit_hwi.
+	* tree-ssa-ccp.c (ccp_finalize): Use least_bit_hwi.
+	* tree-ssa-math-opts.c (bswap_replace): Use least_bit_hwi.
+	* tree-ssa-strlen.c (handle_builtin_memcmp): Use pow2p_hwi.
+ * tree-vect-data-refs.c (vect_analyze_group_access_1) + (vect_grouped_store_supported, vect_grouped_load_supported) + (vect_permute_load_chain, vect_shift_permute_load_chain) + (vect_transform_grouped_load): Use pow2p_hwi. + * tree-vect-generic.c (expand_vector_divmod): Use ctz_or_zero. + * tree-vect-patterns.c (vect_recog_divmod_pattern): Use ctz_or_zero. + * tree-vect-stmts.c (vectorizable_mask_load_store): Use + least_bit_hwi. + * tsan.c (instrument_expr): Use least_bit_hwi. + * var-tracking.c (negative_power_of_two_p): Use pow2_or_zerop. + 2016-09-16 Andreas Schwab * config/ia64/ia64.h (ASM_OUTPUT_DWARF_OFFSET): Use parameter diff --git a/gcc/alias.c b/gcc/alias.c index f4b5a924b33..277125e01ae 100644 --- a/gcc/alias.c +++ b/gcc/alias.c @@ -2534,7 +2534,7 @@ memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c) { HOST_WIDE_INT sc = INTVAL (XEXP (x, 1)); unsigned HOST_WIDE_INT uc = sc; - if (sc < 0 && -uc == (uc & -uc)) + if (sc < 0 && pow2_or_zerop (-uc)) { if (xsize > 0) xsize = -xsize; @@ -2549,7 +2549,7 @@ memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c) { HOST_WIDE_INT sc = INTVAL (XEXP (y, 1)); unsigned HOST_WIDE_INT uc = sc; - if (sc < 0 && -uc == (uc & -uc)) + if (sc < 0 && pow2_or_zerop (-uc)) { if (ysize > 0) ysize = -ysize; diff --git a/gcc/builtins.c b/gcc/builtins.c index e779c71ba40..189aeac6912 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -305,7 +305,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, { ptr_bitmask = TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)); ptr_bitmask *= BITS_PER_UNIT; - align = ptr_bitmask & -ptr_bitmask; + align = least_bit_hwi (ptr_bitmask); addr = TREE_OPERAND (addr, 0); } @@ -325,7 +325,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, unsigned HOST_WIDE_INT step = 1; if (TMR_STEP (exp)) step = TREE_INT_CST_LOW (TMR_STEP (exp)); - align = MIN (align, (step & -step) * BITS_PER_UNIT); + align = MIN (align, least_bit_hwi (step) * BITS_PER_UNIT); } if (TMR_INDEX2 (exp)) align = BITS_PER_UNIT; @@ -404,7 +404,7 @@ get_object_alignment (tree exp) ptr & (align - 1) == bitpos. */ if (bitpos != 0) - align = (bitpos & -bitpos); + align = least_bit_hwi (bitpos); return align; } @@ -502,7 +502,7 @@ get_pointer_alignment (tree exp) ptr & (align - 1) == bitpos. */ if (bitpos != 0) - align = (bitpos & -bitpos); + align = least_bit_hwi (bitpos); return align; } @@ -5559,7 +5559,7 @@ fold_builtin_atomic_always_lock_free (tree arg0, tree arg1) /* Either this argument is null, or it's a fake pointer encoding the alignment of the object. 
*/ - val = val & -val; + val = least_bit_hwi (val); val *= BITS_PER_UNIT; if (val == 0 || mode_align < val) diff --git a/gcc/calls.c b/gcc/calls.c index 4ad3e34d677..86369e9cfdc 100644 --- a/gcc/calls.c +++ b/gcc/calls.c @@ -1805,7 +1805,7 @@ compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals else if (CONST_INT_P (offset)) { align = INTVAL (offset) * BITS_PER_UNIT | boundary; - align = align & -align; + align = least_bit_hwi (align); } set_mem_align (args[i].stack, align); @@ -5026,7 +5026,7 @@ store_one_arg (struct arg_data *arg, rtx argblock, int flags, int pad = used - size; if (pad) { - unsigned int pad_align = (pad & -pad) * BITS_PER_UNIT; + unsigned int pad_align = least_bit_hwi (pad) * BITS_PER_UNIT; parm_align = MIN (parm_align, pad_align); } } @@ -5086,7 +5086,7 @@ store_one_arg (struct arg_data *arg, rtx argblock, int flags, parm_align = BITS_PER_UNIT; else if (excess) { - unsigned int excess_align = (excess & -excess) * BITS_PER_UNIT; + unsigned int excess_align = least_bit_hwi (excess) * BITS_PER_UNIT; parm_align = MIN (parm_align, excess_align); } } diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index dfa301de9cc..4190f7f0aed 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -1008,7 +1008,7 @@ expand_one_stack_var_at (tree decl, rtx base, unsigned base_align, important, we'll simply use the alignment that is already set. */ if (base == virtual_stack_vars_rtx) offset -= frame_phase; - align = offset & -offset; + align = least_bit_hwi (offset); align *= BITS_PER_UNIT; if (align == 0 || align > base_align) align = base_align; diff --git a/gcc/combine.c b/gcc/combine.c index 1e43d48cc56..b22a2748029 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -2251,7 +2251,7 @@ contains_muldiv (rtx x) case MULT: return ! (CONST_INT_P (XEXP (x, 1)) - && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0); + && pow2p_hwi (UINTVAL (XEXP (x, 1)))); default: if (BINARY_P (x)) return contains_muldiv (XEXP (x, 0)) @@ -5100,7 +5100,7 @@ find_split_point (rtx *loc, rtx_insn *insn, bool set_src) instead if this isn't a multiply by a power of two. */ if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT - && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0) + && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1)))) { machine_mode mode = GET_MODE (x); unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1)); @@ -5916,7 +5916,7 @@ combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest, (and (const_int pow2-1)) */ if (GET_CODE (XEXP (x, 1)) == AND && CONST_INT_P (XEXP (XEXP (x, 1), 1)) - && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0 + && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1))) && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0))) return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0), -INTVAL (XEXP (XEXP (x, 1), 1)) - 1); @@ -6236,7 +6236,7 @@ simplify_if_then_else (rtx x) not equal to zero. Similarly if it is known to be -1 or 0. */ if (true_code == EQ && true_val == const0_rtx - && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0) + && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from)))) { false_code = EQ; false_val = gen_int_mode (nzb, GET_MODE (from)); @@ -6673,7 +6673,7 @@ simplify_set (rtx x) || (old_code == EQ && new_code == NE)) && ! 
other_changed_previously && op1 == const0_rtx && HWI_COMPUTABLE_MODE_P (GET_MODE (op0)) - && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0) + && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0)))) { rtx pat = PATTERN (other_insn), note = 0; @@ -8525,7 +8525,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, smask |= HOST_WIDE_INT_M1U << width; if (CONST_INT_P (XEXP (x, 1)) - && exact_log2 (- smask) >= 0 + && pow2p_hwi (- smask) && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0 && (INTVAL (XEXP (x, 1)) & ~smask) != 0) return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0), @@ -8557,7 +8557,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, /* If X is (minus C Y) where C's least set bit is larger than any bit in the mask, then we may replace with (neg Y). */ if (CONST_INT_P (XEXP (x, 0)) - && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask)) + && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask) { x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1), GET_MODE (x)); @@ -8701,7 +8701,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, && ((INTVAL (XEXP (x, 1)) + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) >= GET_MODE_PRECISION (GET_MODE (x))) - && exact_log2 (mask + 1) >= 0 + && pow2p_hwi (mask + 1) /* Number of bits left after the shift must be more than the mask needs. */ && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1)) @@ -8875,7 +8875,7 @@ force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, if ((mask & ~STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx && GET_MODE (XEXP (x, 0)) == mode - && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0 + && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode)) && (nonzero_bits (XEXP (x, 0), mode) == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE)) return force_to_mode (XEXP (x, 0), mode, mask, next_select); @@ -9105,7 +9105,7 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) /* Likewise for 0 or a single bit. */ else if (HWI_COMPUTABLE_MODE_P (mode) - && exact_log2 (nz = nonzero_bits (x, mode)) >= 0) + && pow2p_hwi (nz = nonzero_bits (x, mode))) { *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx; return x; @@ -9793,7 +9793,7 @@ simplify_and_const_int_1 (machine_mode mode, rtx varop, may eliminate it. */ if (GET_CODE (varop) == PLUS - && exact_log2 (constop + 1) >= 0) + && pow2p_hwi (constop + 1)) { rtx o0, o1; @@ -11335,7 +11335,7 @@ simplify_compare_const (enum rtx_code code, machine_mode mode, && (code == EQ || code == NE || code == GE || code == GEU || code == LT || code == LTU) && mode_width - 1 < HOST_BITS_PER_WIDE_INT - && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0 + && pow2p_hwi (const_op & GET_MODE_MASK (mode)) && (nonzero_bits (op0, mode) == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode)))) { diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index 26d730eb626..1b5140fe3d0 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,10 @@ +2016-09-16 Jason Merrill + + * class.c (check_bases, set_one_vmethod_tm_attributes): Use + least_bit_hwi. + * decl.c (cxx_init_decl_processing): Use pow2p_hwi. + * parser.c (cp_parser_cilk_simd_vectorlength): Use pow2p_hwi. + 2016-09-14 Jakub Jelinek PR c++/77549 diff --git a/gcc/cp/class.c b/gcc/cp/class.c index f7147e629bd..150f46f6e19 100644 --- a/gcc/cp/class.c +++ b/gcc/cp/class.c @@ -1843,7 +1843,7 @@ check_bases (tree t, doesn't define its own, then the current class inherits one. 
*/ if (seen_tm_mask && !find_tm_attribute (TYPE_ATTRIBUTES (t))) { - tree tm_attr = tm_mask_to_attr (seen_tm_mask & -seen_tm_mask); + tree tm_attr = tm_mask_to_attr (least_bit_hwi (seen_tm_mask)); TYPE_ATTRIBUTES (t) = tree_cons (tm_attr, NULL, TYPE_ATTRIBUTES (t)); } } @@ -5074,7 +5074,7 @@ set_one_vmethod_tm_attributes (tree type, tree fndecl) restrictive one. */ else if (tm_attr == NULL) { - apply_tm_attr (fndecl, tm_mask_to_attr (found & -found)); + apply_tm_attr (fndecl, tm_mask_to_attr (least_bit_hwi (found))); } /* Otherwise validate that we're not weaker than a function that is being overridden. */ diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index 9d91387c7f1..2f444656842 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -4133,7 +4133,7 @@ cxx_init_decl_processing (void) current_lang_name = lang_name_cplusplus; if (aligned_new_threshhold > 1 - && exact_log2 (aligned_new_threshhold) == -1) + && !pow2p_hwi (aligned_new_threshhold)) { error ("-faligned-new=%d is not a power of two", aligned_new_threshhold); aligned_new_threshhold = 1; diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index d704593e6aa..fb88021e237 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -37753,7 +37753,7 @@ cp_parser_cilk_simd_vectorlength (cp_parser *parser, tree clauses, || !INTEGRAL_TYPE_P (TREE_TYPE (expr))) error_at (loc, "vectorlength must be an integer constant"); else if (TREE_CONSTANT (expr) - && exact_log2 (TREE_INT_CST_LOW (expr)) == -1) + && !pow2p_hwi (TREE_INT_CST_LOW (expr))) error_at (loc, "vectorlength must be a power of 2"); else { diff --git a/gcc/cse.c b/gcc/cse.c index 0bfd7ff1669..99949f0abd5 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -3643,13 +3643,13 @@ fold_rtx (rtx x, rtx_insn *insn) if (code == PLUS && const_arg1 == inner_const && ((HAVE_PRE_INCREMENT - && exact_log2 (INTVAL (const_arg1)) >= 0) + && pow2p_hwi (INTVAL (const_arg1))) || (HAVE_POST_INCREMENT - && exact_log2 (INTVAL (const_arg1)) >= 0) + && pow2p_hwi (INTVAL (const_arg1))) || (HAVE_PRE_DECREMENT - && exact_log2 (- INTVAL (const_arg1)) >= 0) + && pow2p_hwi (- INTVAL (const_arg1))) || (HAVE_POST_DECREMENT - && exact_log2 (- INTVAL (const_arg1)) >= 0))) + && pow2p_hwi (- INTVAL (const_arg1))))) break; /* ??? Vector mode shifts by scalar diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index a7246089222..9e0bda2cc41 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -1964,7 +1964,7 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, get_object_alignment_1 (t, &obj_align, &obj_bitpos); obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1); if (obj_bitpos != 0) - obj_align = (obj_bitpos & -obj_bitpos); + obj_align = least_bit_hwi (obj_bitpos); attrs.align = MAX (attrs.align, obj_align); } @@ -2298,7 +2298,7 @@ adjust_address_1 (rtx memref, machine_mode mode, HOST_WIDE_INT offset, if zero. */ if (offset != 0) { - max_align = (offset & -offset) * BITS_PER_UNIT; + max_align = least_bit_hwi (offset) * BITS_PER_UNIT; attrs.align = MIN (attrs.align, max_align); } diff --git a/gcc/expmed.c b/gcc/expmed.c index a5da8836f21..2f789a2f075 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -127,10 +127,10 @@ init_expmed_one_conv (struct init_expmed_rtl *all, machine_mode to_mode, comparison purposes here, reduce the bit size by one in that case. 
*/ if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT - && exact_log2 (to_size) != -1) + && pow2p_hwi (to_size)) to_size --; if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT - && exact_log2 (from_size) != -1) + && pow2p_hwi (from_size)) from_size --; /* Assume cost of zero-extend and sign-extend is the same. */ @@ -2635,7 +2635,7 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, if ((t & 1) == 0) { do_alg_shift: - m = floor_log2 (t & -t); /* m = number of low zero bits */ + m = ctz_or_zero (t); /* m = number of low zero bits */ if (m < maxm) { q = t >> m; @@ -2872,9 +2872,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, { do_alg_add_t2_m: q = t - 1; - q = q & -q; - m = exact_log2 (q); - if (m >= 0 && m < maxm) + m = ctz_hwi (q); + if (q && m < maxm) { op_cost = shiftadd_cost (speed, mode, m); new_limit.cost = best_cost.cost - op_cost; @@ -2896,9 +2895,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, do_alg_sub_t2_m: q = t + 1; - q = q & -q; - m = exact_log2 (q); - if (m >= 0 && m < maxm) + m = ctz_hwi (q); + if (q && m < maxm) { op_cost = shiftsub0_cost (speed, mode, m); new_limit.cost = best_cost.cost - op_cost; @@ -4214,7 +4212,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, initial right shift. */ if (mh != 0 && (d & 1) == 0) { - pre_shift = floor_log2 (d & -d); + pre_shift = ctz_or_zero (d); mh = choose_multiplier (d >> pre_shift, size, size - pre_shift, &ml, &post_shift, &dummy); @@ -4872,7 +4870,7 @@ expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, int pre_shift; rtx t1; - pre_shift = floor_log2 (d & -d); + pre_shift = ctz_or_zero (d); ml = invert_mod2n (d >> pre_shift, size); t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0, pre_shift, NULL_RTX, unsignedp); diff --git a/gcc/expr.c b/gcc/expr.c index 391a8058654..73e000efca6 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -11065,7 +11065,7 @@ is_aligning_offset (const_tree offset, const_tree exp) || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1)) || compare_tree_int (TREE_OPERAND (offset, 1), BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0 - || exact_log2 (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1) < 0) + || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1)) return 0; /* Look at the first operand of BIT_AND_EXPR and strip any conversion. diff --git a/gcc/fold-const.c b/gcc/fold-const.c index e5c20528b7e..e040b2a279b 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -10006,7 +10006,7 @@ fold_binary_loc (location_t loc, mode which allows further optimizations. */ int pop = wi::popcount (warg1); if (!(pop >= BITS_PER_UNIT - && exact_log2 (pop) != -1 + && pow2p_hwi (pop) && wi::mask (pop, false, warg1.get_precision ()) == warg1)) return fold_build2_loc (loc, code, type, op0, wide_int_to_tree (type, masked)); @@ -14252,7 +14252,7 @@ round_up_loc (location_t loc, tree value, unsigned int divisor) } /* If divisor is a power of two, simplify this to bit manipulation. */ - if (divisor == (divisor & -divisor)) + if (pow2_or_zerop (divisor)) { if (TREE_CODE (value) == INTEGER_CST) { @@ -14315,7 +14315,7 @@ round_down_loc (location_t loc, tree value, int divisor) } /* If divisor is a power of two, simplify this to bit manipulation. 
*/ - if (divisor == (divisor & -divisor)) + if (pow2_or_zerop (divisor)) { tree t; diff --git a/gcc/function.c b/gcc/function.c index 53bad8736e9..94ed786dbc7 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -2716,7 +2716,7 @@ assign_parm_find_stack_rtl (tree parm, struct assign_parm_data_one *data) else if (CONST_INT_P (offset_rtx)) { align = INTVAL (offset_rtx) * BITS_PER_UNIT | boundary; - align = align & -align; + align = least_bit_hwi (align); } set_mem_align (stack_parm, align); diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index fbbe52004aa..2e0bd806987 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -699,7 +699,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi, && !c_strlen (src, 2)) { unsigned ilen = tree_to_uhwi (len); - if (exact_log2 (ilen) != -1) + if (pow2p_hwi (ilen)) { tree type = lang_hooks.types.type_for_size (ilen * 8, 1); if (type diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c index 9e0b3d1d162..68115ee7896 100644 --- a/gcc/gimple-ssa-strength-reduction.c +++ b/gcc/gimple-ssa-strength-reduction.c @@ -1874,7 +1874,7 @@ replace_ref (tree *expr, slsr_cand_t c) requirement for the data type. See PR58041. */ get_object_alignment_1 (*expr, &align, &misalign); if (misalign != 0) - align = (misalign & -misalign); + align = least_bit_hwi (misalign); if (align < TYPE_ALIGN (acc_type)) acc_type = build_aligned_type (acc_type, align); diff --git a/gcc/hsa-gen.c b/gcc/hsa-gen.c index 314bb5b231c..4d1b2767441 100644 --- a/gcc/hsa-gen.c +++ b/gcc/hsa-gen.c @@ -2207,7 +2207,7 @@ gen_hsa_addr_with_align (tree ref, hsa_bb *hbb, BrigAlignment8_t *output_align) unsigned align = hsa_byte_alignment (addr->m_symbol->m_align); unsigned misalign = addr->m_imm_offset & (align - 1); if (misalign) - align = (misalign & -misalign); + align = least_bit_hwi (misalign); *output_align = hsa_alignment_encoding (BITS_PER_UNIT * align); } return addr; @@ -2434,7 +2434,7 @@ hsa_bitmemref_alignment (tree ref) BrigAlignment8_t base = hsa_object_alignment (ref); if (byte_bits == 0) return base; - return MIN (base, hsa_alignment_encoding (byte_bits & -byte_bits)); + return MIN (base, hsa_alignment_encoding (least_bit_hwi (byte_bits))); } /* Generate HSAIL instructions loading something into register DEST. RHS is diff --git a/gcc/hwint.c b/gcc/hwint.c index b936c52e5c1..53730285dd7 100644 --- a/gcc/hwint.c +++ b/gcc/hwint.c @@ -74,7 +74,7 @@ ceil_log2 (unsigned HOST_WIDE_INT x) int exact_log2 (unsigned HOST_WIDE_INT x) { - if (x != (x & -x)) + if (!pow2p_hwi (x)) return -1; return floor_log2 (x); } @@ -85,7 +85,7 @@ exact_log2 (unsigned HOST_WIDE_INT x) int ctz_hwi (unsigned HOST_WIDE_INT x) { - return x ? floor_log2 (x & -x) : HOST_BITS_PER_WIDE_INT; + return x ? floor_log2 (least_bit_hwi (x)) : HOST_BITS_PER_WIDE_INT; } /* Similarly for most significant bits. */ @@ -102,7 +102,7 @@ clz_hwi (unsigned HOST_WIDE_INT x) int ffs_hwi (unsigned HOST_WIDE_INT x) { - return 1 + floor_log2 (x & -x); + return 1 + floor_log2 (least_bit_hwi (x)); } /* Return the number of set bits in X. */ diff --git a/gcc/hwint.h b/gcc/hwint.h index 6b4d53737a2..ea87b2158b1 100644 --- a/gcc/hwint.h +++ b/gcc/hwint.h @@ -134,6 +134,31 @@ typedef HOST_WIDE_INT __gcc_host_wide_int__; #endif /* Inline functions operating on HOST_WIDE_INT. */ + +/* Return X with all but the lowest bit masked off. */ + +static inline unsigned HOST_WIDE_INT +least_bit_hwi (unsigned HOST_WIDE_INT x) +{ + return (x & -x); +} + +/* True if X is zero or a power of two. 
*/ + +static inline bool +pow2_or_zerop (unsigned HOST_WIDE_INT x) +{ + return least_bit_hwi (x) == x; +} + +/* True if X is a power of two. */ + +static inline bool +pow2p_hwi (unsigned HOST_WIDE_INT x) +{ + return x && pow2_or_zerop (x); +} + #if GCC_VERSION < 3004 extern int clz_hwi (unsigned HOST_WIDE_INT x); @@ -222,7 +247,7 @@ ceil_log2 (unsigned HOST_WIDE_INT x) static inline int exact_log2 (unsigned HOST_WIDE_INT x) { - return x == (x & -x) && x ? ctz_hwi (x) : -1; + return pow2p_hwi (x) ? ctz_hwi (x) : -1; } #endif /* GCC_VERSION >= 3004 */ @@ -238,6 +263,14 @@ extern HOST_WIDE_INT pos_mul_hwi (HOST_WIDE_INT, HOST_WIDE_INT); extern HOST_WIDE_INT mul_hwi (HOST_WIDE_INT, HOST_WIDE_INT); extern HOST_WIDE_INT least_common_multiple (HOST_WIDE_INT, HOST_WIDE_INT); +/* Like ctz_hwi, except 0 when x == 0. */ + +static inline int +ctz_or_zero (unsigned HOST_WIDE_INT x) +{ + return ffs_hwi (x) - 1; +} + /* Sign extend SRC starting from PREC. */ static inline HOST_WIDE_INT diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c index ecdfc2e2bf3..24542f00848 100644 --- a/gcc/ifcvt.c +++ b/gcc/ifcvt.c @@ -1417,11 +1417,11 @@ noce_try_store_flag_constants (struct noce_if_info *if_info) gcc_unreachable (); } /* Is this (cond) ? 2^n : 0? */ - else if (ifalse == 0 && exact_log2 (itrue) >= 0 + else if (ifalse == 0 && pow2p_hwi (itrue) && STORE_FLAG_VALUE == 1) normalize = 1; /* Is this (cond) ? 0 : 2^n? */ - else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse + else if (itrue == 0 && pow2p_hwi (ifalse) && can_reverse && STORE_FLAG_VALUE == 1) { normalize = 1; diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c index 5ff7bedc2ec..60fcd023bf8 100644 --- a/gcc/ipa-cp.c +++ b/gcc/ipa-cp.c @@ -929,7 +929,7 @@ ipcp_alignment_lattice::meet_with_1 (unsigned new_align, unsigned new_misalign) if (misalign != (new_misalign % align)) { int diff = abs ((int) misalign - (int) (new_misalign % align)); - align = (unsigned) diff & -diff; + align = least_bit_hwi (diff); if (align) misalign = misalign % align; else diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index 16297817f95..b86a4a06218 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -4170,7 +4170,7 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gcall *stmt, * BITS_PER_UNIT); misalign = misalign & (align - 1); if (misalign != 0) - align = (misalign & -misalign); + align = least_bit_hwi (misalign); if (align < TYPE_ALIGN (type)) type = build_aligned_type (type, align); base = force_gimple_operand_gsi (&gsi, base, diff --git a/gcc/omp-low.c b/gcc/omp-low.c index c890e83a401..bf52d849d9e 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -19452,7 +19452,7 @@ oacc_loop_fixed_partitions (oacc_loop *loop, unsigned outer_mask) } else { - unsigned outermost = this_mask & -this_mask; + unsigned outermost = least_bit_hwi (this_mask); if (outermost && outermost <= outer_mask) { @@ -19533,7 +19533,7 @@ oacc_loop_auto_partitions (oacc_loop *loop, unsigned outer_mask) /* Determine the outermost partitioning used within this loop. */ this_mask = loop->inner | GOMP_DIM_MASK (GOMP_DIM_MAX); - this_mask = (this_mask & -this_mask); + this_mask = least_bit_hwi (this_mask); /* Pick the partitioning just inside that one. 
*/ this_mask >>= 1; diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 69bc4fdd5e1..470c143fcf2 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -4483,8 +4483,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x, int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1; int width0 = floor_log2 (nz0) + 1; int width1 = floor_log2 (nz1) + 1; - int low0 = floor_log2 (nz0 & -nz0); - int low1 = floor_log2 (nz1 & -nz1); + int low0 = ctz_or_zero (nz0); + int low1 = ctz_or_zero (nz1); unsigned HOST_WIDE_INT op0_maybe_minusp = nz0 & (HOST_WIDE_INT_1U << sign_index); unsigned HOST_WIDE_INT op1_maybe_minusp diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index cf7171430f9..07eac874908 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -1169,14 +1169,12 @@ place_field (record_layout_info rli, tree field) /* Work out the known alignment so far. Note that A & (-A) is the value of the least-significant bit in A that is one. */ if (! integer_zerop (rli->bitpos)) - known_align = (tree_to_uhwi (rli->bitpos) - & - tree_to_uhwi (rli->bitpos)); + known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos)); else if (integer_zerop (rli->offset)) known_align = 0; else if (tree_fits_uhwi_p (rli->offset)) known_align = (BITS_PER_UNIT - * (tree_to_uhwi (rli->offset) - & - tree_to_uhwi (rli->offset))); + * least_bit_hwi (tree_to_uhwi (rli->offset))); else known_align = rli->offset_align; @@ -1479,14 +1477,12 @@ place_field (record_layout_info rli, tree field) approximate this by seeing if its position changed), lay out the field again; perhaps we can use an integral mode for it now. */ if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) - actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) - & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); + actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); else if (integer_zerop (DECL_FIELD_OFFSET (field))) actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) actual_align = (BITS_PER_UNIT - * (tree_to_uhwi (DECL_FIELD_OFFSET (field)) - & - tree_to_uhwi (DECL_FIELD_OFFSET (field)))); + * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field)))); else actual_align = DECL_OFFSET_ALIGN (field); /* ACTUAL_ALIGN is still the actual alignment *within the record* . diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index 734ecda7c56..0915fd4243e 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -1353,7 +1353,7 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, int flags, ? "unsigned long long" : "signed long long")); else if (TYPE_PRECISION (node) >= CHAR_TYPE_SIZE - && exact_log2 (TYPE_PRECISION (node)) != -1) + && pow2p_hwi (TYPE_PRECISION (node))) { pp_string (pp, (TYPE_UNSIGNED (node) ? 
"uint" : "int")); pp_decimal_int (pp, TYPE_PRECISION (node)); diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 3c7e4c0c9cc..4a2ff0d174c 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -1680,7 +1680,7 @@ build_ref_for_offset (location_t loc, tree base, HOST_WIDE_INT offset, misalign = (misalign + offset) & (align - 1); if (misalign != 0) - align = (misalign & -misalign); + align = least_bit_hwi (misalign); if (align != TYPE_ALIGN (exp_type)) exp_type = build_aligned_type (exp_type, align); diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index d5a05608f78..b6ccb59d241 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -928,7 +928,7 @@ ccp_finalize (bool nonzero_p) /* Trailing mask bits specify the alignment, trailing value bits the misalignment. */ tem = val->mask.to_uhwi (); - align = (tem & -tem); + align = least_bit_hwi (tem); if (align > 1) set_ptr_info_alignment (get_ptr_info (name), align, (TREE_INT_CST_LOW (val->value) diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c index b93bcf348d1..0cea1a8472d 100644 --- a/gcc/tree-ssa-math-opts.c +++ b/gcc/tree-ssa-math-opts.c @@ -2647,7 +2647,7 @@ bswap_replace (gimple *cur_stmt, gimple *src_stmt, tree fndecl, unsigned HOST_WIDE_INT l = (load_offset * BITS_PER_UNIT) & (align - 1); if (l) - align = l & -l; + align = least_bit_hwi (l); } } diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c index 9d7b4df4b1e..339812e91c0 100644 --- a/gcc/tree-ssa-strlen.c +++ b/gcc/tree-ssa-strlen.c @@ -1983,7 +1983,7 @@ handle_builtin_memcmp (gimple_stmt_iterator *gsi) if (tree_fits_uhwi_p (len) && (leni = tree_to_uhwi (len)) <= GET_MODE_SIZE (word_mode) - && exact_log2 (leni) != -1) + && pow2p_hwi (leni)) { leni *= CHAR_TYPE_SIZE; unsigned align1 = get_pointer_alignment (arg1); diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index e908c865fbd..03c4a66103e 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -2241,7 +2241,7 @@ vect_analyze_group_access_1 (struct data_reference *dr) if (DR_IS_READ (dr) && (dr_step % type_size) == 0 && groupsize > 0 - && exact_log2 (groupsize) != -1) + && pow2p_hwi (groupsize)) { GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt; GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize; @@ -4736,7 +4736,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count) else { /* If length is not equal to 3 then only power of 2 is supported. */ - gcc_assert (exact_log2 (count) != -1); + gcc_assert (pow2p_hwi (count)); for (i = 0; i < nelt / 2; i++) { @@ -4914,7 +4914,7 @@ vect_permute_store_chain (vec dr_chain, else { /* If length is not equal to 3 then only power of 2 is supported. */ - gcc_assert (exact_log2 (length) != -1); + gcc_assert (pow2p_hwi (length)); for (i = 0, n = nelt / 2; i < n; i++) { @@ -5309,7 +5309,7 @@ vect_grouped_load_supported (tree vectype, bool single_element_p, else { /* If length is not equal to 3 then only power of 2 is supported. */ - gcc_assert (exact_log2 (count) != -1); + gcc_assert (pow2p_hwi (count)); for (i = 0; i < nelt; i++) sel[i] = i * 2; if (can_vec_perm_p (mode, false, sel)) @@ -5483,7 +5483,7 @@ vect_permute_load_chain (vec dr_chain, else { /* If length is not equal to 3 then only power of 2 is supported. 
*/ - gcc_assert (exact_log2 (length) != -1); + gcc_assert (pow2p_hwi (length)); for (i = 0; i < nelt; ++i) sel[i] = i * 2; @@ -5632,7 +5632,7 @@ vect_shift_permute_load_chain (vec dr_chain, memcpy (result_chain->address (), dr_chain.address (), length * sizeof (tree)); - if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4) + if (pow2p_hwi (length) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4) { unsigned int j, log_length = exact_log2 (length); for (i = 0; i < nelt / 2; ++i) @@ -5880,7 +5880,7 @@ vect_transform_grouped_load (gimple *stmt, vec dr_chain, int size, get chain for loads group using vect_shift_permute_load_chain. */ mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt))); if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1 - || exact_log2 (size) != -1 + || pow2p_hwi (size) || !vect_shift_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain)) vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain); diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c index 9f0ec656bad..5d4273fcd0e 100644 --- a/gcc/tree-vect-generic.c +++ b/gcc/tree-vect-generic.c @@ -494,7 +494,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, || (!has_vector_shift && pre_shift != -1)) { if (has_vector_shift) - pre_shift = floor_log2 (d & -d); + pre_shift = ctz_or_zero (d); else if (pre_shift == -1) { unsigned int j; diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index 7e6e45d1d54..3dfbc7bb63d 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -2736,7 +2736,7 @@ vect_recog_divmod_pattern (vec *stmts, for even divisors, using an initial right shift. */ if (mh != 0 && (d & 1) == 0) { - pre_shift = floor_log2 (d & -d); + pre_shift = ctz_or_zero (d); mh = choose_multiplier (d >> pre_shift, prec, prec - pre_shift, &ml, &post_shift, &dummy_int); gcc_assert (!mh); diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 6a6167ba504..a83071c1a70 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -2340,7 +2340,7 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi, set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign); tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), - misalign ? misalign & -misalign : align); + misalign ? least_bit_hwi (misalign) : align); new_stmt = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr, ptr, vec_mask, vec_rhs); @@ -2390,7 +2390,7 @@ vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi, set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign); tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), - misalign ? misalign & -misalign : align); + misalign ? 
least_bit_hwi (misalign) : align); new_stmt = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr, ptr, vec_mask); diff --git a/gcc/tsan.c b/gcc/tsan.c index d69432e5294..aba21f81bd6 100644 --- a/gcc/tsan.c +++ b/gcc/tsan.c @@ -175,7 +175,7 @@ instrument_expr (gimple_stmt_iterator gsi, tree expr, bool is_write) if ((align - 1) & bitpos) { align = (align - 1) & bitpos; - align = align & -align; + align = least_bit_hwi (align); } expr = build_fold_addr_expr (unshare_expr (base)); expr = build2 (MEM_REF, char_type_node, expr, diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index fdad87459fb..07b3e079aaa 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -1983,7 +1983,7 @@ static bool negative_power_of_two_p (HOST_WIDE_INT i) { unsigned HOST_WIDE_INT x = -(unsigned HOST_WIDE_INT)i; - return x == (x & -x); + return pow2_or_zerop (x); } /* Strip constant offsets and alignments off of LOC. Return the base diff --git a/gcc/varasm.c b/gcc/varasm.c index 00a9b30e73c..ba866ce8044 100644 --- a/gcc/varasm.c +++ b/gcc/varasm.c @@ -2632,7 +2632,7 @@ assemble_trampoline_template (void) static inline unsigned min_align (unsigned int a, unsigned int b) { - return (a | b) & -(a | b); + return least_bit_hwi (a | b); } /* Return the assembler directive for creating a given kind of integer -- 2.30.2