+2016-09-16 Jason Merrill <jason@redhat.com>
+
+ * hwint.h (least_bit_hwi, pow2_or_zerop, pow2p_hwi, ctz_or_zero):
+ New.
+ * hwint.c (exact_log2): Use pow2p_hwi.
+ (ctz_hwi, ffs_hwi): Use least_bit_hwi.
+ * alias.c (memrefs_conflict_p): Use pow2_or_zerop.
+ * builtins.c (get_object_alignment_2, get_object_alignment)
+ (get_pointer_alignment, fold_builtin_atomic_always_lock_free): Use
+ least_bit_hwi.
+ * calls.c (compute_argument_addresses, store_one_arg): Use
+ least_bit_hwi.
+ * cfgexpand.c (expand_one_stack_var_at): Use least_bit_hwi.
+ * combine.c (force_to_mode): Use least_bit_hwi.
+ (contains_muldiv, find_split_point, combine_simplify_rtx)
+ (simplify_if_then_else, simplify_set, force_to_mode)
+ (if_then_else_cond, simplify_and_const_int_1)
+ (simplify_compare_const): Use pow2p_hwi.
+ * cse.c (fold_rtx): Use pow2p_hwi.
+ * emit-rtl.c (set_mem_attributes_minus_bitpos, adjust_address_1):
+ Use least_bit_hwi.
+ * expmed.c (synth_mult, expand_divmod): Use ctz_or_zero, ctz_hwi.
+ (init_expmed_one_conv): Use pow2p_hwi.
+ * expr.c (is_aligning_offset): Use pow2p_hwi.
+ * fold-const.c (round_up_loc, round_down_loc): Use pow2_or_zerop.
+ (fold_binary_loc): Use pow2p_hwi.
+ * function.c (assign_parm_find_stack_rtl): Use least_bit_hwi.
+ * gimple-fold.c (gimple_fold_builtin_memory_op): Use pow2p_hwi.
+ * gimple-ssa-strength-reduction.c (replace_ref): Use least_bit_hwi.
+ * hsa-gen.c (gen_hsa_addr_with_align, hsa_bitmemref_alignment):
+ Use least_bit_hwi.
+ * ifcvt.c (noce_try_store_flag_constants): Use pow2p_hwi.
+ * ipa-cp.c (ipcp_alignment_lattice::meet_with_1): Use least_bit_hwi.
+ * ipa-prop.c (ipa_modify_call_arguments): Use least_bit_hwi.
+ * omp-low.c (oacc_loop_fixed_partitions)
+ (oacc_loop_auto_partitions): Use least_bit_hwi.
+ * rtlanal.c (nonzero_bits1): Use ctz_or_zero.
+ * stor-layout.c (place_field): Use least_bit_hwi.
+ * tree-pretty-print.c (dump_generic_node): Use pow2p_hwi.
+ * tree-sra.c (build_ref_for_offset): Use least_bit_hwi.
+ * tree-ssa-ccp.c (ccp_finalize): Use least_bit_hwi.
+ * tree-ssa-math-opts.c (bswap_replace): Use least_bit_hwi.
+ * tree-ssa-strlen.c (handle_builtin_memcmp): Use pow2p_hwi.
+ * tree-vect-data-refs.c (vect_analyze_group_access_1)
+ (vect_grouped_store_supported, vect_grouped_load_supported)
+ (vect_permute_load_chain, vect_shift_permute_load_chain)
+ (vect_transform_grouped_load): Use pow2p_hwi.
+ * tree-vect-generic.c (expand_vector_divmod): Use ctz_or_zero.
+ * tree-vect-patterns.c (vect_recog_divmod_pattern): Use ctz_or_zero.
+ * tree-vect-stmts.c (vectorizable_mask_load_store): Use
+ least_bit_hwi.
+ * tsan.c (instrument_expr): Use least_bit_hwi.
+ * var-tracking.c (negative_power_of_two_p): Use pow2_or_zerop.
+
2016-09-16 Andreas Schwab <schwab@suse.de>

	* config/ia64/ia64.h (ASM_OUTPUT_DWARF_OFFSET): Use parameter
{
HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
unsigned HOST_WIDE_INT uc = sc;
- if (sc < 0 && -uc == (uc & -uc))
+ if (sc < 0 && pow2_or_zerop (-uc))
{
if (xsize > 0)
xsize = -xsize;
{
HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
unsigned HOST_WIDE_INT uc = sc;
- if (sc < 0 && -uc == (uc & -uc))
+ if (sc < 0 && pow2_or_zerop (-uc))
{
if (ysize > 0)
ysize = -ysize;
{
ptr_bitmask = TREE_INT_CST_LOW (TREE_OPERAND (addr, 1));
ptr_bitmask *= BITS_PER_UNIT;
- align = ptr_bitmask & -ptr_bitmask;
+ align = least_bit_hwi (ptr_bitmask);
addr = TREE_OPERAND (addr, 0);
}
unsigned HOST_WIDE_INT step = 1;
if (TMR_STEP (exp))
step = TREE_INT_CST_LOW (TMR_STEP (exp));
- align = MIN (align, (step & -step) * BITS_PER_UNIT);
+ align = MIN (align, least_bit_hwi (step) * BITS_PER_UNIT);
}
if (TMR_INDEX2 (exp))
align = BITS_PER_UNIT;
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
- align = (bitpos & -bitpos);
+ align = least_bit_hwi (bitpos);
return align;
}
ptr & (align - 1) == bitpos. */
if (bitpos != 0)
- align = (bitpos & -bitpos);
+ align = least_bit_hwi (bitpos);
return align;
}
/* Either this argument is null, or it's a fake pointer encoding
the alignment of the object. */
- val = val & -val;
+ val = least_bit_hwi (val);
val *= BITS_PER_UNIT;
if (val == 0 || mode_align < val)
else if (CONST_INT_P (offset))
{
align = INTVAL (offset) * BITS_PER_UNIT | boundary;
- align = align & -align;
+ align = least_bit_hwi (align);
}
set_mem_align (args[i].stack, align);
int pad = used - size;
if (pad)
{
- unsigned int pad_align = (pad & -pad) * BITS_PER_UNIT;
+ unsigned int pad_align = least_bit_hwi (pad) * BITS_PER_UNIT;
parm_align = MIN (parm_align, pad_align);
}
}
parm_align = BITS_PER_UNIT;
else if (excess)
{
- unsigned int excess_align = (excess & -excess) * BITS_PER_UNIT;
+ unsigned int excess_align = least_bit_hwi (excess) * BITS_PER_UNIT;
parm_align = MIN (parm_align, excess_align);
}
}
important, we'll simply use the alignment that is already set. */
if (base == virtual_stack_vars_rtx)
offset -= frame_phase;
- align = offset & -offset;
+ align = least_bit_hwi (offset);
align *= BITS_PER_UNIT;
if (align == 0 || align > base_align)
align = base_align;
case MULT:
return ! (CONST_INT_P (XEXP (x, 1))
- && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
+ && pow2p_hwi (UINTVAL (XEXP (x, 1))));
default:
if (BINARY_P (x))
return contains_muldiv (XEXP (x, 0))
instead if this isn't a multiply by a power of two. */
if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
&& GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
- && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
+ && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
{
machine_mode mode = GET_MODE (x);
unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
(and <foo> (const_int pow2-1)) */
if (GET_CODE (XEXP (x, 1)) == AND
&& CONST_INT_P (XEXP (XEXP (x, 1), 1))
- && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
+ && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
&& rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
-INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
not equal to zero. Similarly if it is known to be -1 or 0. */
if (true_code == EQ && true_val == const0_rtx
- && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
+ && pow2p_hwi (nzb = nonzero_bits (from, GET_MODE (from))))
{
false_code = EQ;
false_val = gen_int_mode (nzb, GET_MODE (from));
|| (old_code == EQ && new_code == NE))
&& ! other_changed_previously && op1 == const0_rtx
&& HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
- && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
+ && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
{
rtx pat = PATTERN (other_insn), note = 0;
smask |= HOST_WIDE_INT_M1U << width;
if (CONST_INT_P (XEXP (x, 1))
- && exact_log2 (- smask) >= 0
+ && pow2p_hwi (- smask)
&& (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
&& (INTVAL (XEXP (x, 1)) & ~smask) != 0)
return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
/* If X is (minus C Y) where C's least set bit is larger than any bit
in the mask, then we may replace with (neg Y). */
if (CONST_INT_P (XEXP (x, 0))
- && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
+ && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
{
x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
GET_MODE (x));
&& ((INTVAL (XEXP (x, 1))
+ num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
>= GET_MODE_PRECISION (GET_MODE (x)))
- && exact_log2 (mask + 1) >= 0
+ && pow2p_hwi (mask + 1)
/* Number of bits left after the shift must be more than the mask
needs. */
&& ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
if ((mask & ~STORE_FLAG_VALUE) == 0
&& XEXP (x, 1) == const0_rtx
&& GET_MODE (XEXP (x, 0)) == mode
- && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
+ && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
&& (nonzero_bits (XEXP (x, 0), mode)
== (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
return force_to_mode (XEXP (x, 0), mode, mask, next_select);
/* Likewise for 0 or a single bit. */
else if (HWI_COMPUTABLE_MODE_P (mode)
- && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
+ && pow2p_hwi (nz = nonzero_bits (x, mode)))
{
*ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
return x;
may eliminate it. */
if (GET_CODE (varop) == PLUS
- && exact_log2 (constop + 1) >= 0)
+ && pow2p_hwi (constop + 1))
{
rtx o0, o1;
&& (code == EQ || code == NE || code == GE || code == GEU
|| code == LT || code == LTU)
&& mode_width - 1 < HOST_BITS_PER_WIDE_INT
- && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
+ && pow2p_hwi (const_op & GET_MODE_MASK (mode))
&& (nonzero_bits (op0, mode)
== (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
{
+2016-09-16 Jason Merrill <jason@redhat.com>
+
+ * class.c (check_bases, set_one_vmethod_tm_attributes): Use
+ least_bit_hwi.
+ * decl.c (cxx_init_decl_processing): Use pow2p_hwi.
+ * parser.c (cp_parser_cilk_simd_vectorlength): Use pow2p_hwi.
+
2016-09-14 Jakub Jelinek <jakub@redhat.com>

	PR c++/77549
doesn't define its own, then the current class inherits one. */
if (seen_tm_mask && !find_tm_attribute (TYPE_ATTRIBUTES (t)))
{
- tree tm_attr = tm_mask_to_attr (seen_tm_mask & -seen_tm_mask);
+ tree tm_attr = tm_mask_to_attr (least_bit_hwi (seen_tm_mask));
TYPE_ATTRIBUTES (t) = tree_cons (tm_attr, NULL, TYPE_ATTRIBUTES (t));
}
}
restrictive one. */
else if (tm_attr == NULL)
{
- apply_tm_attr (fndecl, tm_mask_to_attr (found & -found));
+ apply_tm_attr (fndecl, tm_mask_to_attr (least_bit_hwi (found)));
}
/* Otherwise validate that we're not weaker than a function
that is being overridden. */
current_lang_name = lang_name_cplusplus;
if (aligned_new_threshhold > 1
- && exact_log2 (aligned_new_threshhold) == -1)
+ && !pow2p_hwi (aligned_new_threshhold))
{
error ("-faligned-new=%d is not a power of two", aligned_new_threshhold);
aligned_new_threshhold = 1;
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr)))
error_at (loc, "vectorlength must be an integer constant");
else if (TREE_CONSTANT (expr)
- && exact_log2 (TREE_INT_CST_LOW (expr)) == -1)
+ && !pow2p_hwi (TREE_INT_CST_LOW (expr)))
error_at (loc, "vectorlength must be a power of 2");
else
{
if (code == PLUS && const_arg1 == inner_const
&& ((HAVE_PRE_INCREMENT
- && exact_log2 (INTVAL (const_arg1)) >= 0)
+ && pow2p_hwi (INTVAL (const_arg1)))
|| (HAVE_POST_INCREMENT
- && exact_log2 (INTVAL (const_arg1)) >= 0)
+ && pow2p_hwi (INTVAL (const_arg1)))
|| (HAVE_PRE_DECREMENT
- && exact_log2 (- INTVAL (const_arg1)) >= 0)
+ && pow2p_hwi (- INTVAL (const_arg1)))
|| (HAVE_POST_DECREMENT
- && exact_log2 (- INTVAL (const_arg1)) >= 0)))
+ && pow2p_hwi (- INTVAL (const_arg1)))))
break;
/* ??? Vector mode shifts by scalar
get_object_alignment_1 (t, &obj_align, &obj_bitpos);
obj_bitpos = (obj_bitpos - bitpos) & (obj_align - 1);
if (obj_bitpos != 0)
- obj_align = (obj_bitpos & -obj_bitpos);
+ obj_align = least_bit_hwi (obj_bitpos);
attrs.align = MAX (attrs.align, obj_align);
}
if zero. */
if (offset != 0)
{
- max_align = (offset & -offset) * BITS_PER_UNIT;
+ max_align = least_bit_hwi (offset) * BITS_PER_UNIT;
attrs.align = MIN (attrs.align, max_align);
}
comparison purposes here, reduce the bit size by one in that
case. */
if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
- && exact_log2 (to_size) != -1)
+ && pow2p_hwi (to_size))
to_size --;
if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
- && exact_log2 (from_size) != -1)
+ && pow2p_hwi (from_size))
from_size --;
/* Assume cost of zero-extend and sign-extend is the same. */
if ((t & 1) == 0)
{
do_alg_shift:
- m = floor_log2 (t & -t); /* m = number of low zero bits */
+ m = ctz_or_zero (t); /* m = number of low zero bits */
if (m < maxm)
{
q = t >> m;
{
do_alg_add_t2_m:
q = t - 1;
- q = q & -q;
- m = exact_log2 (q);
- if (m >= 0 && m < maxm)
+ m = ctz_hwi (q);
+ if (q && m < maxm)
{
op_cost = shiftadd_cost (speed, mode, m);
new_limit.cost = best_cost.cost - op_cost;
do_alg_sub_t2_m:
q = t + 1;
- q = q & -q;
- m = exact_log2 (q);
- if (m >= 0 && m < maxm)
+ m = ctz_hwi (q);
+ if (q && m < maxm)
{
op_cost = shiftsub0_cost (speed, mode, m);
new_limit.cost = best_cost.cost - op_cost;
initial right shift. */
if (mh != 0 && (d & 1) == 0)
{
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
mh = choose_multiplier (d >> pre_shift, size,
size - pre_shift,
&ml, &post_shift, &dummy);
int pre_shift;
rtx t1;
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
ml = invert_mod2n (d >> pre_shift, size);
t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
pre_shift, NULL_RTX, unsignedp);
|| !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
|| compare_tree_int (TREE_OPERAND (offset, 1),
BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
- || exact_log2 (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1) < 0)
+ || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
return 0;
/* Look at the first operand of BIT_AND_EXPR and strip any conversion.
mode which allows further optimizations. */
int pop = wi::popcount (warg1);
if (!(pop >= BITS_PER_UNIT
- && exact_log2 (pop) != -1
+ && pow2p_hwi (pop)
&& wi::mask (pop, false, warg1.get_precision ()) == warg1))
return fold_build2_loc (loc, code, type, op0,
wide_int_to_tree (type, masked));
}
/* If divisor is a power of two, simplify this to bit manipulation. */
- if (divisor == (divisor & -divisor))
+ if (pow2_or_zerop (divisor))
{
if (TREE_CODE (value) == INTEGER_CST)
{
}
/* If divisor is a power of two, simplify this to bit manipulation. */
- if (divisor == (divisor & -divisor))
+ if (pow2_or_zerop (divisor))
{
tree t;
else if (CONST_INT_P (offset_rtx))
{
align = INTVAL (offset_rtx) * BITS_PER_UNIT | boundary;
- align = align & -align;
+ align = least_bit_hwi (align);
}
set_mem_align (stack_parm, align);
&& !c_strlen (src, 2))
{
unsigned ilen = tree_to_uhwi (len);
- if (exact_log2 (ilen) != -1)
+ if (pow2p_hwi (ilen))
{
tree type = lang_hooks.types.type_for_size (ilen * 8, 1);
if (type
requirement for the data type. See PR58041. */
get_object_alignment_1 (*expr, &align, &misalign);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align < TYPE_ALIGN (acc_type))
acc_type = build_aligned_type (acc_type, align);
unsigned align = hsa_byte_alignment (addr->m_symbol->m_align);
unsigned misalign = addr->m_imm_offset & (align - 1);
if (misalign)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
*output_align = hsa_alignment_encoding (BITS_PER_UNIT * align);
}
return addr;
BrigAlignment8_t base = hsa_object_alignment (ref);
if (byte_bits == 0)
return base;
- return MIN (base, hsa_alignment_encoding (byte_bits & -byte_bits));
+ return MIN (base, hsa_alignment_encoding (least_bit_hwi (byte_bits)));
}
/* Generate HSAIL instructions loading something into register DEST. RHS is
int
exact_log2 (unsigned HOST_WIDE_INT x)
{
- if (x != (x & -x))
+ if (!pow2p_hwi (x))
return -1;
return floor_log2 (x);
}
int
ctz_hwi (unsigned HOST_WIDE_INT x)
{
- return x ? floor_log2 (x & -x) : HOST_BITS_PER_WIDE_INT;
+ return x ? floor_log2 (least_bit_hwi (x)) : HOST_BITS_PER_WIDE_INT;
}
/* Similarly for most significant bits. */
int
ffs_hwi (unsigned HOST_WIDE_INT x)
{
- return 1 + floor_log2 (x & -x);
+ return 1 + floor_log2 (least_bit_hwi (x));
}
/* Return the number of set bits in X. */
#endif
/* Inline functions operating on HOST_WIDE_INT. */
+
+/* Return X with all but the lowest set bit masked off. */
+
+static inline unsigned HOST_WIDE_INT
+least_bit_hwi (unsigned HOST_WIDE_INT x)
+{
+ return (x & -x);
+}
+
+/* True if X is zero or a power of two. */
+
+static inline bool
+pow2_or_zerop (unsigned HOST_WIDE_INT x)
+{
+ return least_bit_hwi (x) == x;
+}
+
+/* True if X is a power of two. */
+
+static inline bool
+pow2p_hwi (unsigned HOST_WIDE_INT x)
+{
+ return x && pow2_or_zerop (x);
+}
+
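(A standalone sketch, not part of the patch, of what the three new predicates
compute.  Here unsigned long long and <assert.h> stand in for unsigned
HOST_WIDE_INT and GCC's own machinery, and the helper name least_bit is purely
illustrative.)

    #include <assert.h>

    /* Stand-in for least_bit_hwi: keep only the lowest set bit (0 stays 0).  */
    static unsigned long long
    least_bit (unsigned long long x)
    {
      return x & -x;
    }

    int
    main (void)
    {
      /* least_bit_hwi keeps the lowest set bit: 0b11000 -> 0b01000.  */
      assert (least_bit (0x18) == 0x08);
      assert (least_bit (0) == 0);

      /* pow2_or_zerop (x) tests least_bit_hwi (x) == x, so 0 and powers of
         two pass while anything with two or more set bits fails.  */
      assert (least_bit (0) == 0 && least_bit (64) == 64);
      assert (least_bit (0x18) != 0x18);

      /* pow2p_hwi (x) additionally requires x != 0, matching the old
         exact_log2 (x) >= 0 tests replaced throughout this patch.  */
      return 0;
    }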
#if GCC_VERSION < 3004
extern int clz_hwi (unsigned HOST_WIDE_INT x);
static inline int
exact_log2 (unsigned HOST_WIDE_INT x)
{
- return x == (x & -x) && x ? ctz_hwi (x) : -1;
+ return pow2p_hwi (x) ? ctz_hwi (x) : -1;
}
#endif /* GCC_VERSION >= 3004 */
extern HOST_WIDE_INT mul_hwi (HOST_WIDE_INT, HOST_WIDE_INT);
extern HOST_WIDE_INT least_common_multiple (HOST_WIDE_INT, HOST_WIDE_INT);
+/* Like ctz_hwi, except that X == 0 yields -1 rather than
+   HOST_BITS_PER_WIDE_INT, matching floor_log2 (X & -X). */
+
+static inline int
+ctz_or_zero (unsigned HOST_WIDE_INT x)
+{
+ return ffs_hwi (x) - 1;
+}
+
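(Likewise not part of the patch: ctz_or_zero substitutes for the
floor_log2 (x & -x) idiom it replaces, agreeing with ctz_hwi for nonzero
arguments and yielding -1 for zero, where ctz_hwi would yield
HOST_BITS_PER_WIDE_INT.  A sketch on unsigned long long, with
__builtin_ffsll standing in for ffs_hwi and the helper name
ctz_or_zero_ull purely illustrative.)

    #include <assert.h>

    /* Stand-in for ctz_or_zero: ffs - 1, i.e. the 0-based index of the
       lowest set bit, or -1 when no bit is set.  */
    static int
    ctz_or_zero_ull (unsigned long long x)
    {
      return __builtin_ffsll (x) - 1;
    }

    int
    main (void)
    {
      assert (ctz_or_zero_ull (1) == 0);
      assert (ctz_or_zero_ull (8) == 3);
      assert (ctz_or_zero_ull (0x18) == 3);   /* lowest set bit of 0b11000 */
      assert (ctz_or_zero_ull (0) == -1);     /* same as floor_log2 (0) */
      return 0;
    }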
/* Sign extend SRC starting from PREC. */
static inline HOST_WIDE_INT
gcc_unreachable ();
}
/* Is this (cond) ? 2^n : 0? */
- else if (ifalse == 0 && exact_log2 (itrue) >= 0
+ else if (ifalse == 0 && pow2p_hwi (itrue)
&& STORE_FLAG_VALUE == 1)
normalize = 1;
/* Is this (cond) ? 0 : 2^n? */
- else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse
+ else if (itrue == 0 && pow2p_hwi (ifalse) && can_reverse
&& STORE_FLAG_VALUE == 1)
{
normalize = 1;
if (misalign != (new_misalign % align))
{
int diff = abs ((int) misalign - (int) (new_misalign % align));
- align = (unsigned) diff & -diff;
+ align = least_bit_hwi (diff);
if (align)
misalign = misalign % align;
else
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align < TYPE_ALIGN (type))
type = build_aligned_type (type, align);
base = force_gimple_operand_gsi (&gsi, base,
}
else
{
- unsigned outermost = this_mask & -this_mask;
+ unsigned outermost = least_bit_hwi (this_mask);
if (outermost && outermost <= outer_mask)
{
/* Determine the outermost partitioning used within this loop. */
this_mask = loop->inner | GOMP_DIM_MASK (GOMP_DIM_MAX);
- this_mask = (this_mask & -this_mask);
+ this_mask = least_bit_hwi (this_mask);
/* Pick the partitioning just inside that one. */
this_mask >>= 1;
int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
int width0 = floor_log2 (nz0) + 1;
int width1 = floor_log2 (nz1) + 1;
- int low0 = floor_log2 (nz0 & -nz0);
- int low1 = floor_log2 (nz1 & -nz1);
+ int low0 = ctz_or_zero (nz0);
+ int low1 = ctz_or_zero (nz1);
unsigned HOST_WIDE_INT op0_maybe_minusp
= nz0 & (HOST_WIDE_INT_1U << sign_index);
unsigned HOST_WIDE_INT op1_maybe_minusp
/* Work out the known alignment so far. Note that A & (-A) is the
value of the least-significant bit in A that is one. */
if (! integer_zerop (rli->bitpos))
- known_align = (tree_to_uhwi (rli->bitpos)
- & - tree_to_uhwi (rli->bitpos));
+ known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
else if (integer_zerop (rli->offset))
known_align = 0;
else if (tree_fits_uhwi_p (rli->offset))
known_align = (BITS_PER_UNIT
- * (tree_to_uhwi (rli->offset)
- & - tree_to_uhwi (rli->offset)));
+ * least_bit_hwi (tree_to_uhwi (rli->offset)));
else
known_align = rli->offset_align;
approximate this by seeing if its position changed), lay out the field
again; perhaps we can use an integral mode for it now. */
if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
- actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
- & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
+ actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
else if (integer_zerop (DECL_FIELD_OFFSET (field)))
actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
actual_align = (BITS_PER_UNIT
- * (tree_to_uhwi (DECL_FIELD_OFFSET (field))
- & - tree_to_uhwi (DECL_FIELD_OFFSET (field))));
+ * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
else
actual_align = DECL_OFFSET_ALIGN (field);
/* ACTUAL_ALIGN is still the actual alignment *within the record* .
? "unsigned long long"
: "signed long long"));
else if (TYPE_PRECISION (node) >= CHAR_TYPE_SIZE
- && exact_log2 (TYPE_PRECISION (node)) != -1)
+ && pow2p_hwi (TYPE_PRECISION (node)))
{
pp_string (pp, (TYPE_UNSIGNED (node) ? "uint" : "int"));
pp_decimal_int (pp, TYPE_PRECISION (node));
misalign = (misalign + offset) & (align - 1);
if (misalign != 0)
- align = (misalign & -misalign);
+ align = least_bit_hwi (misalign);
if (align != TYPE_ALIGN (exp_type))
exp_type = build_aligned_type (exp_type, align);
/* Trailing mask bits specify the alignment, trailing value
bits the misalignment. */
tem = val->mask.to_uhwi ();
- align = (tem & -tem);
+ align = least_bit_hwi (tem);
if (align > 1)
set_ptr_info_alignment (get_ptr_info (name), align,
(TREE_INT_CST_LOW (val->value)
unsigned HOST_WIDE_INT l
= (load_offset * BITS_PER_UNIT) & (align - 1);
if (l)
- align = l & -l;
+ align = least_bit_hwi (l);
}
}
if (tree_fits_uhwi_p (len)
&& (leni = tree_to_uhwi (len)) <= GET_MODE_SIZE (word_mode)
- && exact_log2 (leni) != -1)
+ && pow2p_hwi (leni))
{
leni *= CHAR_TYPE_SIZE;
unsigned align1 = get_pointer_alignment (arg1);
if (DR_IS_READ (dr)
&& (dr_step % type_size) == 0
&& groupsize > 0
- && exact_log2 (groupsize) != -1)
+ && pow2p_hwi (groupsize))
{
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (count) != -1);
+ gcc_assert (pow2p_hwi (count));
for (i = 0; i < nelt / 2; i++)
{
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (length) != -1);
+ gcc_assert (pow2p_hwi (length));
for (i = 0, n = nelt / 2; i < n; i++)
{
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (count) != -1);
+ gcc_assert (pow2p_hwi (count));
for (i = 0; i < nelt; i++)
sel[i] = i * 2;
if (can_vec_perm_p (mode, false, sel))
else
{
/* If length is not equal to 3 then only power of 2 is supported. */
- gcc_assert (exact_log2 (length) != -1);
+ gcc_assert (pow2p_hwi (length));
for (i = 0; i < nelt; ++i)
sel[i] = i * 2;
memcpy (result_chain->address (), dr_chain.address (),
length * sizeof (tree));
- if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
+ if (pow2p_hwi (length) && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
{
unsigned int j, log_length = exact_log2 (length);
for (i = 0; i < nelt / 2; ++i)
get chain for loads group using vect_shift_permute_load_chain. */
mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
- || exact_log2 (size) != -1
+ || pow2p_hwi (size)
|| !vect_shift_permute_load_chain (dr_chain, size, stmt,
gsi, &result_chain))
vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
|| (!has_vector_shift && pre_shift != -1))
{
if (has_vector_shift)
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
else if (pre_shift == -1)
{
unsigned int j;
for even divisors, using an initial right shift. */
if (mh != 0 && (d & 1) == 0)
{
- pre_shift = floor_log2 (d & -d);
+ pre_shift = ctz_or_zero (d);
mh = choose_multiplier (d >> pre_shift, prec, prec - pre_shift,
&ml, &post_shift, &dummy_int);
gcc_assert (!mh);
set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
misalign);
tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign ? misalign & -misalign : align);
+ misalign ? least_bit_hwi (misalign) : align);
new_stmt
= gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
ptr, vec_mask, vec_rhs);
set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
misalign);
tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
- misalign ? misalign & -misalign : align);
+ misalign ? least_bit_hwi (misalign) : align);
new_stmt
= gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
ptr, vec_mask);
if ((align - 1) & bitpos)
{
align = (align - 1) & bitpos;
- align = align & -align;
+ align = least_bit_hwi (align);
}
expr = build_fold_addr_expr (unshare_expr (base));
expr = build2 (MEM_REF, char_type_node, expr,
negative_power_of_two_p (HOST_WIDE_INT i)
{
unsigned HOST_WIDE_INT x = -(unsigned HOST_WIDE_INT)i;
- return x == (x & -x);
+ return pow2_or_zerop (x);
}
/* Strip constant offsets and alignments off of LOC. Return the base
static inline unsigned
min_align (unsigned int a, unsigned int b)
{
- return (a | b) & -(a | b);
+ return least_bit_hwi (a | b);
}
/* Return the assembler directive for creating a given kind of integer