offset = INTVAL (XEXP (dest, 2));
dest = XEXP (dest, 0);
if (BITS_BIG_ENDIAN)
- offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
+ offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
}
}
else
{
if (GET_CODE (dest) == STRICT_LOW_PART)
dest = XEXP (dest, 0);
- width = GET_MODE_BITSIZE (GET_MODE (dest));
+ width = GET_MODE_PRECISION (GET_MODE (dest));
offset = 0;
}
if (subreg_lowpart_p (dest))
;
/* Handle the case where inner is twice the size of outer. */
- else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
- == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
- offset += GET_MODE_BITSIZE (GET_MODE (dest));
+ else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
+ == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
+ offset += GET_MODE_PRECISION (GET_MODE (dest));
/* Otherwise give up for now. */
else
offset = -1;
}
if (offset >= 0
- && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
+ && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
<= HOST_BITS_PER_DOUBLE_INT))
{
double_int m, o, i;
(REG_P (temp)
&& VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits != 0
- && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
&& (VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits
!= GET_MODE_MASK (word_mode))))
(REG_P (temp)
&& VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits != 0
- && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
+ && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT
&& (VEC_index (reg_stat_type, reg_stat,
REGNO (temp))->nonzero_bits
!= GET_MODE_MASK (word_mode)))))
&& CONST_INT_P (SET_SRC (x))
&& ((INTVAL (XEXP (SET_DEST (x), 1))
+ INTVAL (XEXP (SET_DEST (x), 2)))
- <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
+ <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
&& ! side_effects_p (XEXP (SET_DEST (x), 0)))
{
HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
rtx or_mask;
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (mode) - len - pos;
+ pos = GET_MODE_PRECISION (mode) - len - pos;
or_mask = gen_int_mode (src << pos, mode);
if (src == mask)
break;
pos = 0;
- len = GET_MODE_BITSIZE (GET_MODE (inner));
+ len = GET_MODE_PRECISION (GET_MODE (inner));
unsignedp = 0;
break;
pos = INTVAL (XEXP (SET_SRC (x), 2));
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
+ pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
unsignedp = (code == ZERO_EXTRACT);
}
break;
break;
}
- if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
+ if (len && pos >= 0
+ && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
{
enum machine_mode mode = GET_MODE (SET_SRC (x));
(unsignedp ? LSHIFTRT : ASHIFTRT, mode,
gen_rtx_ASHIFT (mode,
gen_lowpart (mode, inner),
- GEN_INT (GET_MODE_BITSIZE (mode)
+ GEN_INT (GET_MODE_PRECISION (mode)
- len - pos)),
- GEN_INT (GET_MODE_BITSIZE (mode) - len)));
+ GEN_INT (GET_MODE_PRECISION (mode) - len)));
split = find_split_point (&SET_SRC (x), insn, true);
if (split && split != &SET_SRC (x))
if (GET_CODE (temp) == ASHIFTRT
&& CONST_INT_P (XEXP (temp, 1))
- && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
INTVAL (XEXP (temp, 1)));
rtx temp1 = simplify_shift_const
(NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
- GET_MODE_BITSIZE (mode) - 1 - i),
- GET_MODE_BITSIZE (mode) - 1 - i);
+ GET_MODE_PRECISION (mode) - 1 - i),
+ GET_MODE_PRECISION (mode) - 1 - i);
/* If all we did was surround TEMP with the two shifts, we
haven't improved anything, so don't use it. Otherwise,
&& (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
== ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
|| (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
== (unsigned int) i + 1))))
return simplify_shift_const
(NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
XEXP (XEXP (XEXP (x, 0), 0), 0),
- GET_MODE_BITSIZE (mode) - (i + 1)),
- GET_MODE_BITSIZE (mode) - (i + 1));
+ GET_MODE_PRECISION (mode) - (i + 1)),
+ GET_MODE_PRECISION (mode) - (i + 1));
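[Editorial aside, not part of the patch.] The pair of simplify_shift_const calls above is the usual double-shift sign extension: shift left so the top bit of the low i + 1 bits lands in the sign position, then arithmetic-shift back down by the same amount. A minimal plain-C sketch of that identity (the 32-bit width and helper name are illustrative, not GCC internals; it assumes >> on a negative signed value is an arithmetic shift, which is what ASHIFTRT denotes):

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low N bits of VAL: the (ashiftrt (ashift X C) C) shape
   built above, with C = precision - N.  */
static int32_t
sign_extend_low_bits (uint32_t val, int n)
{
  int prec = 32;                 /* stands in for GET_MODE_PRECISION (mode) */
  return (int32_t) (val << (prec - n)) >> (prec - n);
}

int
main (void)
{
  assert (sign_extend_low_bits (0xf, 4) == -1);   /* 0b1111 -> -1 */
  assert (sign_extend_low_bits (0x7, 4) == 7);    /* 0b0111 ->  7 */
  return 0;
}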
/* If only the low-order bit of X is possibly nonzero, (plus x -1)
can become (ashiftrt (ashift (xor x 1) C) C) where C is
return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
simplify_shift_const (NULL_RTX, ASHIFT, mode,
gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
- GET_MODE_BITSIZE (mode) - 1),
- GET_MODE_BITSIZE (mode) - 1);
+ GET_MODE_PRECISION (mode) - 1),
+ GET_MODE_PRECISION (mode) - 1);
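[Editorial aside, not part of the patch.] A quick plain-C check of the identity the comment above describes, assuming X is known to be 0 or 1 and an arithmetic right shift; this only illustrates the RTL transform, it is not GCC code:

#include <assert.h>
#include <stdint.h>

/* For x in {0, 1}:  x - 1  ==  ((x ^ 1) << C) >> C  with C = precision - 1
   and an arithmetic right shift, i.e. the
   (ashiftrt (ashift (xor x 1) C) C) form built above.  */
static int32_t
plus_minus_one_via_shifts (uint32_t x)
{
  const int c = 31;              /* GET_MODE_PRECISION (mode) - 1 */
  return (int32_t) ((x ^ 1u) << c) >> c;
}

int
main (void)
{
  assert (plus_minus_one_via_shifts (0) == -1);
  assert (plus_minus_one_via_shifts (1) == 0);
  return 0;
}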
/* If we are adding two things that have no bits in common, convert
the addition into an IOR. This will often be further simplified,
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return simplify_gen_unary (NEG, mode,
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return plus_constant (gen_lowpart (mode, op0), 1);
&& new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
&& op1 == const0_rtx
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_lowpart (mode,
expand_compound_operation (op0));
&& op1 == const0_rtx
&& mode == GET_MODE (op0)
&& (num_sign_bit_copies (op0, mode)
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
{
op0 = expand_compound_operation (op0);
return simplify_gen_unary (NOT, mode,
{
x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
expand_compound_operation (op0),
- GET_MODE_BITSIZE (mode) - 1 - i);
+ GET_MODE_PRECISION (mode) - 1 - i);
if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
return XEXP (x, 0);
else
}
else if (true_code == EQ && true_val == const0_rtx
&& (num_sign_bit_copies (from, GET_MODE (from))
- == GET_MODE_BITSIZE (GET_MODE (from))))
+ == GET_MODE_PRECISION (GET_MODE (from))))
{
false_code = EQ;
false_val = constm1_rtx;
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
+ (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
{
c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
extend_op = SIGN_EXTEND;
&& rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
&& (num_sign_bit_copies (f, GET_MODE (f))
> (unsigned int)
- (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
+ (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
{
c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
extend_op = SIGN_EXTEND;
&& ((1 == nonzero_bits (XEXP (cond, 0), mode)
&& (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
|| ((num_sign_bit_copies (XEXP (cond, 0), mode)
- == GET_MODE_BITSIZE (mode))
+ == GET_MODE_PRECISION (mode))
&& (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
return
simplify_shift_const (NULL_RTX, ASHIFT, mode,
if (dest == cc0_rtx
&& GET_CODE (src) == SUBREG
&& subreg_lowpart_p (src)
- && (GET_MODE_BITSIZE (GET_MODE (src))
- < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
+ && (GET_MODE_PRECISION (GET_MODE (src))
+ < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
{
rtx inner = SUBREG_REG (src);
enum machine_mode inner_mode = GET_MODE (inner);
#endif
&& (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
GET_MODE (XEXP (XEXP (src, 0), 0)))
- == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
+ == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
&& ! side_effects_p (src))
{
rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
return x;
- len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
+ len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
/* If the inner object has VOIDmode (the only way this can happen
is if it is an ASM_OPERANDS), we can't do anything since we don't
know how much masking to do. */
pos = INTVAL (XEXP (x, 2));
/* This should stay within the object being extracted, fail otherwise. */
- if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
+ if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
return x;
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;
+ pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
break;
if (GET_CODE (XEXP (x, 0)) == TRUNCATE
&& GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
&& COMPARISON_P (XEXP (XEXP (x, 0), 0))
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
&& (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return XEXP (XEXP (x, 0), 0);
&& GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
&& subreg_lowpart_p (XEXP (x, 0))
&& COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
<= HOST_BITS_PER_WIDE_INT)
&& (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
return SUBREG_REG (XEXP (x, 0));
extraction. Then the constant of 31 would be substituted in
to produce such a position. */
- modewidth = GET_MODE_BITSIZE (GET_MODE (x));
+ modewidth = GET_MODE_PRECISION (GET_MODE (x));
if (modewidth >= pos + len)
{
enum machine_mode mode = GET_MODE (x);
&& GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
{
inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
- len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
+ len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
}
else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
/* A constant position should stay within the width of INNER. */
if (CONST_INT_P (pos)
- && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
+ && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
break;
if (BITS_BIG_ENDIAN)
{
if (CONST_INT_P (pos))
- pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
+ pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
- INTVAL (pos));
else if (GET_CODE (pos) == MINUS
&& CONST_INT_P (XEXP (pos, 1))
&& (INTVAL (XEXP (pos, 1))
- == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
+ == GET_MODE_PRECISION (GET_MODE (inner)) - len))
/* If position is ADJUST - X, new position is X. */
pos = XEXP (pos, 0);
else
pos = simplify_gen_binary (MINUS, GET_MODE (pos),
- GEN_INT (GET_MODE_BITSIZE (
+ GEN_INT (GET_MODE_PRECISION (
GET_MODE (inner))
- len),
pos);
: BITS_PER_UNIT)) == 0
/* We can't do this if we are widening INNER_MODE (it
may not be aligned, for one thing). */
- && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
+ && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
&& (inner_mode == tmode
|| (! mode_dependent_address_p (XEXP (inner, 0))
&& ! MEM_VOLATILE_P (inner))))))
/* POS counts from lsb, but make OFFSET count in memory order. */
if (BYTES_BIG_ENDIAN)
- offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
+ offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
else
offset = pos / BITS_PER_UNIT;
other cases, we would only be going outside our object in cases when
an original shift would have been undefined. */
if (MEM_P (inner)
- && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
+ && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
|| (pos_rtx != 0 && len != 1)))
return 0;
{
enum rtx_code code = GET_CODE (x);
enum machine_mode mode = GET_MODE (x);
- int mode_width = GET_MODE_BITSIZE (mode);
+ int mode_width = GET_MODE_PRECISION (mode);
rtx rhs, lhs;
enum rtx_code next_code;
int i, j;
{
new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
new_rtx = make_extraction (mode, new_rtx,
- (GET_MODE_BITSIZE (mode)
+ (GET_MODE_PRECISION (mode)
- INTVAL (XEXP (XEXP (x, 0), 1))),
NULL_RTX, i, 1, 0, in_code == COMPARE);
}
/* It is not valid to do a right-shift in a narrower mode
than the one it came in with. */
if ((code == LSHIFTRT || code == ASHIFTRT)
- && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
+ && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
op_mode = GET_MODE (x);
/* Truncate MASK to fit OP_MODE. */
unsigned HOST_WIDE_INT cval
= UINTVAL (XEXP (x, 1))
| (GET_MODE_MASK (GET_MODE (x)) & ~mask);
- int width = GET_MODE_BITSIZE (GET_MODE (x));
+ int width = GET_MODE_PRECISION (GET_MODE (x));
rtx y;
/* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
This may eliminate that PLUS and, later, the AND. */
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT smask = mask;
/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
&& CONST_INT_P (XEXP (x, 1))
&& ((INTVAL (XEXP (XEXP (x, 0), 1))
+ floor_log2 (INTVAL (XEXP (x, 1))))
- < GET_MODE_BITSIZE (GET_MODE (x)))
+ < GET_MODE_PRECISION (GET_MODE (x)))
&& (UINTVAL (XEXP (x, 1))
& ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
{
if (! (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
&& ! (GET_MODE (XEXP (x, 1)) != VOIDmode
&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
- < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
+ < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
break;
/* If the shift count is a constant and we can do arithmetic in
conservative form of the mask. */
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
&& HWI_COMPUTABLE_MODE_P (op_mode))
mask >>= INTVAL (XEXP (x, 1));
else
bit. */
&& ((INTVAL (XEXP (x, 1))
+ num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
- >= GET_MODE_BITSIZE (GET_MODE (x)))
+ >= GET_MODE_PRECISION (GET_MODE (x)))
&& exact_log2 (mask + 1) >= 0
/* Number of bits left after the shift must be more than the mask
needs. */
&& ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
- <= GET_MODE_BITSIZE (GET_MODE (x)))
+ <= GET_MODE_PRECISION (GET_MODE (x)))
/* Must be more sign bit copies than the mask needs. */
&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
>= exact_log2 (mask + 1)))
x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
+ GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
- exact_log2 (mask + 1)));
goto shiftrt;
represent a mask for all its bits in a single scalar.
But we only care about the lower bits, so calculate these. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
{
nonzero = ~(unsigned HOST_WIDE_INT) 0;
- /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
is the number of bits a full-width mask would have set.
We need only shift if these are fewer than nonzero can
hold. If not, we must keep all bits set in nonzero. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
+ if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
< HOST_BITS_PER_WIDE_INT)
nonzero >>= INTVAL (XEXP (x, 1))
+ HOST_BITS_PER_WIDE_INT
- - GET_MODE_BITSIZE (GET_MODE (x)) ;
+ - GET_MODE_PRECISION (GET_MODE (x)) ;
}
else
{
{
x = simplify_shift_const
(NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
- GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);
+ GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
if (GET_CODE (x) != ASHIFTRT)
return force_to_mode (x, mode, mask, next_select);
&& CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& (INTVAL (XEXP (x, 1))
- <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
+ <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
&& GET_CODE (XEXP (x, 0)) == ASHIFT
&& XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
&& CONST_INT_P (XEXP (XEXP (x, 0), 1))
&& INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
&& (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
- < GET_MODE_BITSIZE (GET_MODE (x)))
+ < GET_MODE_PRECISION (GET_MODE (x)))
&& INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
{
temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
false values when testing X. */
else if (x == constm1_rtx || x == const0_rtx
|| (mode != VOIDmode
- && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
+ && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
{
*ptrue = constm1_rtx, *pfalse = const0_rtx;
return x;
return x;
pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
- if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
- || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
+ if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
+ || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
|| (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
return x;
other, pos),
dest);
src = force_to_mode (src, mode,
- GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
+ GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
? ~(unsigned HOST_WIDE_INT) 0
: ((unsigned HOST_WIDE_INT) 1 << len) - 1,
0);
{
unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
- if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
+ if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
/* We don't know anything about the upper bits. */
mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
*nonzero &= mask;
return tem;
if (nonzero_sign_valid && rsp->sign_bit_copies != 0
- && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
+ && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
*result = rsp->sign_bit_copies;
return NULL;
return (unsignedp
? (HWI_COMPUTABLE_MODE_P (mode)
- ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
+ ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
- floor_log2 (nonzero_bits (x, mode)))
: 0)
: num_sign_bit_copies (x, mode) - 1);
{
if (orig_mode == mode)
return mode;
- gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode));
+ gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
/* In general we can't perform in wider mode for right shift and rotate. */
switch (code)
/* We can still widen if the bits brought in from the left are identical
to the sign bit of ORIG_MODE. */
if (num_sign_bit_copies (op, mode)
- > (unsigned) (GET_MODE_BITSIZE (mode)
- - GET_MODE_BITSIZE (orig_mode)))
+ > (unsigned) (GET_MODE_PRECISION (mode)
+ - GET_MODE_PRECISION (orig_mode)))
return mode;
return orig_mode;
int care_bits = low_bitmask_len (orig_mode, outer_const);
if (care_bits >= 0
- && GET_MODE_BITSIZE (orig_mode) - care_bits >= count)
+ && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
return mode;
}
/* fall through */
}
}
-/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
- The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot
- simplify it. Otherwise, return a simplified value.
+/* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
+ of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
+ if we cannot simplify it. Otherwise, return a simplified value.
The shift is normally computed in the widest mode we find in VAROP, as
long as it isn't a different number of words than RESULT_MODE. Exceptions
/* If we were given an invalid count, don't do anything except exactly
what was requested. */
- if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
+ if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
return NULL_RTX;
count = orig_count;
/* Convert ROTATERT to ROTATE. */
if (code == ROTATERT)
{
- unsigned int bitsize = GET_MODE_BITSIZE (result_mode);;
+ unsigned int bitsize = GET_MODE_PRECISION (result_mode);
code = ROTATE;
if (VECTOR_MODE_P (result_mode))
count = bitsize / GET_MODE_NUNITS (result_mode) - count;
multiple operations, each of which are defined, we know what the
result is supposed to be. */
- if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
+ if (count > (GET_MODE_PRECISION (shift_mode) - 1))
{
if (code == ASHIFTRT)
- count = GET_MODE_BITSIZE (shift_mode) - 1;
+ count = GET_MODE_PRECISION (shift_mode) - 1;
else if (code == ROTATE || code == ROTATERT)
- count %= GET_MODE_BITSIZE (shift_mode);
+ count %= GET_MODE_PRECISION (shift_mode);
else
{
/* We can't simply return zero because there may be an
is a no-op. */
if (code == ASHIFTRT
&& (num_sign_bit_copies (varop, shift_mode)
- == GET_MODE_BITSIZE (shift_mode)))
+ == GET_MODE_PRECISION (shift_mode)))
{
count = 0;
break;
if (code == ASHIFTRT
&& (count + num_sign_bit_copies (varop, shift_mode)
- >= GET_MODE_BITSIZE (shift_mode)))
- count = GET_MODE_BITSIZE (shift_mode) - 1;
+ >= GET_MODE_PRECISION (shift_mode)))
+ count = GET_MODE_PRECISION (shift_mode) - 1;
/* We simplify the tests below and elsewhere by converting
ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
AND of a new shift with a mask. We compute the result below. */
if (CONST_INT_P (XEXP (varop, 1))
&& INTVAL (XEXP (varop, 1)) >= 0
- && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
+ && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
&& HWI_COMPUTABLE_MODE_P (result_mode)
&& HWI_COMPUTABLE_MODE_P (mode)
&& !VECTOR_MODE_P (result_mode))
we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
we can convert it to
- (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
+ (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
This simplifies certain SIGN_EXTEND operations. */
if (code == ASHIFT && first_code == ASHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode)
- - GET_MODE_BITSIZE (GET_MODE (varop))))
+ && count == (GET_MODE_PRECISION (result_mode)
+ - GET_MODE_PRECISION (GET_MODE (varop))))
{
/* C3 has the low-order C1 bits zero. */
if (code == ASHIFTRT
|| (code == ROTATE && first_code == ASHIFTRT)
- || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
|| (GET_MODE (varop) != result_mode
&& (first_code == ASHIFTRT || first_code == LSHIFTRT
|| first_code == ROTATE
&& XEXP (XEXP (varop, 0), 1) == constm1_rtx
&& (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& (code == LSHIFTRT || code == ASHIFTRT)
- && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
&& rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
{
count = 0;
case EQ:
/* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
says that the sign bit can be tested, FOO has mode MODE, C is
- GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
+ GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
that may be nonzero. */
if (code == LSHIFTRT
&& XEXP (varop, 1) == const0_rtx
&& GET_MODE (XEXP (varop, 0)) == result_mode
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
&& HWI_COMPUTABLE_MODE_P (result_mode)
&& STORE_FLAG_VALUE == -1
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
/* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
than the number of bits in the mode is equivalent to A. */
if (code == LSHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1)
{
varop = XEXP (varop, 0);
is one less than the number of bits in the mode is
equivalent to (xor A 1). */
if (code == LSHIFTRT
- && count == (GET_MODE_BITSIZE (result_mode) - 1)
+ && count == (GET_MODE_PRECISION (result_mode) - 1)
&& XEXP (varop, 1) == constm1_rtx
&& nonzero_bits (XEXP (varop, 0), result_mode) == 1
&& merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
&& GET_CODE (XEXP (varop, 0)) == ASHIFTRT
- && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
+ && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
&& (code == LSHIFTRT || code == ASHIFTRT)
&& CONST_INT_P (XEXP (XEXP (varop, 0), 1))
&& INTVAL (XEXP (XEXP (varop, 0), 1)) == count
&& GET_CODE (XEXP (varop, 0)) == LSHIFTRT
&& CONST_INT_P (XEXP (XEXP (varop, 0), 1))
&& (INTVAL (XEXP (XEXP (varop, 0), 1))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
- - GET_MODE_BITSIZE (GET_MODE (varop)))))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
+ - GET_MODE_PRECISION (GET_MODE (varop)))))
{
rtx varop_inner = XEXP (varop, 0);
if (outer_op != UNKNOWN)
{
if (GET_RTX_CLASS (outer_op) != RTX_UNARY
- && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
outer_const = trunc_int_for_mode (outer_const, result_mode);
if (outer_op == AND)
simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1)
{
enum machine_mode mode = GET_MODE (op0);
- unsigned int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
HOST_WIDE_INT const_op = INTVAL (*pop1);
/* Get the constant we are comparing against and turn off all bits
&& XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
&& XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
&& (INTVAL (XEXP (op0, 1))
- == (GET_MODE_BITSIZE (GET_MODE (op0))
- - (GET_MODE_BITSIZE
+ == (GET_MODE_PRECISION (GET_MODE (op0))
+ - (GET_MODE_PRECISION
(GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
{
op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
&& GET_CODE (inner_op1) == SUBREG
&& (GET_MODE (SUBREG_REG (inner_op0))
== GET_MODE (SUBREG_REG (inner_op1)))
- && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0)))
+ && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
<= HOST_BITS_PER_WIDE_INT)
&& (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
GET_MODE (SUBREG_REG (inner_op0)))))
while (CONST_INT_P (op1))
{
enum machine_mode mode = GET_MODE (op0);
- unsigned int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
int equality_comparison_p;
int sign_bit_comparison_p;
if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
op0 = force_to_mode (op0, mode,
(unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode) - 1),
+ << (GET_MODE_PRECISION (mode) - 1),
0);
/* Now try cases based on the opcode of OP0. If none of the cases
else
{
mode = new_mode;
- i = (GET_MODE_BITSIZE (mode) - 1 - i);
+ i = (GET_MODE_PRECISION (mode) - 1 - i);
}
}
if (mode_width <= HOST_BITS_PER_WIDE_INT
&& subreg_lowpart_p (op0)
- && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width
+ && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
&& GET_CODE (SUBREG_REG (op0)) == PLUS
&& CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
{
/* (A - C1) sign-extends if it is positive and 1-extends
if it is negative, C2 both sign- and 1-extends. */
|| (num_sign_bit_copies (a, inner_mode)
- > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ > (unsigned int) (GET_MODE_PRECISION (inner_mode)
- mode_width)
&& const_op < 0)))
|| ((unsigned HOST_WIDE_INT) c1
< (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
/* (A - C1) always sign-extends, like C2. */
&& num_sign_bit_copies (a, inner_mode)
- > (unsigned int) (GET_MODE_BITSIZE (inner_mode)
+ > (unsigned int) (GET_MODE_PRECISION (inner_mode)
- (mode_width - 1))))
{
op0 = SUBREG_REG (op0);
/* If the inner mode is narrower and we are extracting the low part,
we can treat the SUBREG as if it were a ZERO_EXTEND. */
if (subreg_lowpart_p (op0)
- && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width)
+ && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
/* Fall through */ ;
else
break;
the code has been changed. */
&& (0
#ifdef WORD_REGISTER_OPERATIONS
- || (mode_width > GET_MODE_BITSIZE (tmode)
+ || (mode_width > GET_MODE_PRECISION (tmode)
&& mode_width <= BITS_PER_WORD)
#endif
- || (mode_width <= GET_MODE_BITSIZE (tmode)
+ || (mode_width <= GET_MODE_PRECISION (tmode)
&& subreg_lowpart_p (XEXP (op0, 0))))
&& CONST_INT_P (XEXP (op0, 1))
&& mode_width <= HOST_BITS_PER_WIDE_INT
op1 = gen_lowpart (GET_MODE (op0), op1);
}
}
- else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
+ else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
<= HOST_BITS_PER_WIDE_INT)
&& (nonzero_bits (SUBREG_REG (op0),
GET_MODE (SUBREG_REG (op0)))
if (zero_extended
|| ((num_sign_bit_copies (op0, tmode)
- > (unsigned int) (GET_MODE_BITSIZE (tmode)
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))
&& (num_sign_bit_copies (op1, tmode)
- > (unsigned int) (GET_MODE_BITSIZE (tmode)
- - GET_MODE_BITSIZE (mode)))))
+ > (unsigned int) (GET_MODE_PRECISION (tmode)
+ - GET_MODE_PRECISION (mode)))))
{
/* If OP0 is an AND and we don't have an AND in MODE either,
make a new AND in the proper mode. */
else if (GET_CODE (setter) == SET
&& GET_CODE (SET_DEST (setter)) == SUBREG
&& SUBREG_REG (SET_DEST (setter)) == dest
- && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
&& subreg_lowpart_p (SET_DEST (setter)))
record_value_for_reg (dest, record_dead_insn,
gen_lowpart (GET_MODE (dest),
unsigned int regno = REGNO (SUBREG_REG (subreg));
enum machine_mode mode = GET_MODE (subreg);
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
return;
for (links = LOG_LINKS (insn); links;)
unsigned int word;
/* A paradoxical subreg begins at bit position 0. */
- if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
+ if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode))
return 0;
if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
/* Paradoxical subregs are otherwise valid. */
if (!rknown
&& offset == 0
- && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode))
+ && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode))
{
info->representable_p = true;
/* If this is a big endian paradoxical subreg, which uses more
unsigned HOST_WIDE_INT inner_nz;
enum rtx_code code;
enum machine_mode inner_mode;
- unsigned int mode_width = GET_MODE_BITSIZE (mode);
+ unsigned int mode_width = GET_MODE_PRECISION (mode);
/* For floating-point and vector values, assume all bits are needed. */
if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
return nonzero;
/* If X is wider than MODE, use its mode instead. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
+ if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
{
mode = GET_MODE (x);
nonzero = GET_MODE_MASK (mode);
- mode_width = GET_MODE_BITSIZE (mode);
+ mode_width = GET_MODE_PRECISION (mode);
}
if (mode_width > HOST_BITS_PER_WIDE_INT)
not known to be zero. */
if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
- && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
- && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
+ && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
+ && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
{
nonzero &= cached_nonzero_bits (x, GET_MODE (x),
known_x, known_mode, known_ret);
/* Disabled to avoid exponential mutual recursion between nonzero_bits
and num_sign_bit_copies. */
if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
- == GET_MODE_BITSIZE (GET_MODE (x)))
+ == GET_MODE_PRECISION (GET_MODE (x)))
nonzero = 1;
#endif
/* Disabled to avoid exponential mutual recursion between nonzero_bits
and num_sign_bit_copies. */
if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
- == GET_MODE_BITSIZE (GET_MODE (x)))
+ == GET_MODE_PRECISION (GET_MODE (x)))
nonzero = 1;
#endif
break;
unsigned HOST_WIDE_INT nz1
= cached_nonzero_bits (XEXP (x, 1), mode,
known_x, known_mode, known_ret);
- int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
+ int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
int width0 = floor_log2 (nz0) + 1;
int width1 = floor_log2 (nz1) + 1;
int low0 = floor_log2 (nz0 & -nz0);
/* If the inner mode is a single word for both the host and target
machines, we can compute this from which bits of the inner
object might be nonzero. */
- if (GET_MODE_BITSIZE (inner_mode) <= BITS_PER_WORD
- && (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT))
+ if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD
+ && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT))
{
nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
/* On many CISC machines, accessing an object in a wider mode
causes the high-order bits to become undefined. So they are
not known to be zero. */
- if (GET_MODE_SIZE (GET_MODE (x))
- > GET_MODE_SIZE (inner_mode))
+ if (GET_MODE_PRECISION (GET_MODE (x))
+ > GET_MODE_PRECISION (inner_mode))
nonzero |= (GET_MODE_MASK (GET_MODE (x))
& ~GET_MODE_MASK (inner_mode));
}
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) >= 0
&& INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
{
enum machine_mode inner_mode = GET_MODE (x);
- unsigned int width = GET_MODE_BITSIZE (inner_mode);
+ unsigned int width = GET_MODE_PRECISION (inner_mode);
int count = INTVAL (XEXP (x, 1));
unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
unsigned HOST_WIDE_INT op_nonzero
unsigned int known_ret)
{
enum rtx_code code = GET_CODE (x);
- unsigned int bitwidth = GET_MODE_BITSIZE (mode);
+ unsigned int bitwidth = GET_MODE_PRECISION (mode);
int num0, num1, result;
unsigned HOST_WIDE_INT nonzero;
return 1;
/* For a smaller object, just ignore the high bits. */
- if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
+ if (bitwidth < GET_MODE_PRECISION (GET_MODE (x)))
{
num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
known_x, known_mode, known_ret);
return MAX (1,
- num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
+ num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth));
}
- if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
+ if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x)))
{
#ifndef WORD_REGISTER_OPERATIONS
- /* If this machine does not do all register operations on the entire
- register and MODE is wider than the mode of X, we can say nothing
- at all about the high-order bits. */
+ /* If this machine does not do all register operations on the entire
+ register and MODE is wider than the mode of X, we can say nothing
+ at all about the high-order bits. */
return 1;
#else
/* Likewise on machines that do, if the mode of the object is smaller
than a word and loads of that size don't sign extend, we can say
nothing about the high order bits. */
- if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
+ if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD
#ifdef LOAD_EXTEND_OP
&& LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
#endif
if (target_default_pointer_address_modes_p ()
&& ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
&& mode == Pmode && REG_POINTER (x))
- return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
+ return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif
{
/* Some RISC machines sign-extend all loads of smaller than a word. */
if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
return MAX (1, ((int) bitwidth
- - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
+ - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1));
#endif
break;
num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
known_x, known_mode, known_ret);
return MAX ((int) bitwidth
- - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
+ - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1,
num0);
}
/* For a smaller object, just ignore the high bits. */
- if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
+ if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))))
{
num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
known_x, known_mode, known_ret);
return MAX (1, (num0
- - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
+ - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))
- bitwidth)));
}
break;
case SIGN_EXTEND:
- return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
+ cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
known_x, known_mode, known_ret));
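[Editorial aside, not part of the patch.] The SIGN_EXTEND case above adds exactly (outer precision - inner precision) sign-bit copies on top of whatever the narrow value already had. A small plain-C illustration; the counting helper below is ad hoc, not a GCC function:

#include <assert.h>
#include <stdint.h>

/* Count how many high-order bits of a 32-bit value are copies of its sign
   bit, sign bit included -- the quantity num_sign_bit_copies tracks.  */
static int
sign_bit_copies32 (int32_t v)
{
  uint32_t u = (uint32_t) v;
  uint32_t sign = u >> 31;
  int n = 1;
  while (n < 32 && ((u >> (31 - n)) & 1) == sign)
    n++;
  return n;
}

int
main (void)
{
  int8_t narrow = -3;        /* 0b11111101: 6 sign-bit copies in 8 bits */
  int32_t wide = narrow;     /* sign extension, as in the case above */
  assert (sign_bit_copies32 (wide) == (32 - 8) + 6);
  return 0;
}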
/* For a smaller object, just ignore the high bits. */
num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
known_x, known_mode, known_ret);
- return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
+ return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
- bitwidth)));
case NOT:
known_x, known_mode, known_ret);
if (CONST_INT_P (XEXP (x, 1))
&& INTVAL (XEXP (x, 1)) > 0
- && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x)))
+ && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
return num0;
if (!CONST_INT_P (XEXP (x, 1))
|| INTVAL (XEXP (x, 1)) < 0
|| INTVAL (XEXP (x, 1)) >= (int) bitwidth
- || INTVAL (XEXP (x, 1)) >= GET_MODE_BITSIZE (GET_MODE (x)))
+ || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x)))
return 1;
num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
count those bits and return one less than that amount. If we can't
safely compute the mask for this mode, always return BITWIDTH. */
- bitwidth = GET_MODE_BITSIZE (mode);
+ bitwidth = GET_MODE_PRECISION (mode);
if (bitwidth > HOST_BITS_PER_WIDE_INT)
return 1;
if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
&& CONST_INT_P (op1)
&& GET_MODE (op0) != VOIDmode
- && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
+ && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
{
HOST_WIDE_INT const_val = INTVAL (op1);
unsigned HOST_WIDE_INT uconst_val = const_val;
case GE:
if ((const_val & max_val)
!= ((unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))
+ << (GET_MODE_PRECISION (GET_MODE (op0)) - 1)))
code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
break;
have to be sign-bit copies too. */
|| num_sign_bit_copies_in_rep [in_mode][mode])
num_sign_bit_copies_in_rep [in_mode][mode]
- += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
+ += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
}
}
}
{
if (mode != VOIDmode)
{
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+ if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
return -1;
m &= GET_MODE_MASK (mode);
}
if (STORE_FLAG_VALUE == -1
&& GET_CODE (op) == ASHIFTRT
&& GET_CODE (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_relational (GE, mode, VOIDmode,
XEXP (op, 0), const0_rtx);
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
&& SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
{
enum machine_mode inner = GET_MODE (XEXP (op, 0));
- int isize = GET_MODE_BITSIZE (inner);
+ int isize = GET_MODE_PRECISION (inner);
if (STORE_FLAG_VALUE == 1)
{
temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
GEN_INT (isize - 1));
if (mode == inner)
return temp;
- if (GET_MODE_BITSIZE (mode) > isize)
+ if (GET_MODE_PRECISION (mode) > isize)
return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
return simplify_gen_unary (TRUNCATE, mode, temp, inner);
}
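[Editorial aside, not part of the patch.] Both branches above reduce the operation to a shift of the operand by isize - 1: the ASHIFTRT form yields the -1/0 encoding of "is negative", the LSHIFTRT/ZERO_EXTEND form the 1/0 encoding (this appears to be the (neg (lt X 0)) folding, which depends on STORE_FLAG_VALUE). A plain-C illustration for a 32-bit operand; treating >> on a negative int as an arithmetic shift is the assumption ASHIFTRT makes explicit:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t vals[] = { 5, 0, -7 };
  for (int i = 0; i < 3; i++)
    {
      int32_t x = vals[i];
      /* ashiftrt by isize - 1: -1 when negative, 0 otherwise.  */
      assert (-(x < 0) == (x >> 31));
      /* lshiftrt by isize - 1: 1 when negative, 0 otherwise.  */
      assert ((x < 0) == (int32_t) ((uint32_t) x >> 31));
    }
  return 0;
}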
patterns. */
if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
? (num_sign_bit_copies (op, GET_MODE (op))
- > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
- - GET_MODE_BITSIZE (mode)))
+ > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
+ - GET_MODE_PRECISION (mode)))
: truncated_to_mode (mode, op))
&& ! (GET_CODE (op) == LSHIFTRT
&& GET_CODE (XEXP (op, 0)) == MULT))
&& (flag_unsafe_math_optimizations
|| (SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0))))))))
return simplify_gen_unary (FLOAT, mode,
|| (GET_CODE (op) == FLOAT
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& ((unsigned)significand_size (GET_MODE (op))
- >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
+ >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
- num_sign_bit_copies (XEXP (op, 0),
GET_MODE (XEXP (op, 0)))))))
return simplify_gen_unary (GET_CODE (op), mode,
return op;
/* If operand is known to be only -1 or 0, convert ABS to NEG. */
- if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
+ if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
return gen_rtx_NEG (mode, op);
break;
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
- unsigned int op_width = GET_MODE_BITSIZE (op_mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
+ unsigned int op_width = GET_MODE_PRECISION (op_mode);
if (code == VEC_DUPLICATE)
{
if (hv < 0)
return 0;
}
- else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
+ else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
;
else
hv = 0, lv &= GET_MODE_MASK (op_mode);
if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
;
else
- val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 1;
+ val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 1;
break;
case CLRSB:
arg0 &= GET_MODE_MASK (op_mode);
if (arg0 == 0)
- val = GET_MODE_BITSIZE (op_mode) - 1;
+ val = GET_MODE_PRECISION (op_mode) - 1;
else if (arg0 >= 0)
- val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 2;
+ val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 2;
else if (arg0 < 0)
- val = GET_MODE_BITSIZE (op_mode) - floor_log2 (~arg0) - 2;
+ val = GET_MODE_PRECISION (op_mode) - floor_log2 (~arg0) - 2;
break;
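[Editorial aside, not part of the patch.] The constant folding above derives CLZ and CLRSB from floor_log2 of the argument. A plain-C spot check of those formulas at precision 32; floor_log2_u below is a local stand-in, not GCC's floor_log2:

#include <assert.h>

static int
floor_log2_u (unsigned int x)
{
  int l = -1;
  while (x)
    {
      x >>= 1;
      l++;
    }
  return l;
}

int
main (void)
{
  const int prec = 32;
  /* 19 = 0b10011: 27 leading zeros, 26 redundant leading sign bits.  */
  assert (prec - floor_log2_u (19u) - 1 == 27);            /* CLZ   */
  assert (prec - floor_log2_u (19u) - 2 == 26);            /* CLRSB */
  /* Negative argument: CLRSB computed from the complement, as above.  */
  assert (prec - floor_log2_u (~(unsigned int) -19) - 2 == 26);
  return 0;
}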
case CTZ:
/* Even if the value at zero is undefined, we have to come
up with some replacement. Seems good enough. */
if (! CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val))
- val = GET_MODE_BITSIZE (op_mode);
+ val = GET_MODE_PRECISION (op_mode);
}
else
val = ctz_hwi (arg0);
/* When zero-extending a CONST_INT, we need to know its
original mode. */
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
case SIGN_EXTEND:
if (op_mode == VOIDmode)
op_mode = mode;
- if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
+ op_width = GET_MODE_PRECISION (op_mode);
+ if (op_width == HOST_BITS_PER_WIDE_INT)
{
/* If we were really extending the mode,
we would have to distinguish between zero-extension
and sign-extension. */
- gcc_assert (width == GET_MODE_BITSIZE (op_mode));
+ gcc_assert (width == op_width);
val = arg0;
}
- else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
+ else if (op_width < HOST_BITS_PER_WIDE_INT)
{
val = arg0 & GET_MODE_MASK (op_mode);
if (val_signbit_known_set_p (op_mode, val))
case CLZ:
hv = 0;
if (h1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
- HOST_BITS_PER_WIDE_INT;
else if (l1 != 0)
- lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
+ lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case CTZ:
else if (h1 != 0)
lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
- lv = GET_MODE_BITSIZE (mode);
+ lv = GET_MODE_PRECISION (mode);
break;
case POPCOUNT:
case ZERO_EXTEND:
gcc_assert (op_mode != VOIDmode);
- if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ if (op_width > HOST_BITS_PER_WIDE_INT)
return 0;
hv = 0;
case SIGN_EXTEND:
if (op_mode == VOIDmode
- || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
+ || op_width > HOST_BITS_PER_WIDE_INT)
return 0;
else
{
{
rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
/* Even if we can't compute a constant result,
there are some cases worth simplifying. */
&& CONST_INT_P (XEXP (opleft, 1))
&& CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
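[Editorial aside, not part of the patch.] The IOR of opposite shifts of the same value is recognized above as a rotate precisely when the two shift counts add up to the mode's precision. A plain-C version of that identity for 32 bits (c must stay in 1..31 so neither shift count reaches the word size):

#include <assert.h>
#include <stdint.h>

static uint32_t
rotl32_via_shifts (uint32_t x, unsigned c)
{
  /* (ashift X c) ior (lshiftrt X (32 - c)) == rotate left by c.  */
  return (x << c) | (x >> (32 - c));
}

int
main (void)
{
  assert (rotl32_via_shifts (0x80000001u, 1) == 0x00000003u);
  assert (rotl32_via_shifts (0x12345678u, 8) == 0x34567812u);
  return 0;
}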
/* Same, but for ashift that has been "simplified" to a wider mode
&& CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
&& CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_BITSIZE (mode)))
+ == GET_MODE_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0),
XEXP (SUBREG_REG (opleft), 1));
&& trueop1 == const1_rtx
&& GET_CODE (op0) == LSHIFTRT
&& CONST_INT_P (XEXP (op0, 1))
- && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
/* (xor (comparison foo bar) (const_int sign-bit))
unsigned HOST_WIDE_INT zero_val = 0;
if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
- && zero_val == GET_MODE_BITSIZE (imode)
+ && zero_val == GET_MODE_PRECISION (imode)
&& INTVAL (trueop1) == exact_log2 (zero_val))
return simplify_gen_relational (EQ, mode, imode,
XEXP (op0, 0), const0_rtx);
{
HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
unsigned HOST_WIDE_INT cnt;
if (SHIFT_COUNT_TRUNCATED)
- o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode));
+ o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));
if (!double_int_fits_in_uhwi_p (o1)
- || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode))
+ || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
return 0;
cnt = double_int_to_uhwi (o1);
if (code == LSHIFTRT || code == ASHIFTRT)
- res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode),
+ res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
code == ASHIFTRT);
else if (code == ASHIFT)
- res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode),
+ res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
true);
else if (code == ROTATE)
- res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode));
+ res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
else /* code == ROTATERT */
- res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode));
+ res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
}
break;
&& (GET_CODE (trueop1) == CONST_DOUBLE
|| CONST_INT_P (trueop1)))
{
- int width = GET_MODE_BITSIZE (mode);
+ int width = GET_MODE_PRECISION (mode);
HOST_WIDE_INT l0s, h0s, l1s, h1s;
unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
{
- int sign_bitnum = GET_MODE_BITSIZE (mode) - 1;
+ int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
&& (UINTVAL (inner_const)
& ((unsigned HOST_WIDE_INT) 1
enum machine_mode op0_mode, rtx op0, rtx op1,
rtx op2)
{
- unsigned int width = GET_MODE_BITSIZE (mode);
+ unsigned int width = GET_MODE_PRECISION (mode);
bool any_change = false;
rtx tem;
{
/* Extracting a bit-field from a constant */
unsigned HOST_WIDE_INT val = UINTVAL (op0);
-
+ HOST_WIDE_INT op1val = INTVAL (op1);
+ HOST_WIDE_INT op2val = INTVAL (op2);
if (BITS_BIG_ENDIAN)
- val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1);
+ val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
else
- val >>= INTVAL (op2);
+ val >>= op2val;
- if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
+ if (HOST_BITS_PER_WIDE_INT != op1val)
{
/* First zero-extend. */
- val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
+ val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
/* If desired, propagate sign bit. */
if (code == SIGN_EXTRACT
- && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1)))
+ && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
!= 0)
- val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
+ val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
}
return gen_int_mode (val, mode);
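[Editorial aside, not part of the patch.] The block above folds a ZERO_EXTRACT or SIGN_EXTRACT of a constant: shift the field down to bit 0 (the BITS_BIG_ENDIAN branch adjusts the position first), mask it to op1val bits, and propagate the field's top bit for SIGN_EXTRACT. A plain-C sketch of the little-endian case; the names are illustrative, and it assumes a field width below 64 and two's-complement conversion to the signed result:

#include <assert.h>
#include <stdint.h>

static int64_t
extract_field (uint64_t val, int pos, int len, int sign_extract)
{
  uint64_t field = (val >> pos) & (((uint64_t) 1 << len) - 1);
  if (sign_extract && (field & ((uint64_t) 1 << (len - 1))) != 0)
    field |= ~(((uint64_t) 1 << len) - 1);   /* propagate the sign bit */
  return (int64_t) field;
}

int
main (void)
{
  /* Bits [4,8) of 0xAB are 0b1010: 10 as ZERO_EXTRACT, -6 as SIGN_EXTRACT. */
  assert (extract_field (0xAB, 4, 4, 0) == 10);
  assert (extract_field (0xAB, 4, 4, 1) == -6);
  return 0;
}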
/* Optimize SUBREG truncations of zero and sign extended values. */
if ((GET_CODE (op) == ZERO_EXTEND
|| GET_CODE (op) == SIGN_EXTEND)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
{
unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
enum machine_mode origmode = GET_MODE (XEXP (op, 0));
if (outermode == origmode)
return XEXP (op, 0);
- if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
+ if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
subreg_lowpart_offset (outermode,
origmode));
/* A SUBREG resulting from a zero extension may fold to zero if
it extracts higher bits that the ZERO_EXTEND's source bits. */
if (GET_CODE (op) == ZERO_EXTEND
- && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
+ && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
return CONST0_RTX (outermode);
}
to avoid the possibility that an outer LSHIFTRT shifts by more
than the sign extension's sign_bit_copies and introduces zeros
into the high bits of the result. */
- && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
+ && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
&& CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
&& CONST_INT_P (XEXP (op, 1))
&& GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (LSHIFTRT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
the outer subreg is effectively a truncation to the original mode. */
if (GET_CODE (op) == ASHIFT
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
+ && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
&& CONST_INT_P (XEXP (op, 1))
&& (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
&& GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
&& subreg_lsb_1 (outermode, innermode, byte) == 0)
return simplify_gen_binary (ASHIFT, outermode,
XEXP (XEXP (op, 0), 0), XEXP (op, 1));
if ((GET_CODE (op) == LSHIFTRT
|| GET_CODE (op) == ASHIFTRT)
&& SCALAR_INT_MODE_P (outermode)
- && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
- && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
+ && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
+ && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
&& CONST_INT_P (XEXP (op, 1))
- && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
+ && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
&& INTVAL (XEXP (op, 1)) >= 0
- && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
+ && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
&& byte == subreg_lowpart_offset (outermode, innermode))
{
int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;