 {
   if (SHIFT_COUNT_TRUNCATED)
     canon_const_arg1 = GEN_INT (INTVAL (const_arg1)
-                                & (GET_MODE_BITSIZE (mode)
+                                & (GET_MODE_UNIT_BITSIZE (mode)
                                     - 1));
   else
     break;
[...]
 {
   if (SHIFT_COUNT_TRUNCATED)
     inner_const = GEN_INT (INTVAL (inner_const)
-                           & (GET_MODE_BITSIZE (mode) - 1));
+                           & (GET_MODE_UNIT_BITSIZE (mode)
+                              - 1));
   else
     break;
 }
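
Both hunks above make the same fix: on a SHIFT_COUNT_TRUNCATED target, an out-of-range shift count is reduced modulo the width of the value actually being shifted, and for a vector shift that is the element ("unit") width, not the width of the whole vector, so GET_MODE_UNIT_BITSIZE is the right basis for the mask. A standalone sketch of the arithmetic (not GCC source; the mode widths are illustrative):

#include <cstdio>

/* Mirrors "count & (GET_MODE_UNIT_BITSIZE (mode) - 1)".  */
static unsigned int
truncate_count (unsigned int count, unsigned int unit_bits)
{
  return count & (unit_bits - 1);
}

int
main ()
{
  /* For a V4SI shift by 33: masking by the 128-bit vector width keeps
     33, which is still out of range for an element; masking by the
     32-bit element width gives the count each element actually sees.  */
  std::printf ("%u\n", truncate_count (33, 128));  /* 33 */
  std::printf ("%u\n", truncate_count (33, 32));   /* 1 */
  return 0;
}
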
[...]
 /* As an exception, we can turn an ASHIFTRT of this
    form into a shift of the number of bits - 1.  */
 if (code == ASHIFTRT)
-  new_const = GEN_INT (GET_MODE_BITSIZE (mode) - 1);
+  new_const = GEN_INT (GET_MODE_UNIT_BITSIZE (mode) - 1);
 else if (!side_effects_p (XEXP (y, 0)))
   return CONST0_RTX (mode);
 else
   break;
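
The ASHIFTRT exception works because an arithmetic right shift by width - 1 already leaves nothing but copies of the sign bit, so any larger count can be canonicalized to exactly width - 1; as above, the width is per element, hence GET_MODE_UNIT_BITSIZE. A standalone illustration (not GCC source; assumes the usual two's-complement arithmetic shift):

#include <cstdio>
#include <cstdint>

int
main ()
{
  int32_t neg = -1234, pos = 1234;
  /* Shifting right by 31 (the 32-bit unit width minus 1) yields the
     all-sign-bits result: -1 for negative values, 0 otherwise.  */
  std::printf ("%d %d\n", (int) (neg >> 31), (int) (pos >> 31));  /* -1 0 */
  return 0;
}
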
[...]
      if we test MODE instead, we can get an infinite recursion
      alternating between two modes each wider than MODE.  */
-  if (code == NE && GET_CODE (op0) == SUBREG
-      && subreg_lowpart_p (op0)
-      && (GET_MODE_SIZE (GET_MODE (op0))
-          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))))
+  if (code == NE
+      && partial_subreg_p (op0)
+      && subreg_lowpart_p (op0))
     {
       machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
       rtx tem = record_jump_cond_subreg (inner_mode, op1);
       if (tem)
         record_jump_cond (code, mode, SUBREG_REG (op0), tem,
                           reversed_nonequality);
     }
-  if (code == NE && GET_CODE (op1) == SUBREG
-      && subreg_lowpart_p (op1)
-      && (GET_MODE_SIZE (GET_MODE (op1))
-          < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1)))))
+  if (code == NE
+      && partial_subreg_p (op1)
+      && subreg_lowpart_p (op1))
     {
       machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
       rtx tem = record_jump_cond_subreg (inner_mode, op0);
       if (tem)
         record_jump_cond (code, mode, SUBREG_REG (op1), tem,
                           reversed_nonequality);
     }
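
These two hunks, like the two-argument calls further down, lean on partial_subreg_p from rtl.h. A sketch of the predicate the one-argument form encapsulates, written out in terms of the old open-coded size comparison (this assumes GCC's internal headers and is not a standalone program):

/* Equivalent of the removed open-coded test: does the SUBREG use
   only part of the inner register?  */
static bool
old_style_partial_subreg_p (const_rtx x)
{
  return (GET_CODE (x) == SUBREG
          && (GET_MODE_SIZE (GET_MODE (x))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))));
}

The two-argument overload partial_subreg_p (outer, inner) performs just the size comparison on a pair of modes, which is what the later hunks in this patch use.
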
[...]
           && CONST_INT_P (XEXP (SET_DEST (sets[0].rtl), 2)))
         {
           rtx dest_reg = XEXP (SET_DEST (sets[0].rtl), 0);
+          /* This is the mode of XEXP (tem, 0) as well.  */
+          scalar_int_mode dest_mode
+            = as_a <scalar_int_mode> (GET_MODE (dest_reg));
           rtx width = XEXP (SET_DEST (sets[0].rtl), 1);
           rtx pos = XEXP (SET_DEST (sets[0].rtl), 2);
           HOST_WIDE_INT val = INTVAL (XEXP (tem, 0));
           HOST_WIDE_INT mask;
           unsigned int shift;

           if (BITS_BIG_ENDIAN)
-            shift = GET_MODE_PRECISION (GET_MODE (dest_reg))
-                    - INTVAL (pos) - INTVAL (width);
+            shift = (GET_MODE_PRECISION (dest_mode)
+                     - INTVAL (pos) - INTVAL (width));
           else
             shift = INTVAL (pos);

           if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
             mask = -1;
[...]
[...]
           && GET_CODE (src) == AND && CONST_INT_P (XEXP (src, 1))
           && GET_MODE_SIZE (int_mode) < UNITS_PER_WORD)
         {
-          machine_mode tmode;
+          opt_scalar_int_mode tmode_iter;
           rtx new_and = gen_rtx_AND (VOIDmode, NULL_RTX, XEXP (src, 1));

-          FOR_EACH_WIDER_MODE (tmode, int_mode)
+          FOR_EACH_WIDER_MODE (tmode_iter, int_mode)
             {
+              scalar_int_mode tmode = tmode_iter.require ();
               if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
                 break;
[...]
         {
           struct rtx_def memory_extend_buf;
           rtx memory_extend_rtx = &memory_extend_buf;
-          machine_mode tmode;

           /* Set what we are trying to extend and the operation it might
              have been extended with.  */
           PUT_CODE (memory_extend_rtx, extend_op);
           XEXP (memory_extend_rtx, 0) = src;

-          FOR_EACH_WIDER_MODE (tmode, int_mode)
+          opt_scalar_int_mode tmode_iter;
+          FOR_EACH_WIDER_MODE (tmode_iter, int_mode)
             {
               struct table_elt *larger_elt;
+              scalar_int_mode tmode = tmode_iter.require ();

               if (GET_MODE_SIZE (tmode) > UNITS_PER_WORD)
                 break;
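
The pattern in the last two hunks is the same: the loop variable becomes an opt_scalar_int_mode, and each iteration calls require () to assert that the mode produced by FOR_EACH_WIDER_MODE really is a scalar integer mode before using it as one. A self-contained C++ analogue of the wrapper's behaviour (a simplified stand-in, not GCC's implementation):

#include <cassert>
#include <cstdio>

/* A minimal optional-value wrapper: require () asserts the value is
   present, so loop bodies can work with the plain value type.  */
template <typename T>
class opt_mode
{
  bool m_set;
  T m_value;
public:
  opt_mode () : m_set (false), m_value () {}
  opt_mode (T value) : m_set (true), m_value (value) {}
  bool exists () const { return m_set; }
  T require () const { assert (m_set); return m_value; }
};

int
main ()
{
  opt_mode<int> m (32);
  if (m.exists ())
    std::printf ("%d\n", m.require ());  /* 32 */
  return 0;
}
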
[...]
               && ! (src != 0
                     && GET_CODE (src) == SUBREG
                     && GET_MODE (src) == GET_MODE (p->exp)
-                    && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
-                        < GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp))))))
+                    && partial_subreg_p (GET_MODE (SUBREG_REG (src)),
+                                         GET_MODE (SUBREG_REG (p->exp)))))
             continue;

           if (src && GET_CODE (src) == code && rtx_equal_p (src, p->exp))
[...]
               && ! (src != 0
                     && GET_CODE (src) == SUBREG
                     && GET_MODE (src) == GET_MODE (elt->exp)
-                    && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
-                        < GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp))))))
+                    && partial_subreg_p (GET_MODE (SUBREG_REG (src)),
+                                         GET_MODE (SUBREG_REG (elt->exp)))))
             {
               elt = elt->next_same_value;
               continue;
[...]
               HOST_WIDE_INT val = INTVAL (dest_cst);
               HOST_WIDE_INT mask;
               unsigned int shift;
+              /* This is the mode of DEST_CST as well.  */
+              scalar_int_mode dest_mode
+                = as_a <scalar_int_mode> (GET_MODE (dest_reg));

               if (BITS_BIG_ENDIAN)
-                shift = GET_MODE_PRECISION (GET_MODE (dest_reg))
+                shift = GET_MODE_PRECISION (dest_mode)
                         - INTVAL (pos) - INTVAL (width);
               else
                 shift = INTVAL (pos);

               if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
                 mask = -1;
               else
                 mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
               val &= ~(mask << shift);
               val |= (INTVAL (trial) & mask) << shift;
-              val = trunc_int_for_mode (val, GET_MODE (dest_reg));
+              val = trunc_int_for_mode (val, dest_mode);
               validate_unshare_change (insn, &SET_DEST (sets[i].rtl),
                                        dest_reg, 1);
               validate_unshare_change (insn, &SET_SRC (sets[i].rtl),
                                        GEN_INT (val), 1);
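
The constant folding above is plain mask-and-merge arithmetic: compute the bit position of the field (flipped to precision - pos - width under BITS_BIG_ENDIAN numbering), clear WIDTH bits of the old constant at that position, and merge in the new bits. A standalone rendering of the same steps (not GCC source; the operands are illustrative):

#include <cstdio>
#include <cstdint>

/* Insert the low WIDTH bits of FIELD into VAL at bit SHIFT, the same
   mask-and-merge sequence as the fold above; the width == 64 case
   mirrors the HOST_BITS_PER_WIDE_INT special case.  */
static int64_t
insert_field (int64_t val, int64_t field, unsigned int shift,
              unsigned int width)
{
  int64_t mask = width == 64 ? -1 : ((int64_t) 1 << width) - 1;
  val &= ~(mask << shift);
  val |= (field & mask) << shift;
  return val;
}

int
main ()
{
  /* Insert the 4-bit field 0xA at bit 8 of 0xFFFF, giving 0xFAFF.  */
  std::printf ("%#llx\n",
               (unsigned long long) insert_field (0xFFFF, 0xA, 8, 4));
  return 0;
}
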
[...]
           && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))) - 1)
                / UNITS_PER_WORD)
               == (GET_MODE_SIZE (GET_MODE (dest)) - 1) / UNITS_PER_WORD)
-          && (GET_MODE_SIZE (GET_MODE (dest))
-              >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest))))
+          && !partial_subreg_p (dest)
           && sets[i].src_elt != 0)
         {
           machine_mode new_mode = GET_MODE (SUBREG_REG (dest));
           rtx new_src = 0;
           unsigned src_hash;
           struct table_elt *src_elt;
-          int byte = 0;
[...]
           /* Ignore invalid entries.  */
           if (!REG_P (elt->exp)
               && ! exp_equiv_p (elt->exp, elt->exp, 1, false))
             continue;
[...]
             new_src = elt->exp;
           else
             {
-              /* Calculate big endian correction for the SUBREG_BYTE.
-                 We have already checked that M1 (GET_MODE (dest))
-                 is not narrower than M2 (new_mode).  */
-              if (BYTES_BIG_ENDIAN)
-                byte = (GET_MODE_SIZE (GET_MODE (dest))
-                        - GET_MODE_SIZE (new_mode));
-
+              unsigned int byte
+                = subreg_lowpart_offset (new_mode, GET_MODE (dest));
               new_src = simplify_gen_subreg (new_mode, elt->exp,
                                              GET_MODE (dest), byte);
             }
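
The removed BYTES_BIG_ENDIAN correction is what subreg_lowpart_offset computes for scalar modes (setting aside targets whose word and byte endianness differ): the low part of a wide value starts at byte 0 on little-endian targets and at the size difference on big-endian ones. A standalone rendering (not GCC source):

#include <cstdio>

/* Byte offset of the low part when a value of INNER_BYTES bytes is
   viewed in a narrower mode of OUTER_BYTES bytes.  */
static unsigned int
lowpart_offset (unsigned int outer_bytes, unsigned int inner_bytes,
                bool bytes_big_endian)
{
  return bytes_big_endian ? inner_bytes - outer_bytes : 0;
}

int
main ()
{
  /* SImode (4 bytes) low part of a DImode (8 bytes) value.  */
  std::printf ("%u\n", lowpart_offset (4, 8, false));  /* 0 */
  std::printf ("%u\n", lowpart_offset (4, 8, true));   /* 4 */
  return 0;
}
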