+2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * combine.c (find_split_point): Add is_a <scalar_int_mode> checks.
+ (make_compound_operation_int): Likewise.
+ (change_zero_ext): Likewise.
+ * expr.c (convert_move): Likewise.
+ (convert_modes): Likewise.
+ * fwprop.c (forward_propagate_subreg): Likewise.
+ * loop-iv.c (get_biv_step_1): Likewise.
+ * optabs.c (widen_operand): Likewise.
+ * postreload.c (move2add_valid_value_p): Likewise.
+ * recog.c (simplify_while_replacing): Likewise.
+ * simplify-rtx.c (simplify_unary_operation_1): Likewise.
+ (simplify_binary_operation_1): Likewise.  Remove redundant
+ mode equality check.
+
2017-08-30 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
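The idiom these changes apply throughout is the checked mode conversion from machmode.h.  A minimal sketch, assuming GCC's internal APIs (the names "x" and "use_int_mode" are illustrative only, not from the patch):

    scalar_int_mode int_mode;
    if (is_a <scalar_int_mode> (GET_MODE (x), &int_mode))
      {
	/* The conversion succeeded: GET_MODE (x) is a scalar integer
	   mode, and int_mode records that fact in its static type, so
	   queries such as GET_MODE_PRECISION (int_mode) are known to
	   be meaningful.  */
	use_int_mode (int_mode);
      }

When the conversion fails, each guarded transformation simply does not apply, which is the conservative behaviour for vector and floating-point modes.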
HOST_WIDE_INT pos = 0;
int unsignedp = 0;
rtx inner = NULL_RTX;
- scalar_int_mode inner_mode;
+ scalar_int_mode mode, inner_mode;
/* First special-case some codes. */
switch (code)
case SIGN_EXTRACT:
case ZERO_EXTRACT:
- if (CONST_INT_P (XEXP (SET_SRC (x), 1))
+ if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
+ &inner_mode)
+ && CONST_INT_P (XEXP (SET_SRC (x), 1))
&& CONST_INT_P (XEXP (SET_SRC (x), 2)))
{
inner = XEXP (SET_SRC (x), 0);
pos = INTVAL (XEXP (SET_SRC (x), 2));
if (BITS_BIG_ENDIAN)
- pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
+ pos = GET_MODE_PRECISION (inner_mode) - len - pos;
unsignedp = (code == ZERO_EXTRACT);
}
break;
}
if (len && pos >= 0
- && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
+ && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
+ && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
{
- machine_mode mode = GET_MODE (SET_SRC (x));
-
/* For unsigned, we have a choice of a shift followed by an
AND or two shifts. Use two shifts for field sizes where the
constant might be too large. We assume here that we can
rtx new_rtx = 0;
int i;
rtx tem;
+ scalar_int_mode inner_mode;
bool equality_comparison = false;
if (in_code == EQ)
/* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
else if (GET_CODE (XEXP (x, 0)) == SUBREG
&& subreg_lowpart_p (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
+ &inner_mode)
&& GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
&& (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
{
rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
- machine_mode inner_mode = GET_MODE (inner_x0);
new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
new_rtx = make_extraction (inner_mode, new_rtx, 0,
XEXP (inner_x0, 1),
/* If the SUBREG is masking of a logical right shift,
make an extraction. */
if (GET_CODE (inner) == LSHIFTRT
+ && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
+ && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
&& CONST_INT_P (XEXP (inner, 1))
- && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
- && (UINTVAL (XEXP (inner, 1))
- < GET_MODE_PRECISION (GET_MODE (inner)))
+ && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
&& subreg_lowpart_p (x))
{
new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
- int width = GET_MODE_PRECISION (GET_MODE (inner))
+ int width = GET_MODE_PRECISION (inner_mode)
- INTVAL (XEXP (inner, 1));
if (width > mode_width)
width = mode_width;
maybe_swap_commutative_operands (**iter);
rtx *dst = &SET_DEST (pat);
+ scalar_int_mode mode;
if (GET_CODE (*dst) == ZERO_EXTRACT
&& REG_P (XEXP (*dst, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
&& CONST_INT_P (XEXP (*dst, 1))
&& CONST_INT_P (XEXP (*dst, 2)))
{
rtx reg = XEXP (*dst, 0);
int width = INTVAL (XEXP (*dst, 1));
int offset = INTVAL (XEXP (*dst, 2));
- machine_mode mode = GET_MODE (reg);
int reg_width = GET_MODE_PRECISION (mode);
if (BITS_BIG_ENDIAN)
offset = reg_width - width - offset;
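A worked example of the BITS_BIG_ENDIAN adjustment above: for a ZERO_EXTRACT of width 8 at offset 0 from a 32-bit register, bit-big-endian numbering counts from the most significant end, so the equivalent little-endian position is reg_width - width - offset = 32 - 8 - 0 = 24.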
the required extension, strip it. We don't handle such SUBREGs as
TO here. */
- if (GET_CODE (from) == SUBREG && SUBREG_PROMOTED_VAR_P (from)
+ scalar_int_mode to_int_mode;
+ if (GET_CODE (from) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (from)
+ && is_a <scalar_int_mode> (to_mode, &to_int_mode)
&& (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (from)))
- >= GET_MODE_PRECISION (to_mode))
+ >= GET_MODE_PRECISION (to_int_mode))
&& SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
- from = gen_lowpart (to_mode, from), from_mode = to_mode;
+ from = gen_lowpart (to_int_mode, from), from_mode = to_int_mode;
gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
/* If FROM is a SUBREG that indicates that we have already done at least
the required extension, strip it. */
- if (GET_CODE (x) == SUBREG && SUBREG_PROMOTED_VAR_P (x)
- && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (mode)
+ if (GET_CODE (x) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (x)
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))) >= GET_MODE_SIZE (int_mode)
&& SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
- x = gen_lowpart (mode, SUBREG_REG (x));
+ x = gen_lowpart (int_mode, SUBREG_REG (x));
if (GET_MODE (x) != VOIDmode)
oldmode = GET_MODE (x);
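To illustrate the stripping above with a hypothetical example: if x is (subreg:QI (reg:SI r) 0) with SUBREG_PROMOTED_VAR_P set and a promoted sign that matches unsignedp, then (reg:SI r) already holds the QImode value extended to SImode, so a conversion to HImode can simply take gen_lowpart (HImode, SUBREG_REG (x)) instead of emitting a fresh extension.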
rtx use_reg = DF_REF_REG (use);
rtx_insn *use_insn;
rtx src;
+ scalar_int_mode int_use_mode, src_mode;
/* Only consider subregs... */
machine_mode use_mode = GET_MODE (use_reg);
definition of Y or, failing that, allow A to be deleted after
reload through register tying. Introducing more uses of Y
prevents both optimisations. */
- else if (subreg_lowpart_p (use_reg))
+ else if (is_a <scalar_int_mode> (use_mode, &int_use_mode)
+ && subreg_lowpart_p (use_reg))
{
use_insn = DF_REF_INSN (use);
src = SET_SRC (def_set);
if ((GET_CODE (src) == ZERO_EXTEND
|| GET_CODE (src) == SIGN_EXTEND)
+ && is_a <scalar_int_mode> (GET_MODE (src), &src_mode)
&& REG_P (XEXP (src, 0))
&& REGNO (XEXP (src, 0)) >= FIRST_PSEUDO_REGISTER
&& GET_MODE (XEXP (src, 0)) == use_mode
&& !free_load_extend (src, def_insn)
- && (targetm.mode_rep_extended (use_mode, GET_MODE (src))
+ && (targetm.mode_rep_extended (int_use_mode, src_mode)
!= (int) GET_CODE (src))
&& all_uses_available_at (def_insn, use_insn))
return try_fwprop_subst (use, DF_REF_LOC (use), XEXP (src, 0),
if (GET_CODE (next) == SUBREG)
{
- machine_mode amode = GET_MODE (next);
-
- if (GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
+ scalar_int_mode amode;
+ if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
+ || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
return false;
*inner_mode = amode;
int unsignedp, int no_extend)
{
rtx result;
+ scalar_int_mode int_mode;
/* If we don't have to extend and this is a constant, return it. */
if (no_extend && GET_MODE (op) == VOIDmode)
extend since it will be more efficient to do so unless the signedness of
a promoted object differs from our extension. */
if (! no_extend
+ || !is_a <scalar_int_mode> (mode, &int_mode)
|| (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
&& SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp)))
return convert_modes (mode, oldmode, op, unsignedp);
/* If MODE is no wider than a single word, we return a lowpart or paradoxical
SUBREG. */
- if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
- return gen_lowpart (mode, force_reg (GET_MODE (op), op));
+ if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
+ return gen_lowpart (int_mode, force_reg (GET_MODE (op), op));
/* Otherwise, get an object of MODE, clobber it, and set the low-order
part to OP. */
- result = gen_reg_rtx (mode);
+ result = gen_reg_rtx (int_mode);
emit_clobber (result);
emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
return result;
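In the single-word case above, widening e.g. a QImode operand to SImode produces gen_lowpart (SImode, force_reg (QImode, op)), i.e. the paradoxical (subreg:SI (reg:QI ...) 0); its upper bits are undefined, which is acceptable here precisely because the caller passed no_extend.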
if (mode != reg_mode[regno])
{
- if (!MODES_OK_FOR_MOVE2ADD (mode, reg_mode[regno]))
+ scalar_int_mode old_mode;
+ if (!is_a <scalar_int_mode> (reg_mode[regno], &old_mode)
+ || !MODES_OK_FOR_MOVE2ADD (mode, old_mode))
return false;
/* The value loaded into regno in reg_mode[regno] is also valid in
mode after truncation only if (REG:mode regno) is the lowpart of
(REG:reg_mode[regno] regno). Now, for big endian, the starting
regno of the lowpart might be different. */
- int s_off = subreg_lowpart_offset (mode, reg_mode[regno]);
- s_off = subreg_regno_offset (regno, reg_mode[regno], s_off, mode);
+ int s_off = subreg_lowpart_offset (mode, old_mode);
+ s_off = subreg_regno_offset (regno, old_mode, s_off, mode);
if (s_off != 0)
/* We could in principle adjust regno, check reg_mode[regno] to be
BLKmode, and return s_off to the caller (vs. -1 for failure),
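A concrete illustration of the lowpart subtlety (hypothetical 32-bit big-endian target): subreg_lowpart_offset (SImode, DImode) is byte 4, which subreg_regno_offset maps to a hard register offset of 1, i.e. the SImode lowpart of the DImode pair lives in regno + 1.  s_off is then nonzero and move2add_valid_value_p conservatively returns false.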
rtx x = *loc;
enum rtx_code code = GET_CODE (x);
rtx new_rtx = NULL_RTX;
+ scalar_int_mode is_mode;
if (SWAPPABLE_OPERANDS_P (x)
&& swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
happen, we might just fail in some cases). */
if (MEM_P (XEXP (x, 0))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &is_mode)
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2))
&& !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
&& !MEM_VOLATILE_P (XEXP (x, 0)))
{
machine_mode wanted_mode = VOIDmode;
- machine_mode is_mode = GET_MODE (XEXP (x, 0));
int pos = INTVAL (XEXP (x, 2));
if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
{
enum rtx_code reversed;
rtx temp;
- scalar_int_mode inner, int_mode;
+ scalar_int_mode inner, int_mode, op0_mode;
switch (code)
{
(zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
(and:SI (reg:SI) (const_int 63)). */
if (GET_CODE (op) == SUBREG
- && GET_MODE_PRECISION (GET_MODE (op))
- < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
- <= HOST_BITS_PER_WIDE_INT
- && GET_MODE_PRECISION (mode)
- >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
+ && GET_MODE_PRECISION (GET_MODE (op)) < GET_MODE_PRECISION (op0_mode)
+ && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
+ && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
&& subreg_lowpart_p (op)
- && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
+ && (nonzero_bits (SUBREG_REG (op), op0_mode)
& ~GET_MODE_MASK (GET_MODE (op))) == 0)
{
- if (GET_MODE_PRECISION (mode)
- == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
+ if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
return SUBREG_REG (op);
- return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
- GET_MODE (SUBREG_REG (op)));
+ return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
+ op0_mode);
}
#if defined(POINTERS_EXTEND_UNSIGNED)
by simplify_shift_const. */
if (GET_CODE (opleft) == SUBREG
+ && is_a <scalar_int_mode> (mode, &int_mode)
+ && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
+ &inner_mode)
&& GET_CODE (SUBREG_REG (opleft)) == ASHIFT
&& GET_CODE (opright) == LSHIFTRT
&& GET_CODE (XEXP (opright, 0)) == SUBREG
- && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
&& SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
- && (GET_MODE_SIZE (GET_MODE (opleft))
- < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
+ && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
&& rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
SUBREG_REG (XEXP (opright, 0)))
&& CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
&& CONST_INT_P (XEXP (opright, 1))
- && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_PRECISION (mode)))
- return gen_rtx_ROTATE (mode, XEXP (opright, 0),
- XEXP (SUBREG_REG (opleft), 1));
+ && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
+ + INTVAL (XEXP (opright, 1))
+ == GET_MODE_PRECISION (int_mode)))
+ return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
+ XEXP (SUBREG_REG (opleft), 1));
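The mode equality check removed in the rotate case is implied by the structure being matched: in (ior:M opleft opright) both operands necessarily have the ior's mode M, and the first operand of (lshiftrt:M ...) must have mode M as well, so GET_MODE (opleft) == GET_MODE (XEXP (opright, 0)) could never be false once the other conditions held.  A sketch of the shape involved, with illustrative modes and constants on a little-endian target:

    (ior:SI (subreg:SI (ashift:DI (reg:DI r) (const_int 24)) 0)
	    (lshiftrt:SI (subreg:SI (reg:DI r) 0) (const_int 8)))

Since 24 + 8 equals the SImode precision of 32, this becomes
(rotate:SI (subreg:SI (reg:DI r) 0) (const_int 24)).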
/* If we have (ior (and (X C1) C2)), simplify this by making
C1 as small as possible if C1 actually changes. */