(extract:MODE (mult (reg) (MULT_IMM)) (EXTRACT_IMM) (const_int 0)). */
bool
-aarch64_is_extend_from_extract (machine_mode mode, rtx mult_imm,
+aarch64_is_extend_from_extract (scalar_int_mode mode, rtx mult_imm,
rtx extract_imm)
{
HOST_WIDE_INT mult_val, extract_val;
static rtx
-aarch64_add_offset (machine_mode mode, rtx temp, rtx reg, HOST_WIDE_INT offset)
+aarch64_add_offset (scalar_int_mode mode, rtx temp, rtx reg,
+ HOST_WIDE_INT offset)
{
if (!aarch64_plus_immediate (GEN_INT (offset), mode))
{
static int
aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
- machine_mode mode)
+ scalar_int_mode mode)
{
int i;
unsigned HOST_WIDE_INT val, val2, mask;
gcc_assert (mode == SImode || mode == DImode);
/* Check on what type of symbol it is. */
- if (GET_CODE (imm) == SYMBOL_REF
- || GET_CODE (imm) == LABEL_REF
- || GET_CODE (imm) == CONST)
+ scalar_int_mode int_mode;
+ if ((GET_CODE (imm) == SYMBOL_REF
+ || GET_CODE (imm) == LABEL_REF
+ || GET_CODE (imm) == CONST)
+ && is_a <scalar_int_mode> (mode, &int_mode))
{
rtx mem, base, offset;
enum aarch64_symbol_type sty;
{
case SYMBOL_FORCE_TO_MEM:
if (offset != const0_rtx
- && targetm.cannot_force_const_mem (mode, imm))
+ && targetm.cannot_force_const_mem (int_mode, imm))
{
gcc_assert (can_create_pseudo_p ());
- base = aarch64_force_temporary (mode, dest, base);
- base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+ base = aarch64_force_temporary (int_mode, dest, base);
+ base = aarch64_add_offset (int_mode, NULL, base,
+ INTVAL (offset));
aarch64_emit_move (dest, base);
return;
}
mem = gen_rtx_MEM (ptr_mode, base);
}
- if (mode != ptr_mode)
- mem = gen_rtx_ZERO_EXTEND (mode, mem);
+ if (int_mode != ptr_mode)
+ mem = gen_rtx_ZERO_EXTEND (int_mode, mem);
emit_insn (gen_rtx_SET (dest, mem));
if (offset != const0_rtx)
{
gcc_assert(can_create_pseudo_p ());
- base = aarch64_force_temporary (mode, dest, base);
- base = aarch64_add_offset (mode, NULL, base, INTVAL (offset));
+ base = aarch64_force_temporary (int_mode, dest, base);
+ base = aarch64_add_offset (int_mode, NULL, base,
+ INTVAL (offset));
aarch64_emit_move (dest, base);
return;
}
return;
}
- aarch64_internal_mov_immediate (dest, imm, true, GET_MODE (dest));
+ aarch64_internal_mov_immediate (dest, imm, true,
+ as_a <scalar_int_mode> (mode));
}
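
The is_a <scalar_int_mode> / as_a <scalar_int_mode> calls above are the machmode.h mode-class helpers used throughout this series. A minimal sketch of the two idioms, with an illustrative function name that is not part of the patch:

/* Illustrative only.  The checked form fills INT_MODE and lets vector
   and floating-point modes fall out as "no"; the asserting form is for
   paths where the mode is already known to be a scalar integer, as in
   the hunk just above.  */
static bool
example_needs_scalar_int (machine_mode mode)
{
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return false;

  gcc_assert (int_mode == as_a <scalar_int_mode> (mode));
  return true;
}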
/* Add DELTA to REGNUM in mode MODE. SCRATCHREG can be used to hold a
large immediate). */
static void
-aarch64_add_constant_internal (machine_mode mode, int regnum, int scratchreg,
- HOST_WIDE_INT delta, bool frame_related_p,
- bool emit_move_imm)
+aarch64_add_constant_internal (scalar_int_mode mode, int regnum,
+ int scratchreg, HOST_WIDE_INT delta,
+ bool frame_related_p, bool emit_move_imm)
{
HOST_WIDE_INT mdelta = abs_hwi (delta);
rtx this_rtx = gen_rtx_REG (mode, regnum);
}
static inline void
-aarch64_add_constant (machine_mode mode, int regnum, int scratchreg,
+aarch64_add_constant (scalar_int_mode mode, int regnum, int scratchreg,
HOST_WIDE_INT delta)
{
aarch64_add_constant_internal (mode, regnum, scratchreg, delta, false, true);
/* Return true if val is an immediate that can be loaded into a
register by a MOVZ instruction. */
static bool
-aarch64_movw_imm (HOST_WIDE_INT val, machine_mode mode)
+aarch64_movw_imm (HOST_WIDE_INT val, scalar_int_mode mode)
{
if (GET_MODE_SIZE (mode) > 4)
{
bool
aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode)
{
- if (aarch64_bitmask_imm (val_in, mode))
+ scalar_int_mode int_mode;
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
+ return false;
+
+ if (aarch64_bitmask_imm (val_in, int_mode))
return false;
- if (aarch64_move_imm (val_in, mode))
+ if (aarch64_move_imm (val_in, int_mode))
return false;
unsigned HOST_WIDE_INT imm2 = aarch64_and_split_imm2 (val_in);
- return aarch64_bitmask_imm (imm2, mode);
+ return aarch64_bitmask_imm (imm2, int_mode);
}
/* Return true if val is an immediate that can be loaded into a
bool
aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
{
- if (aarch64_movw_imm (val, mode) || aarch64_movw_imm (~val, mode))
+ scalar_int_mode int_mode;
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
+ return false;
+
+ if (aarch64_movw_imm (val, int_mode) || aarch64_movw_imm (~val, int_mode))
return 1;
- return aarch64_bitmask_imm (val, mode);
+ return aarch64_bitmask_imm (val, int_mode);
}
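
aarch64_and_bitmask_imm and aarch64_move_imm keep machine_mode in their signatures because existing callers pass whatever mode the operand happens to carry; the new is_a guard simply turns a non-scalar-integer mode into a clean "false". A hedged illustration of the resulting behaviour (the calls and constants are invented for this sketch, not taken from the patch):

/* Illustrative only.  */
bool a = aarch64_move_imm (0xffff0000, SImode);   /* true: a single MOVZ wN, #0xffff, lsl 16.  */
bool b = aarch64_move_imm (0x12345678, V4SImode); /* false: rejected by the is_a guard.  */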
static bool
&& SCALAR_FLOAT_MODE_P (mode)
&& aarch64_reinterpret_float_as_int (x, &ival))
{
- machine_mode imode = (mode == HFmode
- ? SImode
- : int_mode_for_mode (mode).require ());
+ scalar_int_mode imode = (mode == HFmode
+ ? SImode
+ : int_mode_for_mode (mode).require ());
int num_instr = aarch64_internal_mov_immediate
(NULL_RTX, gen_int_mode (ival, imode), false, imode);
return num_instr < 3;
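
int_mode_for_mode returns an opt_scalar_int_mode, so the result has to be unwrapped with .exists () or .require (); .require () is safe in the hunk above because SFmode and DFmode always have same-sized integer modes (SImode and DImode), and HFmode is special-cased to SImode before the lookup. A small sketch of the two forms (the example_ function is illustrative, not part of the patch):

/* Illustrative only.  */
static void
example_opt_mode_usage (void)
{
  scalar_int_mode imode;

  /* Checked form: test for a same-sized integer mode and extract it
     in one go.  */
  if (int_mode_for_mode (DFmode).exists (&imode))
    gcc_assert (imode == DImode);

  /* Asserting form, for callers that know the lookup cannot fail.  */
  imode = int_mode_for_mode (SFmode).require ();
  gcc_assert (imode == SImode);
}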
if (!TARGET_SIMD)
return false;
- machine_mode vmode, imode;
+ machine_mode vmode;
+ scalar_int_mode imode;
unsigned HOST_WIDE_INT ival;
if (GET_CODE (x) == CONST_DOUBLE
imode = int_mode_for_mode (mode).require ();
}
else if (GET_CODE (x) == CONST_INT
- && SCALAR_INT_MODE_P (mode))
- {
- imode = mode;
- ival = INTVAL (x);
- }
+ && is_a <scalar_int_mode> (mode, &imode))
+ ival = INTVAL (x);
else
return false;
/* use a 64 bit mode for everything except for DI/DF mode, where we use
a 128 bit vector mode. */
- int width = GET_MODE_BITSIZE (mode) == 64 ? 128 : 64;
+ int width = GET_MODE_BITSIZE (imode) == 64 ? 128 : 64;
vmode = aarch64_simd_container_mode (imode, width);
rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, ival);
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
- index = exact_log2 (GET_MODE_SIZE (GET_MODE (diff_vec)));
+ scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (diff_vec));
+ index = exact_log2 (GET_MODE_SIZE (mode));
gcc_assert (index >= 0 && index <= 3);
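(For reference: entry sizes of 1, 2, 4 or 8 bytes, i.e. QI, HI, SI or DImode, give exact_log2 values of 0, 1, 2 or 3, which is exactly the range the assert accepts.)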
static rtx
aarch64_strip_extend (rtx x, bool strip_shift)
{
+ scalar_int_mode mode;
rtx op = x;
+ if (!is_a <scalar_int_mode> (GET_MODE (op), &mode))
+ return op;
+
/* Zero and sign extraction of a widened value. */
if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
&& XEXP (op, 2) == const0_rtx
&& GET_CODE (XEXP (op, 0)) == MULT
- && aarch64_is_extend_from_extract (GET_MODE (op), XEXP (XEXP (op, 0), 1),
+ && aarch64_is_extend_from_extract (mode, XEXP (XEXP (op, 0), 1),
XEXP (op, 1)))
return XEXP (XEXP (op, 0), 0);
/* Return true if the RTX X in mode MODE is a zero or sign extract
usable in an ADD or SUB (extended register) instruction. */
static bool
-aarch64_rtx_arith_op_extract_p (rtx x, machine_mode mode)
+aarch64_rtx_arith_op_extract_p (rtx x, scalar_int_mode mode)
{
/* Catch add with a sign extract.
This is add_<optab><mode>_multp2. */
aarch64_extr_rtx_p (rtx x, rtx *res_op0, rtx *res_op1)
{
rtx op0, op1;
- machine_mode mode = GET_MODE (x);
+ scalar_int_mode mode;
+ if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
+ return false;
*res_op0 = NULL_RTX;
*res_op1 = NULL_RTX;
mode MODE. See the *andim_ashift<mode>_bfiz pattern. */
bool
-aarch64_mask_and_shift_for_ubfiz_p (machine_mode mode, rtx mask, rtx shft_amnt)
+aarch64_mask_and_shift_for_ubfiz_p (scalar_int_mode mode, rtx mask,
+ rtx shft_amnt)
{
return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
&& INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
if ((GET_CODE (op1) == ZERO_EXTEND
|| GET_CODE (op1) == SIGN_EXTEND)
&& CONST_INT_P (XEXP (op0, 1))
- && (GET_MODE_BITSIZE (GET_MODE (XEXP (op1, 0)))
- >= INTVAL (XEXP (op0, 1))))
+ && is_a <scalar_int_mode> (GET_MODE (XEXP (op1, 0)), &int_mode)
+ && GET_MODE_BITSIZE (int_mode) >= INTVAL (XEXP (op0, 1)))
op1 = XEXP (op1, 0);
if (CONST_INT_P (op1))
proportionally expensive to the number of instructions
required to build that constant. This is true whether we
are compiling for SPEED or otherwise. */
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
+ int_mode = word_mode;
*cost = COSTS_N_INSNS (aarch64_internal_mov_immediate
- (NULL_RTX, x, false, mode));
+ (NULL_RTX, x, false, int_mode));
}
return true;
bool succeed = aarch64_reinterpret_float_as_int (x, &ival);
gcc_assert (succeed);
- machine_mode imode = (mode == HFmode
- ? SImode
- : int_mode_for_mode (mode).require ());
+ scalar_int_mode imode = (mode == HFmode
+ ? SImode
+ : int_mode_for_mode (mode).require ());
int ncost = aarch64_internal_mov_immediate
(NULL_RTX, gen_int_mode (ival, imode), false, imode);
*cost += COSTS_N_INSNS (ncost);
}
/* Look for SUB (extended register). */
- if (aarch64_rtx_arith_op_extract_p (op1, mode))
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && aarch64_rtx_arith_op_extract_p (op1, int_mode))
{
if (speed)
*cost += extra_cost->alu.extend_arith;
*cost += rtx_cost (op1, mode, PLUS, 1, speed);
/* Look for ADD (extended register). */
- if (aarch64_rtx_arith_op_extract_p (op0, mode))
+ if (is_a <scalar_int_mode> (mode, &int_mode)
+ && aarch64_rtx_arith_op_extract_p (op0, int_mode))
{
if (speed)
*cost += extra_cost->alu.extend_arith;
/* Check OP is a legal scalar immediate for the MOVI instruction. */
bool
-aarch64_simd_scalar_immediate_valid_for_move (rtx op, machine_mode mode)
+aarch64_simd_scalar_immediate_valid_for_move (rtx op, scalar_int_mode mode)
{
machine_mode vmode;
- gcc_assert (!VECTOR_MODE_P (mode));
- vmode = aarch64_preferred_simd_mode (as_a <scalar_mode> (mode));
+ vmode = aarch64_preferred_simd_mode (mode);
rtx op_v = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (op));
return aarch64_simd_valid_immediate (op_v, vmode, false, NULL);
}
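
The dropped gcc_assert (!VECTOR_MODE_P (mode)) is now enforced by the type system: a scalar_int_mode argument cannot name a vector mode, and the explicit as_a <scalar_mode> cast is no longer needed for such an argument. The same reasoning applies to the identical assert removed below.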
}
char*
-aarch64_output_scalar_simd_mov_immediate (rtx immediate, machine_mode mode)
+aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
{
/* If a floating point number was passed and we desire to use it in an
a 128 bit vector mode. */
int width = GET_MODE_BITSIZE (mode) == 64 ? 128 : 64;
- gcc_assert (!VECTOR_MODE_P (mode));
vmode = aarch64_simd_container_mode (mode, width);
rtx v_op = aarch64_simd_gen_const_vector_dup (vmode, INTVAL (immediate));
return aarch64_output_simd_mov_immediate (v_op, vmode, width);
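
(For reference: a DImode immediate has GET_MODE_BITSIZE of 64 and therefore gets a 128-bit container such as V2DImode, while SImode and narrower immediates use a 64-bit container such as V2SImode, matching the width choice made in the earlier hunk.)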