enum tls_model tls_kind = TLS_MODEL_NONE;
poly_int64 offset;
addr = strip_offset_and_salt (addr, &offset);
- if (GET_CODE (addr) == SYMBOL_REF)
+ if (SYMBOL_REF_P (addr))
tls_kind = SYMBOL_REF_TLS_MODEL (addr);
return tls_kind;
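For reference, every predicate macro introduced by these hunks is plain shorthand for the GET_CODE comparison it replaces, so the conversion is purely mechanical. A minimal sketch of the relevant definitions, paraphrased from gcc/rtl.h (check the tree the patch applies to for the authoritative forms):

/* Paraphrased from gcc/rtl.h; each predicate expands to the GET_CODE test it replaces.  */
#define REG_P(X)          (GET_CODE (X) == REG)
#define MEM_P(X)          (GET_CODE (X) == MEM)
#define SUBREG_P(X)       (GET_CODE (X) == SUBREG)
#define CONST_INT_P(X)    (GET_CODE (X) == CONST_INT)
#define CONST_DOUBLE_P(X) (GET_CODE (X) == CONST_DOUBLE)
#define SYMBOL_REF_P(X)   (GET_CODE (X) == SYMBOL_REF)
#define LABEL_REF_P(X)    (GET_CODE (X) == LABEL_REF)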
/* The operand is expected to be a MEM. Whenever the related insn
pattern changes, the above code which calculates mem should be
updated. */
- gcc_assert (GET_CODE (mem) == MEM);
+ gcc_assert (MEM_P (mem));
MEM_READONLY_P (mem) = 1;
MEM_NOTRAP_P (mem) = 1;
emit_insn (insn);
mem = XVECEXP (XEXP (SET_SRC (insn), 0), 0, 0);
}
- gcc_assert (GET_CODE (mem) == MEM);
+ gcc_assert (MEM_P (mem));
MEM_READONLY_P (mem) = 1;
MEM_NOTRAP_P (mem) = 1;
emit_insn (insn);
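The gcc_assert (MEM_P (mem)) checks guard the pattern-coupled extraction noted in the comment above. A rough sketch of why the SET_SRC/XEXP/XVECEXP chain lands on a MEM; this is an assumed shape for illustration only, the authoritative pattern is the ldr-got insn in aarch64.md, which this patch does not touch:

/* Assumed RTL shape, for illustration only:
     (set (reg:DI dest)
          (zero_extend:DI
            (unspec:SI [(mem:SI ...)] ...)))
   SET_SRC (insn)                            -> the zero_extend
   XEXP (SET_SRC (insn), 0)                  -> the unspec
   XVECEXP (XEXP (SET_SRC (insn), 0), 0, 0)  -> the mem that is then
   marked MEM_READONLY_P and MEM_NOTRAP_P.  */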
bool
aarch64_mov128_immediate (rtx imm)
{
- if (GET_CODE (imm) == CONST_INT)
+ if (CONST_INT_P (imm))
return true;
gcc_assert (CONST_WIDE_INT_NUNITS (imm) == 2);
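As an aside on why the assert expects exactly two elements: a 128-bit immediate that fits in a single HOST_WIDE_INT is represented as a CONST_INT (handled by the early return above), while anything wider arrives as a CONST_WIDE_INT whose 64-bit halves can be inspected separately. A minimal sketch, not taken from this patch, of how the two halves would be pulled out:

/* Hypothetical illustration: split a two-element CONST_WIDE_INT into
   its low and high 64-bit halves.  */
rtx lo = GEN_INT (CONST_WIDE_INT_ELT (imm, 0));   /* low 64 bits  */
rtx hi = GEN_INT (CONST_WIDE_INT_ELT (imm, 1));   /* high 64 bits */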
/* Check on what type of symbol it is. */
scalar_int_mode int_mode;
- if ((GET_CODE (imm) == SYMBOL_REF
- || GET_CODE (imm) == LABEL_REF
+ if ((SYMBOL_REF_P (imm)
+ || LABEL_REF_P (imm)
|| GET_CODE (imm) == CONST
|| GET_CODE (imm) == CONST_POLY_INT)
&& is_a <scalar_int_mode> (mode, &int_mode))
aarch64_maybe_expand_sve_subreg_move (rtx dest, rtx src)
{
gcc_assert (BYTES_BIG_ENDIAN);
- if (GET_CODE (dest) == SUBREG)
+ if (SUBREG_P (dest))
dest = SUBREG_REG (dest);
- if (GET_CODE (src) == SUBREG)
+ if (SUBREG_P (src))
src = SUBREG_REG (src);
/* The optimization handles two single SVE REGs with different element
FOR_EACH_SUBRTX (iter, array, x, ALL)
{
const_rtx x = *iter;
- if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
+ if (SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0)
return true;
/* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
TLS offsets, not real symbol references. */
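That comment motivates skipping UNSPEC_TLS sub-rtxes during the walk; the lines that do so are not part of this excerpt, but a hedged sketch of the kind of check it describes (the UNSPEC_TLS name and the iterator call are assumptions here) would be:

/* Assumed sketch, not part of this hunk: do not walk into TLS unspecs,
   since their operands are offsets rather than real symbol references.  */
if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
  iter.skip_subrtxes ();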
poly_int64 offset;
rtx base = strip_offset_and_salt (x, &offset);
- if (GET_CODE (base) == SYMBOL_REF || GET_CODE (base) == LABEL_REF)
+ if (SYMBOL_REF_P (base) || LABEL_REF_P (base))
{
/* We checked for POLY_INT_CST offsets above. */
if (aarch64_classify_symbol (base, offset.to_constant ())
aarch64_base_register_rtx_p (rtx x, bool strict_p)
{
if (!strict_p
- && GET_CODE (x) == SUBREG
+ && SUBREG_P (x)
&& contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (x))])
x = SUBREG_REG (x);
int shift;
/* (reg:P) */
- if ((REG_P (x) || GET_CODE (x) == SUBREG)
+ if ((REG_P (x) || SUBREG_P (x))
&& GET_MODE (x) == Pmode)
{
type = ADDRESS_REG_REG;
return false;
if (!strict_p
- && GET_CODE (index) == SUBREG
+ && SUBREG_P (index)
&& contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
index = SUBREG_REG (index);
{
poly_int64 offset;
rtx sym = strip_offset_and_salt (x, &offset);
- return ((GET_CODE (sym) == LABEL_REF
- || (GET_CODE (sym) == SYMBOL_REF
+ return ((LABEL_REF_P (sym)
+ || (SYMBOL_REF_P (sym)
&& CONSTANT_POOL_ADDRESS_P (sym)
&& aarch64_pcrelative_literal_loads)));
}
poly_int64 offset;
HOST_WIDE_INT const_offset;
rtx sym = strip_offset_and_salt (info->offset, &offset);
- if (GET_CODE (sym) == SYMBOL_REF
+ if (SYMBOL_REF_P (sym)
&& offset.is_constant (&const_offset)
&& (aarch64_classify_symbol (sym, const_offset)
== SYMBOL_SMALL_ABSOLUTE))
{
poly_int64 offset;
x = strip_offset_and_salt (x, &offset);
- return GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF;
+ return SYMBOL_REF_P (x) || LABEL_REF_P (x);
}
/* Classify the base of symbolic expression X. */
}
scalar_float_mode mode;
- if (GET_CODE (value) != CONST_DOUBLE
+ if (!CONST_DOUBLE_P (value)
|| !is_a <scalar_float_mode> (GET_MODE (value), &mode)
|| GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
/* Only support up to DF mode. */
mov/movk pairs over ldr/adrp pairs. */
unsigned HOST_WIDE_INT ival;
- if (GET_CODE (x) == CONST_DOUBLE
+ if (CONST_DOUBLE_P (x)
&& SCALAR_FLOAT_MODE_P (mode)
&& aarch64_reinterpret_float_as_int (x, &ival))
{
scalar_int_mode imode;
unsigned HOST_WIDE_INT ival;
- if (GET_CODE (x) == CONST_DOUBLE
+ if (CONST_DOUBLE_P (x)
&& SCALAR_FLOAT_MODE_P (mode))
{
if (!aarch64_reinterpret_float_as_int (x, &ival))
imode = int_mode_for_mode (mode).require ();
}
- else if (GET_CODE (x) == CONST_INT
+ else if (CONST_INT_P (x)
&& is_a <scalar_int_mode> (mode, &imode))
ival = INTVAL (x);
else
the comparison will have to be swapped when we emit the assembly
code. */
if ((mode_x == SImode || mode_x == DImode)
- && (REG_P (y) || GET_CODE (y) == SUBREG || y == const0_rtx)
+ && (REG_P (y) || SUBREG_P (y) || y == const0_rtx)
&& (code_x == ASHIFT || code_x == ASHIFTRT
|| code_x == LSHIFTRT
|| code_x == ZERO_EXTEND || code_x == SIGN_EXTEND))
/* Similarly for a negated operand, but we can only do this for
equalities. */
if ((mode_x == SImode || mode_x == DImode)
- && (REG_P (y) || GET_CODE (y) == SUBREG)
+ && (REG_P (y) || SUBREG_P (y))
&& (code == EQ || code == NE)
&& code_x == NEG)
return CC_Zmode;
{
machine_mode mode = GET_MODE (x);
- if (GET_CODE (x) != MEM
+ if (!MEM_P (x)
|| (code == 'y' && maybe_ne (GET_MODE_SIZE (mode), 16)))
{
output_operand_lossage ("invalid operand for '%%%c'", code);
const char *fmt;
int i;
- if (GET_CODE (x) == LABEL_REF)
+ if (LABEL_REF_P (x))
return true;
/* UNSPEC_TLS entries for a symbol include a LABEL_REF for the
/* If we have to disable direct literal pool loads and stores because the
function is too big, then we need a scratch register. */
- if (MEM_P (x) && GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x)
+ if (MEM_P (x) && SYMBOL_REF_P (x) && CONSTANT_POOL_ADDRESS_P (x)
&& (SCALAR_FLOAT_MODE_P (GET_MODE (x))
|| targetm.vector_mode_supported_p (GET_MODE (x)))
&& !aarch64_pcrelative_literal_loads)
rtx lhs = XEXP (x, 0);
/* Look through a possible SUBREG introduced by ILP32. */
- if (GET_CODE (lhs) == SUBREG)
+ if (SUBREG_P (lhs))
lhs = SUBREG_REG (lhs);
gcc_assert (REG_P (lhs));
if (!aarch64_classify_address (&info, x, mode, false))
{
- if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
+ if (GET_CODE (x) == CONST || SYMBOL_REF_P (x))
{
/* This is a CONST or SYMBOL ref which will be split
in a different way depending on the code model in use.
return false;
x = strip_salt (x);
- if (GET_CODE (x) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (x))
return false;
return SYMBOL_REF_TLS_MODEL (x) != 0;
{
x = strip_salt (x);
- if (GET_CODE (x) == LABEL_REF)
+ if (LABEL_REF_P (x))
{
switch (aarch64_cmodel)
{
}
}
- if (GET_CODE (x) == SYMBOL_REF)
+ if (SYMBOL_REF_P (x))
{
if (aarch64_tls_symbol_p (x))
return aarch64_classify_tls_symbol (x);
{
poly_int64 offset;
x = strip_offset_and_salt (x, &offset);
- if (GET_CODE (x) == SYMBOL_REF)
+ if (SYMBOL_REF_P (x))
return false;
return true;
return true;
/* Label references are always constant. */
- if (GET_CODE (x) == LABEL_REF)
+ if (LABEL_REF_P (x))
return true;
return false;
REAL_VALUE_TYPE r;
if (!const_vec_duplicate_p (x, &elt)
- || GET_CODE (elt) != CONST_DOUBLE)
+ || !CONST_DOUBLE_P (elt))
return false;
r = *CONST_DOUBLE_REAL_VALUE (elt);
rtx elt;
return (const_vec_duplicate_p (x, &elt)
- && GET_CODE (elt) == CONST_DOUBLE
+ && CONST_DOUBLE_P (elt)
&& (real_equal (CONST_DOUBLE_REAL_VALUE (elt), &dconsthalf)
|| real_equal (CONST_DOUBLE_REAL_VALUE (elt), &dconst2)));
}
}
x = strip_salt (x);
- if (GET_CODE (x) == SYMBOL_REF && mode == DImode && CONSTANT_ADDRESS_P (x))
+ if (SYMBOL_REF_P (x) && mode == DImode && CONSTANT_ADDRESS_P (x))
return true;
if (TARGET_SVE && aarch64_sve_cnt_immediate_p (x))
{
fusion = SCHED_FUSION_LD_SIGN_EXTEND;
src = XEXP (src, 0);
- if (GET_CODE (src) != MEM || GET_MODE (src) != SImode)
+ if (!MEM_P (src) || GET_MODE (src) != SImode)
return SCHED_FUSION_NONE;
}
else if (GET_CODE (src) == ZERO_EXTEND)
{
fusion = SCHED_FUSION_LD_ZERO_EXTEND;
src = XEXP (src, 0);
- if (GET_CODE (src) != MEM || GET_MODE (src) != SImode)
+ if (!MEM_P (src) || GET_MODE (src) != SImode)
return SCHED_FUSION_NONE;
}
- if (GET_CODE (src) == MEM && REG_P (dest))
+ if (MEM_P (src) && REG_P (dest))
extract_base_offset_in_addr (src, base, offset);
- else if (GET_CODE (dest) == MEM && (REG_P (src) || src == const0_rtx))
+ else if (MEM_P (dest) && (REG_P (src) || src == const0_rtx))
{
fusion = SCHED_FUSION_ST;
extract_base_offset_in_addr (dest, base, offset);