case UNSPEC_VRINTR:
case UNSPEC_VRINTX:
case UNSPEC_VRINTA:
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[GET_MODE (x) == DFmode].roundint;
{
machine_mode mode = GET_MODE (x);
+ *cost = COSTS_N_INSNS (1);
+
if (TARGET_THUMB1)
{
if (speed_p)
bool is_ldm = load_multiple_operation (x, SImode);
bool is_stm = store_multiple_operation (x, SImode);
- *cost = COSTS_N_INSNS (1);
-
if (is_ldm || is_stm)
{
if (speed_p)
case UDIV:
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
- *cost = COSTS_N_INSNS (speed_p
- ? extra_cost->fp[mode != SFmode].div : 1);
+ *cost += COSTS_N_INSNS (speed_p
+ ? extra_cost->fp[mode != SFmode].div : 0);
else if (mode == SImode && TARGET_IDIV)
- *cost = COSTS_N_INSNS (speed_p ? extra_cost->mult[0].idiv : 1);
+ *cost += COSTS_N_INSNS (speed_p ? extra_cost->mult[0].idiv : 0);
else
*cost = LIBCALL_COST (2);
return false; /* All arguments must be in registers. */
case ROTATE:
if (mode == SImode && REG_P (XEXP (x, 1)))
{
- *cost = (COSTS_N_INSNS (2)
-          + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+ *cost += (COSTS_N_INSNS (1)
+           + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
if (speed_p)
*cost += extra_cost->alu.shift_reg;
case ASHIFTRT:
if (mode == DImode && CONST_INT_P (XEXP (x, 1)))
{
- *cost = (COSTS_N_INSNS (3)
-          + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+ *cost += (COSTS_N_INSNS (2)
+           + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
if (speed_p)
*cost += 2 * extra_cost->alu.shift;
}
else if (mode == SImode)
{
- *cost = (COSTS_N_INSNS (1)
- + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+ *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
/* Slightly disparage register shifts at -Os, but not by much. */
if (!CONST_INT_P (XEXP (x, 1)))
*cost += (speed_p ? extra_cost->alu.shift_reg : 1
{
if (code == ASHIFT)
{
- *cost = (COSTS_N_INSNS (1)
- + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
+ *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
/* Slightly disparage register shifts at -Os, but not by
much. */
if (!CONST_INT_P (XEXP (x, 1)))
if (arm_arch_thumb2 && CONST_INT_P (XEXP (x, 1)))
{
/* Can use SBFX/UBFX. */
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.bfx;
*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
}
else
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
if (speed_p)
{
}
else /* Rotates. */
{
- *cost = COSTS_N_INSNS (3 + !CONST_INT_P (XEXP (x, 1)));
+ *cost = COSTS_N_INSNS (2 + !CONST_INT_P (XEXP (x, 1)));
*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
if (speed_p)
{
{
if (mode == SImode)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.rev;
and thumb_legacy_rev for the form of RTL used then. */
if (TARGET_THUMB)
{
- *cost = COSTS_N_INSNS (10);
+ *cost += COSTS_N_INSNS (9);
if (speed_p)
{
}
else
{
- *cost = COSTS_N_INSNS (5);
+ *cost += COSTS_N_INSNS (4);
if (speed_p)
{
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
{
- *cost = COSTS_N_INSNS (1);
if (GET_CODE (XEXP (x, 0)) == MULT
|| GET_CODE (XEXP (x, 1)) == MULT)
{
rtx shift_op;
rtx non_shift_op;
- *cost = COSTS_N_INSNS (1);
-
shift_op = shifter_op_p (XEXP (x, 0), &shift_by_reg);
if (shift_op == NULL)
{
HANDLE_NARROW_SHIFT_ARITH (MINUS, 1)
/* Slightly disparage, as we might need to widen the result. */
- *cost = 1 + COSTS_N_INSNS (1);
+ *cost += 1;
if (speed_p)
*cost += extra_cost->alu.arith;
if (mode == DImode)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
{
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
{
- *cost = COSTS_N_INSNS (1);
if (GET_CODE (XEXP (x, 0)) == MULT)
{
rtx mul_op0, mul_op1, add_op;
/* Slightly penalize a narrow operation as the result may
need widening. */
- *cost = 1 + COSTS_N_INSNS (1);
+ *cost += 1;
if (speed_p)
*cost += extra_cost->alu.arith;
{
rtx shift_op, shift_reg;
- *cost = COSTS_N_INSNS (1);
if (TARGET_INT_SIMD
&& (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
{
rtx mul_op = XEXP (x, 0);
- *cost = COSTS_N_INSNS (1);
-
if (TARGET_DSP_MULTIPLY
&& ((GET_CODE (XEXP (mul_op, 0)) == SIGN_EXTEND
&& (GET_CODE (XEXP (mul_op, 1)) == SIGN_EXTEND
|| (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == SIGN_EXTEND)))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->mult[1].extend_add;
*cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), mode,
return true;
}
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
case IOR:
if (mode == SImode && arm_arch6 && aarch_rev16_p (x))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.rev;
rtx op0 = XEXP (x, 0);
rtx shift_op, shift_reg;
- *cost = COSTS_N_INSNS (1);
-
if (subcode == NOT
&& (code == AND
|| (code == IOR && TARGET_THUMB2)))
rtx op0 = XEXP (x, 0);
enum rtx_code subcode = GET_CODE (op0);
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (subcode == NOT
&& (code == AND
{
rtx op0 = XEXP (x, 0);
- *cost = COSTS_N_INSNS (1);
-
if (GET_CODE (op0) == NEG)
op0 = XEXP (op0, 0);
if (mode == SImode)
{
- *cost = COSTS_N_INSNS (1);
if (TARGET_DSP_MULTIPLY
&& ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
&& (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
|| (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
&& GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->mult[1].extend;
*cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode,
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode != SFmode].neg;
{
if (GET_CODE (XEXP (x, 0)) == ABS)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
/* Assume the non-flag-changing variant. */
if (speed_p)
*cost += (extra_cost->alu.log_shift
if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE
|| GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
/* No extra cost for MOV imm and MVN imm. */
/* If the comparison op is using the flags, there's no further
cost, otherwise we need to add the cost of the comparison. */
}
return true;
}
- *cost = COSTS_N_INSNS (1);
+
if (speed_p)
*cost += extra_cost->alu.arith;
return false;
&& GET_MODE_SIZE (mode) < 4)
{
/* Slightly disparage, as we might need an extend operation. */
- *cost = 1 + COSTS_N_INSNS (1);
+ *cost += 1;
if (speed_p)
*cost += extra_cost->alu.arith;
return false;
if (mode == DImode)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += 2 * extra_cost->alu.arith;
return false;
rtx shift_op;
rtx shift_reg = NULL;
- *cost = COSTS_N_INSNS (1);
shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
if (shift_op)
}
if (mode == DImode)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
return false;
}
{
if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
{
- *cost = COSTS_N_INSNS (4);
+ *cost += COSTS_N_INSNS (3);
return true;
}
int op1cost = rtx_cost (XEXP (x, 1), mode, SET, 1, speed_p);
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (op0mode) == MODE_FLOAT
&& (op0mode == SFmode || !TARGET_VFP_SINGLE))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[op0mode != SFmode].compare;
/* DImode compares normally take two insns. */
if (op0mode == DImode)
{
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += 2 * extra_cost->alu.arith;
return false;
shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
if (shift_op != NULL)
{
- *cost = COSTS_N_INSNS (1);
if (shift_reg != NULL)
{
*cost += rtx_cost (shift_reg, op0mode, ASHIFT,
return true;
}
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.arith;
if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM
    && XEXP (x, 1) == const0_rtx)
{
/* Thumb also needs an IT insn. */
- *cost = COSTS_N_INSNS (TARGET_THUMB ? 3 : 2);
+ *cost += COSTS_N_INSNS (TARGET_THUMB ? 2 : 1);
return true;
}
if (XEXP (x, 1) == const0_rtx)
{
case LT:
/* LSR Rd, Rn, #31. */
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.shift;
break;
case NE:
/* SUBS T1, Rn, #1
SBC Rd, Rn, T1. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
break;
case LE:
/* RSBS T1, Rn, Rn, LSR #31
ADC Rd, Rn, T1. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.arith_shift;
break;
case GT:
/* RSB Rd, Rn, Rn, ASR #1
LSR Rd, Rd, #31. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += (extra_cost->alu.arith_shift
+ extra_cost->alu.shift);
case GE:
/* ASR Rd, Rn, #31
ADD Rd, Rn, #1. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.shift;
break;
}
else
{
- *cost = COSTS_N_INSNS (TARGET_THUMB ? 4 : 3);
+ *cost += COSTS_N_INSNS (TARGET_THUMB ? 3 : 2);
if (CONST_INT_P (XEXP (x, 1))
&& const_ok_for_op (INTVAL (XEXP (x, 1)), COMPARE))
{
if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
&& (mode == SFmode || !TARGET_VFP_SINGLE))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode != SFmode].neg;
if (mode == SImode)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.log_shift + extra_cost->alu.arith_shift;
return false;
if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
{
/* We have SXTB/SXTH. */
- *cost = COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
if (speed_p)
*cost += extra_cost->alu.extend;
else if (GET_MODE (XEXP (x, 0)) != SImode)
{
/* Needs two shifts. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
if (speed_p)
*cost += 2 * extra_cost->alu.shift;
optimizing for speed it should never be slower to use
AND, and we don't really model 16-bit vs 32-bit insns
here. */
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.logical;
}
else if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
{
/* We have UXTB/UXTH. */
- *cost = COSTS_N_INSNS (1);
*cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
if (speed_p)
*cost += extra_cost->alu.extend;
if (speed_p)
*cost += 2 * extra_cost->alu.shift;
}
- else /* GET_MODE (XEXP (x, 0)) == SImode. */
- *cost = COSTS_N_INSNS (1);
/* Widening beyond 32-bits requires one more insn. */
if (mode == DImode)
if (speed_p)
{
if (arm_arch_thumb2 && !flag_pic)
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
else
- *cost = COSTS_N_INSNS (1) + extra_cost->ldst.load;
+ *cost += extra_cost->ldst.load;
}
else
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (flag_pic)
{
{
if (vfp3_const_double_rtx (x))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].fpconst;
return true;
if (speed_p)
{
- *cost = COSTS_N_INSNS (1);
if (mode == DFmode)
*cost += extra_cost->ldst.loadd;
else
*cost += extra_cost->ldst.loadf;
}
else
- *cost = COSTS_N_INSNS (2 + (mode == DFmode));
+ *cost += COSTS_N_INSNS (1 + (mode == DFmode));
return true;
}
case HIGH:
case LO_SUM:
- *cost = COSTS_N_INSNS (1);
/* When optimizing for size, we prefer constant pool entries to
MOVW/MOVT pairs, so bump the cost of these slightly. */
if (!speed_p)
return true;
case CLZ:
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.clz;
return false;
case SMIN:
if (XEXP (x, 1) == const0_rtx)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.log_shift;
*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
case SMAX:
case UMIN:
case UMAX:
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
return false;
case TRUNCATE:
&& (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
== ZERO_EXTEND))))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->mult[1].extend;
*cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), VOIDmode,
&& CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2)))
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->alu.bfx;
*cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
return true;
}
/* Without UBFX/SBFX, need to resort to shift operations. */
- *cost = COSTS_N_INSNS (2);
+ *cost += COSTS_N_INSNS (1);
if (speed_p)
*cost += 2 * extra_cost->alu.shift;
*cost += rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed_p);
case FLOAT_EXTEND:
if (TARGET_HARD_FLOAT)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].widen;
if (!TARGET_FPU_ARMV8
case FLOAT_TRUNCATE:
if (TARGET_HARD_FLOAT)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].narrow;
*cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
rtx op1 = XEXP (x, 1);
rtx op2 = XEXP (x, 2);
- *cost = COSTS_N_INSNS (1);
/* vfms or vfnma. */
if (GET_CODE (op0) == NEG)
{
if (GET_MODE_CLASS (mode) == MODE_INT)
{
- *cost = COSTS_N_INSNS (1);
mode = GET_MODE (XEXP (x, 0));
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].toint;
else if (GET_MODE_CLASS (mode) == MODE_FLOAT
&& TARGET_FPU_ARMV8)
{
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].roundint;
return false;
{
/* ??? Increase the cost to deal with transferring from CORE
-> FP registers? */
- *cost = COSTS_N_INSNS (1);
if (speed_p)
*cost += extra_cost->fp[mode == DFmode].fromint;
return false;
return false;
case CALL:
- *cost = COSTS_N_INSNS (1);
return true;
case ASM_OPERANDS: