UNSPEC_LD1_GATHER))]
"TARGET_SVE"
{
- operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_ST1_SCATTER))]
"TARGET_SVE"
{
- operands[5] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[5] = aarch64_ptrue_reg (<VPRED>mode);
}
)
{
if (MEM_P (operands[1]))
{
- rtx ptrue = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ rtx ptrue = aarch64_ptrue_reg (<VPRED>mode);
emit_insn (gen_sve_ld1r<mode> (operands[0], ptrue, operands[1],
CONST0_RTX (<MODE>mode)));
DONE;
UNSPEC_LDN))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_STN))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
(match_dup 3)))]
"TARGET_SVE"
{
- operands[3] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
+ operands[3] = aarch64_ptrue_reg (<MODE>mode);
}
)
(match_dup 2)))]
"TARGET_SVE"
{
- operands[2] = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
+ operands[2] = aarch64_ptrue_reg (<MODE>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
(pc)))]
""
{
- rtx ptrue = force_reg (<MODE>mode, CONSTM1_RTX (<MODE>mode));
+ rtx ptrue = aarch64_ptrue_reg (<MODE>mode);
rtx pred;
if (operands[2] == CONST0_RTX (<MODE>mode))
pred = operands[1];
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_ADDV))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_FADDV))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
MAXMINV))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
FMAXMINV))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
BITWISEV))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_FADDA))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[4] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[4] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
{
- operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ operands[2] = aarch64_ptrue_reg (<VPRED>mode);
}
)
? gen_aarch64_sve_zip2<mode>
: gen_aarch64_sve_zip1<mode>)
(temp, operands[1], operands[1]));
- rtx ptrue = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
+ rtx ptrue = aarch64_ptrue_reg (<VWIDE_PRED>mode);
emit_insn (gen_aarch64_sve_extend<mode><Vwide>2 (operands[0],
ptrue, temp));
DONE;
? gen_aarch64_sve_zip2vnx4si
: gen_aarch64_sve_zip1vnx4si)
(temp, operands[1], operands[1]));
- rtx ptrue = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
+ rtx ptrue = aarch64_ptrue_reg (VNx2BImode);
emit_insn (gen_aarch64_sve_<FLOATUORS:optab>vnx4sivnx2df2 (operands[0],
ptrue, temp));
DONE;
(unspec:SVE_HSF [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
- operands[3] = force_reg (<VWIDE_PRED>mode, CONSTM1_RTX (<VWIDE_PRED>mode));
+ operands[3] = aarch64_ptrue_reg (<VWIDE_PRED>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
operands[5] = gen_reg_rtx (<MODE>mode);
}
(unspec:VNx4SI [(match_dup 4) (match_dup 5)] UNSPEC_UZP1))]
"TARGET_SVE"
{
- operands[3] = force_reg (VNx2BImode, CONSTM1_RTX (VNx2BImode));
+ operands[3] = aarch64_ptrue_reg (VNx2BImode);
operands[4] = gen_reg_rtx (VNx4SImode);
operands[5] = gen_reg_rtx (VNx4SImode);
}
(match_operand:SVE_I 2 "register_operand"))]
"TARGET_SVE"
{
- rtx pred = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
+ rtx pred = aarch64_ptrue_reg (<VPRED>mode);
emit_insn (gen_aarch64_<su>abd<mode>_3 (operands[0], pred, operands[1],
operands[2]));
DONE;
}
}
+/* Return an all-true predicate register of mode MODE, forcing the
+   CONSTM1 (all-ones) constant into a register if necessary.  Callers
+   use the result as the governing PTRUE predicate for SVE patterns.  */
+
+rtx
+aarch64_ptrue_reg (machine_mode mode)
+{
+  /* Only predicate (boolean vector) modes make sense here.  */
+  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
+  return force_reg (mode, CONSTM1_RTX (mode));
+}
+
/* Return true if we can move VALUE into a register using a single
CNT[BHWD] instruction. */
machine_mode mode = GET_MODE (dest);
unsigned int elem_bytes = GET_MODE_UNIT_SIZE (mode);
machine_mode pred_mode = aarch64_sve_pred_mode (elem_bytes).require ();
- rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
+ rtx ptrue = aarch64_ptrue_reg (pred_mode);
src = gen_rtx_UNSPEC (mode, gen_rtvec (2, ptrue, src), UNSPEC_LD1RQ);
emit_insn (gen_rtx_SET (dest, src));
return true;
aarch64_expand_sve_mem_move (rtx dest, rtx src, machine_mode pred_mode)
{
machine_mode mode = GET_MODE (dest);
- rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
+ rtx ptrue = aarch64_ptrue_reg (pred_mode);
if (!register_operand (src, mode)
&& !register_operand (dest, mode))
{
return false;
/* Generate *aarch64_sve_mov<mode>_subreg_be. */
- rtx ptrue = force_reg (VNx16BImode, CONSTM1_RTX (VNx16BImode));
+ rtx ptrue = aarch64_ptrue_reg (VNx16BImode);
rtx unspec = gen_rtx_UNSPEC (GET_MODE (dest), gen_rtvec (2, ptrue, src),
UNSPEC_REV_SUBREG);
emit_insn (gen_rtx_SET (dest, unspec));
rtx src = gen_rtx_UNSPEC (d->vmode, gen_rtvec (1, d->op0), unspec);
if (d->vec_flags == VEC_SVE_DATA)
{
- rtx pred = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
+ rtx pred = aarch64_ptrue_reg (pred_mode);
src = gen_rtx_UNSPEC (d->vmode, gen_rtvec (2, pred, src),
UNSPEC_MERGE_PTRUE);
}
if (!aarch64_sve_cmp_operand_p (code, op1))
op1 = force_reg (data_mode, op1);
- rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
+ rtx ptrue = aarch64_ptrue_reg (pred_mode);
rtx cond = gen_rtx_fmt_ee (code, pred_mode, op0, op1);
aarch64_emit_sve_ptrue_op_cc (target, ptrue, cond);
}
machine_mode pred_mode = GET_MODE (target);
machine_mode data_mode = GET_MODE (op0);
- rtx ptrue = force_reg (pred_mode, CONSTM1_RTX (pred_mode));
+ rtx ptrue = aarch64_ptrue_reg (pred_mode);
switch (code)
{
case UNORDERED: