(use (match_operand 4 "immediate_operand"))]
""
{
- rtx addr1, addr2, out, outlow, count, countreg, align;
+ rtx addr1, addr2, countreg, align, out;
if (optimize_insn_for_size_p () && !TARGET_INLINE_ALL_STRINGOPS)
FAIL;
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (t2, 0), 0)) == STRING_CST)))
FAIL;
- out = operands[0];
- if (!REG_P (out))
- out = gen_reg_rtx (SImode);
-
addr1 = copy_addr_to_reg (XEXP (operands[1], 0));
addr2 = copy_addr_to_reg (XEXP (operands[2], 0));
if (addr1 != XEXP (operands[1], 0))
  operands[1] = replace_equiv_address_nv (operands[1], addr1);
if (addr2 != XEXP (operands[2], 0))
  operands[2] = replace_equiv_address_nv (operands[2], addr2);
- count = operands[3];
- countreg = ix86_zero_extend_to_Pmode (count);
+ countreg = ix86_zero_extend_to_Pmode (operands[3]);
/* %%% Iff we are testing strict equality, we can use known alignment
to good advantage. This may be possible with combine, particularly
once cc0 is dead. */
align = operands[4];
- if (CONST_INT_P (count))
+ if (CONST_INT_P (operands[3]))
{
- if (INTVAL (count) == 0)
+ if (operands[3] == const0_rtx)
{
emit_move_insn (operands[0], const0_rtx);
DONE;
operands[1], operands[2]));
}
- outlow = gen_lowpart (QImode, out);
- emit_insn (gen_cmpintqi (outlow));
- emit_move_insn (out, gen_rtx_SIGN_EXTEND (SImode, outlow));
-
- if (operands[0] != out)
- emit_move_insn (operands[0], out);
+ out = gen_lowpart (QImode, operands[0]);
+ emit_insn (gen_cmpintqi (out));
+ emit_move_insn (operands[0], gen_rtx_SIGN_EXTEND (SImode, out));
DONE;
})
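The const0_rtx pointer comparisons introduced throughout this patch rely on CONST_INT rtxes being shared objects in GCC's RTL, so identity against const0_rtx is the same test as checking for a CONST_INT with value zero. A minimal sketch of that equivalence, assuming GCC-internal types (the helper name is hypothetical and the snippet only compiles inside the GCC source tree):

/* Sketch only: CONST_INT rtxes are interned, so pointer identity with
   const0_rtx means "this operand is the integer constant zero".
   The helper name is hypothetical, for illustration.  */
static bool
is_const_zero (rtx x)
{
  /* Same result as: CONST_INT_P (x) && INTVAL (x) == 0.  */
  return x == const0_rtx;
}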
(match_operand:SI 2 "const_int_operand"))]
"TARGET_3DNOW || TARGET_PREFETCH_SSE || TARGET_PRFCHW || TARGET_PREFETCHWT1"
{
- bool write = INTVAL (operands[1]) != 0;
+ bool write = operands[1] != const0_rtx;
int locality = INTVAL (operands[2]);
gcc_assert (IN_RANGE (locality, 0, 3));
(const_int 3))]
"TARGET_3DNOW || TARGET_PRFCHW || TARGET_PREFETCHWT1"
{
- if (INTVAL (operands[1]) == 0)
+ if (operands[1] == const0_rtx)
return "prefetch\t%a0";
else
return "prefetchw\t%a0";