/ SYMBOL_FLAG_MACH_DEP)
#define TINY_ADIW(REG1, REG2, I) \
- "subi " #REG1 ",lo8(-(" #I "))" CR_TAB \
- "sbci " #REG2 ",hi8(-(" #I "))"
+ "subi " #REG1 ",lo8(-(" #I "))" CR_TAB \
+ "sbci " #REG2 ",hi8(-(" #I "))"
#define TINY_SBIW(REG1, REG2, I) \
- "subi " #REG1 ",lo8((" #I "))" CR_TAB \
- "sbci " #REG2 ",hi8((" #I "))"
+ "subi " #REG1 ",lo8((" #I "))" CR_TAB \
+ "sbci " #REG2 ",hi8((" #I "))"
#define AVR_TMP_REGNO (AVR_TINY ? TMP_REGNO_TINY : TMP_REGNO)
#define AVR_ZERO_REGNO (AVR_TINY ? ZERO_REGNO_TINY : ZERO_REGNO)
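The Tiny core has no ADIW/SBIW, so the two macros above synthesize a
16-bit pointer adjustment from SUBI/SBCI; adding I is done by
subtracting -I, because SUBI/SBCI are the only immediate forms
available.  A standalone sketch of the expansion (assuming CR_TAB is
"\n\t", as elsewhere in avr.c; not part of the patch):

  #include <stdio.h>

  #define CR_TAB "\n\t"
  #define TINY_ADIW(REG1, REG2, I) \
    "subi " #REG1 ",lo8(-(" #I "))" CR_TAB \
    "sbci " #REG2 ",hi8(-(" #I "))"

  int main (void)
  {
    /* Prints:
         subi r26,lo8(-(5))
         sbci r27,hi8(-(5))   */
    puts (TINY_ADIW (r26, r27, 5));
    return 0;
  }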
for (avr_current_device = avr_mcu_types; ; avr_current_device++)
{
if (!avr_current_device->name)
- fatal_error ("mcu not found");
+ fatal_error ("mcu not found");
if (!avr_current_device->macro
- && avr_current_device->arch == avr_arch_index)
- break;
+ && avr_current_device->arch == avr_arch_index)
+ break;
}
avr_current_arch = &avr_arch_types[avr_arch_index];
/* TINY core does not have regs r10-r16, but avr-dimode.md expects them
to be present */
if (AVR_TINY)
- avr_have_dimode = false;
+ avr_have_dimode = false;
}
return "";
}
-/*
-AVRTC-579
-if operand is symbol or constant expression with value > 0xbf
- return false, otherwise true
-This check is used to avoid lds/sts instruction with invalid memory
-access range (valid range 0x40..0xbf). For io operand range 0x0..0x3f,
-in/out instruction will be generated.
-*/
-bool tiny_valid_direct_memory_access_range(rtx op, enum machine_mode mode)
+
+/* AVRTC-579
+   If OP is a symbol or a constant expression with value > 0xbf,
+   return FALSE, otherwise TRUE.
+   This check is used to avoid LDS / STS instructions with an invalid
+   memory access range (valid range 0x40..0xbf).  For the I/O operand
+   range 0x0..0x3f, an IN / OUT instruction will be generated. */
+
+bool
+tiny_valid_direct_memory_access_range (rtx op, enum machine_mode mode)
{
rtx x;
if (!AVR_TINY)
return true;
- x = XEXP(op,0);
+ x = XEXP (op, 0);
- if (MEM_P(op) && x && (GET_CODE(x) == SYMBOL_REF))
- {
- return false;
- }
- if (MEM_P(op) && x && (CONSTANT_ADDRESS_P (x)) &&
- !(IN_RANGE (INTVAL (x), 0, 0xC0 - GET_MODE_SIZE (mode))))
- {
- return false;
- }
+ if (MEM_P (op) && x && GET_CODE (x) == SYMBOL_REF)
+ {
+ return false;
+ }
+
+ if (MEM_P (op) && x && (CONSTANT_ADDRESS_P (x))
+ && !(IN_RANGE (INTVAL (x), 0, 0xC0 - GET_MODE_SIZE (mode))))
+ {
+ return false;
+ }
return true;
}
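A host-side restatement of the constant-address case above may help
(a sketch, not GCC code; IN_RANGE and GET_MODE_SIZE are folded into
plain arithmetic):

  #include <stdbool.h>

  /* A direct LDS/STS access of SIZE bytes at ADDR is accepted only if
     the whole access ends at or below 0xC0.  Addresses 0x0..0x3f pass
     as well, since IN/OUT is emitted for those instead.  */
  static bool
  tiny_const_addr_ok (unsigned addr, unsigned size)
  {
    return addr + size <= 0xC0;   /* IN_RANGE (addr, 0, 0xC0 - size) */
  }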
return "";
}
+
/* Same as out_movqi_r_mr, but TINY does not have ADIW or SBIW */
+
static const char*
avr_out_movqi_r_mr_reg_disp_tiny (rtx_insn *insn, rtx op[], int *plen)
{
rtx x = XEXP (src, 0);
avr_asm_len (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld %0,%b1" , op, plen, -3);
+ "ld %0,%b1" , op, plen, -3);
if (!reg_overlap_mentioned_p (dest, XEXP (x,0))
- && !reg_unused_after (insn, XEXP (x,0)))
- avr_asm_len (TINY_SBIW (%I1, %J1, %o1), op, plen, 2);
+ && !reg_unused_after (insn, XEXP (x,0)))
+ avr_asm_len (TINY_SBIW (%I1, %J1, %o1), op, plen, 2);
return "";
}
return avr_asm_len ("ld %0,%1", op, plen, -1);
}
+
/* Same as movhi_r_mr, but TINY does not have ADIW, SBIW and LDD */
+
static const char*
avr_out_movhi_r_mr_reg_no_disp_tiny (rtx op[], int *plen)
{
int reg_base = true_regnum (base);
if (reg_dest == reg_base) /* R = (R) */
- return avr_asm_len ("ld __tmp_reg__,%1+" CR_TAB
- "ld %B0,%1" CR_TAB
- "mov %A0,__tmp_reg__", op, plen, -3);
-
- return avr_asm_len ("ld %A0,%1" CR_TAB
- TINY_ADIW (%E1, %F1, 1) CR_TAB
- "ld %B0,%1" CR_TAB
- TINY_SBIW (%E1, %F1, 1), op, plen, -6);
+ return avr_asm_len ("ld __tmp_reg__,%1+" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "mov %A0,__tmp_reg__", op, plen, -3);
+ return avr_asm_len ("ld %A0,%1" CR_TAB
+ TINY_ADIW (%E1, %F1, 1) CR_TAB
+ "ld %B0,%1" CR_TAB
+ TINY_SBIW (%E1, %F1, 1), op, plen, -6);
}
+
/* Same as movhi_r_mr, but TINY does not have ADIW, SBIW and LDD */
+
static const char*
avr_out_movhi_r_mr_reg_disp_tiny (rtx op[], int *plen)
{
int reg_base = true_regnum (XEXP (base, 0));
if (reg_base == reg_dest)
- {
+ {
return avr_asm_len (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld __tmp_reg__,%b1+" CR_TAB
- "ld %B0,%b1" CR_TAB
- "mov %A0,__tmp_reg__", op, plen, -5);
- }
+ "ld __tmp_reg__,%b1+" CR_TAB
+ "ld %B0,%b1" CR_TAB
+ "mov %A0,__tmp_reg__", op, plen, -5);
+ }
else
- {
+ {
return avr_asm_len (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld %A0,%b1+" CR_TAB
- "ld %B0,%b1" CR_TAB
- TINY_SBIW (%I1, %J1, %o1+1), op, plen, -6);
- }
-}
+ "ld %A0,%b1+" CR_TAB
+ "ld %B0,%b1" CR_TAB
+ TINY_SBIW (%I1, %J1, %o1+1), op, plen, -6);
+ }
+}
+
/* Same as movhi_r_mr, but TINY does not have ADIW, SBIW and LDD */
+
static const char*
avr_out_movhi_r_mr_pre_dec_tiny (rtx_insn *insn, rtx op[], int *plen)
{
mem_volatile_p = MEM_VOLATILE_P (src);
if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
- fatal_insn ("incorrect insn:", insn);
+ fatal_insn ("incorrect insn:", insn);
if (!mem_volatile_p)
- return avr_asm_len ("ld %B0,%1" CR_TAB
- "ld %A0,%1", op, plen, -2);
+ return avr_asm_len ("ld %B0,%1" CR_TAB
+ "ld %A0,%1", op, plen, -2);
return avr_asm_len (TINY_SBIW (%I1, %J1, 2) CR_TAB
- "ld %A0,%p1+" CR_TAB
- "ld %B0,%p1" CR_TAB
- TINY_SBIW (%I1, %J1, 1), op, plen, -6);
+ "ld %A0,%p1+" CR_TAB
+ "ld %B0,%p1" CR_TAB
+ TINY_SBIW (%I1, %J1, 1), op, plen, -6);
}
+
static const char*
out_movhi_r_mr (rtx_insn *insn, rtx op[], int *plen)
{
else if (GET_CODE (base) == PRE_DEC) /* (--R) */
{
if (AVR_TINY)
- return avr_out_movhi_r_mr_pre_dec_tiny (insn, op, plen);
+ return avr_out_movhi_r_mr_pre_dec_tiny (insn, op, plen);
if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
fatal_insn ("incorrect insn:", insn);
if (reg_dest == reg_base)
{
- /* "ld r26,-X" is undefined */
- return *l=9, (TINY_ADIW (%E1, %F1, 3) CR_TAB
- "ld %D0,%1" CR_TAB
- "ld %C0,-%1" CR_TAB
- "ld __tmp_reg__,-%1" CR_TAB
- TINY_SBIW (%E1, %F1, 1) CR_TAB
- "ld %A0,%1" CR_TAB
- "mov %B0,__tmp_reg__");
+ /* "ld r26,-X" is undefined */
+ return *l = 9, (TINY_ADIW (%E1, %F1, 3) CR_TAB
+ "ld %D0,%1" CR_TAB
+ "ld %C0,-%1" CR_TAB
+ "ld __tmp_reg__,-%1" CR_TAB
+ TINY_SBIW (%E1, %F1, 1) CR_TAB
+ "ld %A0,%1" CR_TAB
+ "mov %B0,__tmp_reg__");
}
else if (reg_dest == reg_base - 2)
{
- return *l=5, ("ld %A0,%1+" CR_TAB
- "ld %B0,%1+" CR_TAB
- "ld __tmp_reg__,%1+" CR_TAB
- "ld %D0,%1" CR_TAB
- "mov %C0,__tmp_reg__");
+ return *l = 5, ("ld %A0,%1+" CR_TAB
+ "ld %B0,%1+" CR_TAB
+ "ld __tmp_reg__,%1+" CR_TAB
+ "ld %D0,%1" CR_TAB
+ "mov %C0,__tmp_reg__");
}
else if (reg_unused_after (insn, base))
{
- return *l=4, ("ld %A0,%1+" CR_TAB
- "ld %B0,%1+" CR_TAB
- "ld %C0,%1+" CR_TAB
- "ld %D0,%1");
+ return *l = 4, ("ld %A0,%1+" CR_TAB
+ "ld %B0,%1+" CR_TAB
+ "ld %C0,%1+" CR_TAB
+ "ld %D0,%1");
}
else
{
- return *l=6, ("ld %A0,%1+" CR_TAB
- "ld %B0,%1+" CR_TAB
- "ld %C0,%1+" CR_TAB
- "ld %D0,%1" CR_TAB
- TINY_SBIW (%E1, %F1, 3));
+ return *l = 6, ("ld %A0,%1+" CR_TAB
+ "ld %B0,%1+" CR_TAB
+ "ld %C0,%1+" CR_TAB
+ "ld %D0,%1" CR_TAB
+ TINY_SBIW (%E1, %F1, 3));
}
}
+
static const char*
avr_out_movsi_r_mr_reg_disp_tiny (rtx_insn *insn, rtx op[], int *l)
{
if (reg_dest == reg_base)
{
- /* "ld r26,-X" is undefined */
- return *l=9, (TINY_ADIW (%I1, %J1, %o1+3) CR_TAB
- "ld %D0,%b1" CR_TAB
- "ld %C0,-%b1" CR_TAB
- "ld __tmp_reg__,-%b1" CR_TAB
- TINY_SBIW (%I1, %J1, 1) CR_TAB
- "ld %A0,%b1" CR_TAB
- "mov %B0,__tmp_reg__");
+ /* "ld r26,-X" is undefined */
+ return *l = 9, (TINY_ADIW (%I1, %J1, %o1+3) CR_TAB
+ "ld %D0,%b1" CR_TAB
+ "ld %C0,-%b1" CR_TAB
+ "ld __tmp_reg__,-%b1" CR_TAB
+ TINY_SBIW (%I1, %J1, 1) CR_TAB
+ "ld %A0,%b1" CR_TAB
+ "mov %B0,__tmp_reg__");
}
else if (reg_dest == reg_base - 2)
{
- return *l=7, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld %A0,%b1+" CR_TAB
- "ld %B0,%b1+" CR_TAB
- "ld __tmp_reg__,%b1+" CR_TAB
- "ld %D0,%b1" CR_TAB
- "mov %C0,__tmp_reg__");
+ return *l = 7, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
+ "ld %A0,%b1+" CR_TAB
+ "ld %B0,%b1+" CR_TAB
+ "ld __tmp_reg__,%b1+" CR_TAB
+ "ld %D0,%b1" CR_TAB
+ "mov %C0,__tmp_reg__");
}
else if (reg_unused_after (insn, XEXP (base, 0)))
{
- return *l=6, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld %A0,%b1+" CR_TAB
- "ld %B0,%b1+" CR_TAB
- "ld %C0,%b1+" CR_TAB
- "ld %D0,%b1");
+ return *l = 6, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
+ "ld %A0,%b1+" CR_TAB
+ "ld %B0,%b1+" CR_TAB
+ "ld %C0,%b1+" CR_TAB
+ "ld %D0,%b1");
}
else
{
- return *l=8, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
- "ld %A0,%b1+" CR_TAB
- "ld %B0,%b1+" CR_TAB
- "ld %C0,%b1+" CR_TAB
- "ld %D0,%b1" CR_TAB
- TINY_SBIW (%I1, %J1, %o1+3));
+ return *l = 8, (TINY_ADIW (%I1, %J1, %o1) CR_TAB
+ "ld %A0,%b1+" CR_TAB
+ "ld %B0,%b1+" CR_TAB
+ "ld %C0,%b1+" CR_TAB
+ "ld %D0,%b1" CR_TAB
+ TINY_SBIW (%I1, %J1, %o1+3));
}
}
rtx base = XEXP (dest, 0);
int reg_base = true_regnum (base);
int reg_src = true_regnum (src);
-
+
if (reg_base == reg_src)
{
/* "ld r26,-X" is undefined */
if (reg_unused_after (insn, base))
- {
- return *l=7, ("mov __tmp_reg__, %B1" CR_TAB
- "st %0,%A1" CR_TAB
- TINY_ADIW (%E0, %F0, 1) CR_TAB
- "st %0+,__tmp_reg__" CR_TAB
- "st %0+,%C1" CR_TAB
- "st %0+,%D1");
+ {
+ return *l = 7, ("mov __tmp_reg__, %B1" CR_TAB
+ "st %0,%A1" CR_TAB
+ TINY_ADIW (%E0, %F0, 1) CR_TAB
+ "st %0+,__tmp_reg__" CR_TAB
+ "st %0+,%C1" CR_TAB
+ "st %0+,%D1");
}
else
{
- return *l=9, ("mov __tmp_reg__, %B1" CR_TAB
- "st %0,%A1" CR_TAB
- TINY_ADIW (%E0, %F0, 1) CR_TAB
- "st %0+,__tmp_reg__" CR_TAB
- "st %0+,%C1" CR_TAB
- "st %0+,%D1" CR_TAB
- TINY_SBIW (%E0, %F0, 3));
+ return *l = 9, ("mov __tmp_reg__, %B1" CR_TAB
+ "st %0,%A1" CR_TAB
+ TINY_ADIW (%E0, %F0, 1) CR_TAB
+ "st %0+,__tmp_reg__" CR_TAB
+ "st %0+,%C1" CR_TAB
+ "st %0+,%D1" CR_TAB
+ TINY_SBIW (%E0, %F0, 3));
}
}
- else if (reg_base == reg_src + 2)
- {
- if (reg_unused_after (insn, base))
- return *l=7, ("mov __zero_reg__,%C1" CR_TAB
+ else if (reg_base == reg_src + 2)
+ {
+ if (reg_unused_after (insn, base))
+ return *l = 7, ("mov __zero_reg__,%C1" CR_TAB
"mov __tmp_reg__,%D1" CR_TAB
"st %0+,%A1" CR_TAB
"st %0+,%B1" CR_TAB
"st %0+,__zero_reg__" CR_TAB
"st %0,__tmp_reg__" CR_TAB
"clr __zero_reg__");
- else
- return *l=9, ("mov __zero_reg__,%C1" CR_TAB
- "mov __tmp_reg__,%D1" CR_TAB
- "st %0+,%A1" CR_TAB
- "st %0+,%B1" CR_TAB
- "st %0+,__zero_reg__" CR_TAB
- "st %0,__tmp_reg__" CR_TAB
- "clr __zero_reg__" CR_TAB
- TINY_SBIW (%E0, %F0, 3));
- }
+ else
+ return *l = 9, ("mov __zero_reg__,%C1" CR_TAB
+ "mov __tmp_reg__,%D1" CR_TAB
+ "st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0+,__zero_reg__" CR_TAB
+ "st %0,__tmp_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ TINY_SBIW (%E0, %F0, 3));
+ }
- return *l=6, ("st %0+,%A1" CR_TAB
- "st %0+,%B1" CR_TAB
- "st %0+,%C1" CR_TAB
- "st %0,%D1" CR_TAB
- TINY_SBIW (%E0, %F0, 3));
+ return *l = 6, ("st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0+,%C1" CR_TAB
+ "st %0,%D1" CR_TAB
+ TINY_SBIW (%E0, %F0, 3));
}
static const char*
if (reg_base == reg_src)
{
- *l = 11;
- return ("mov __tmp_reg__,%A2" CR_TAB
- "mov __zero_reg__,%B2" CR_TAB
+ *l = 11;
+ return ("mov __tmp_reg__,%A2" CR_TAB
+ "mov __zero_reg__,%B2" CR_TAB
TINY_ADIW (%I0, %J0, %o0) CR_TAB
- "st %b0+,__tmp_reg__" CR_TAB
- "st %b0+,__zero_reg__" CR_TAB
- "st %b0+,%C2" CR_TAB
- "st %b0,%D2" CR_TAB
- "clr __zero_reg__" CR_TAB
- TINY_SBIW (%I0, %J0, %o0+3));
- }
+ "st %b0+,__tmp_reg__" CR_TAB
+ "st %b0+,__zero_reg__" CR_TAB
+ "st %b0+,%C2" CR_TAB
+ "st %b0,%D2" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ TINY_SBIW (%I0, %J0, %o0+3));
+ }
else if (reg_src == reg_base - 2)
{
- *l = 11;
- return ("mov __tmp_reg__,%C2" CR_TAB
- "mov __zero_reg__,%D2" CR_TAB
- TINY_ADIW (%I0, %J0, %o0) CR_TAB
- "st %b0+,%A0" CR_TAB
- "st %b0+,%B0" CR_TAB
- "st %b0+,__tmp_reg__" CR_TAB
- "st %b0,__zero_reg__" CR_TAB
- "clr __zero_reg__" CR_TAB
- TINY_SBIW (%I0, %J0, %o0+3));
- }
+ *l = 11;
+ return ("mov __tmp_reg__,%C2" CR_TAB
+ "mov __zero_reg__,%D2" CR_TAB
+ TINY_ADIW (%I0, %J0, %o0) CR_TAB
+ "st %b0+,%A0" CR_TAB
+ "st %b0+,%B0" CR_TAB
+ "st %b0+,__tmp_reg__" CR_TAB
+ "st %b0,__zero_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ TINY_SBIW (%I0, %J0, %o0+3));
+ }
*l = 8;
return (TINY_ADIW (%I0, %J0, %o0) CR_TAB
- "st %b0+,%A1" CR_TAB
- "st %b0+,%B1" CR_TAB
- "st %b0+,%C1" CR_TAB
- "st %b0,%D1" CR_TAB
- TINY_SBIW (%I0, %J0, %o0+3));
+ "st %b0+,%A1" CR_TAB
+ "st %b0+,%B1" CR_TAB
+ "st %b0+,%C1" CR_TAB
+ "st %b0,%D1" CR_TAB
+ TINY_SBIW (%I0, %J0, %o0+3));
}
static const char*
{
return avr_asm_len (TINY_ADIW (%E1, %F1, 2) CR_TAB
"ld %C0,%1" CR_TAB
- "ld __tmp_reg__,-%1" CR_TAB
+ "ld __tmp_reg__,-%1" CR_TAB
TINY_SBIW (%E1, %F1, 1) CR_TAB
- "ld %A0,%1" CR_TAB
+ "ld %A0,%1" CR_TAB
"mov %B0,__tmp_reg__", op, plen, -8);
}
else
{
avr_asm_len (TINY_ADIW (%I1, %J1, %o1) CR_TAB
"ld %A0,%b1+" CR_TAB
- "ld %B0,%b1+" CR_TAB
+ "ld %B0,%b1+" CR_TAB
"ld %C0,%b1", op, plen, -5);
if (reg_dest != (reg_base - 2)
&& !reg_unused_after (insn, XEXP (base, 0)))
avr_asm_len (TINY_SBIW (%I1, %J1, %o1+2), op, plen, 2);
-
+
return "";
}
}
}
else if (reg_src == reg_base - 2)
{
- return avr_asm_len ("mov __tmp_reg__,%C1" CR_TAB
+ return avr_asm_len ("mov __tmp_reg__,%C1" CR_TAB
TINY_ADIW (%I0, %J0, %o0) CR_TAB
"st %b0+,%A1" CR_TAB
"st %b0+,%B1" CR_TAB
"st %b0,__zero_reg__" CR_TAB
"st -%b0,__tmp_reg__" CR_TAB
"clr __zero_reg__" CR_TAB
- TINY_SBIW (%I0, %J0, %o0), op, plen, -9)
+ TINY_SBIW (%I0, %J0, %o0), op, plen, -9)
: avr_asm_len (TINY_ADIW (%I0, %J0, %o0+1) CR_TAB
"st %b0,%B1" CR_TAB
else
{
if (MINUS == code && !test_hard_reg_class (LD_REGS, op[0]))
- avr_asm_len ("sec" CR_TAB "sbc %0,%0", op, plen, 2);
+ avr_asm_len ("sec" CR_TAB
+ "sbc %0,%0", op, plen, 2);
else
avr_asm_len (PLUS == code ? "sbc %0,%0" : "ldi %0,0xff",
op, plen, 1);
gcc_unreachable();
/* If we need to round the fraction part, we might need to save/round it
- before clobbering any of it in Step 1. Also, we might to want to do
+ before clobbering any of it in Step 1. Also, we might want to do
the rounding now to make use of LD_REGS. */
if (SCALAR_INT_MODE_P (GET_MODE (xop[0]))
&& SCALAR_ACCUM_MODE_P (GET_MODE (xop[1]))
&& !TARGET_FRACT_CONV_TRUNC)
{
bool overlap
- = (src.regno <=
- (offset ? dest.regno_msb - sign_bytes : dest.regno + zero_bytes - 1)
- && dest.regno - offset -1 >= dest.regno);
+ = (src.regno <=
+ (offset ? dest.regno_msb - sign_bytes : dest.regno + zero_bytes - 1)
+ && dest.regno - offset -1 >= dest.regno);
unsigned s0 = dest.regno - offset -1;
bool use_src = true;
unsigned sn;
bool have_carry = false;
if (src.ibyte > dest.ibyte)
- copied_msb -= src.ibyte - dest.ibyte;
+ copied_msb -= src.ibyte - dest.ibyte;
for (sn = s0; sn <= copied_msb; sn++)
- if (!IN_RANGE (sn, dest.regno, dest.regno_msb)
- && !reg_unused_after (insn, all_regs_rtx[sn]))
- use_src = false;
+ if (!IN_RANGE (sn, dest.regno, dest.regno_msb)
+ && !reg_unused_after (insn, all_regs_rtx[sn]))
+ use_src = false;
if (use_src && TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], s0))
- {
- avr_asm_len ("tst %0" CR_TAB "brpl 0f",
- &all_regs_rtx[src.regno_msb], plen, 2);
- sn = src.regno;
- if (sn < s0)
- {
- if (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], sn))
- avr_asm_len ("cpi %0,1", &all_regs_rtx[sn], plen, 1);
- else
- avr_asm_len ("sec" CR_TAB "cpc %0,__zero_reg__",
- &all_regs_rtx[sn], plen, 2);
- have_carry = true;
- }
- while (++sn < s0)
- avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
- avr_asm_len (have_carry ? "sbci %0,128" : "subi %0,129",
- &all_regs_rtx[s0], plen, 1);
- for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
- avr_asm_len ("sbci %0,255", &all_regs_rtx[sn], plen, 1);
- avr_asm_len ("\n0:", NULL, plen, 0);
- frac_rounded = true;
- }
+ {
+ avr_asm_len ("tst %0" CR_TAB "brpl 0f",
+ &all_regs_rtx[src.regno_msb], plen, 2);
+ sn = src.regno;
+ if (sn < s0)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], sn))
+ avr_asm_len ("cpi %0,1", &all_regs_rtx[sn], plen, 1);
+ else
+ avr_asm_len ("sec" CR_TAB
+ "cpc %0,__zero_reg__",
+ &all_regs_rtx[sn], plen, 2);
+ have_carry = true;
+ }
+ while (++sn < s0)
+ avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+
+ avr_asm_len (have_carry ? "sbci %0,128" : "subi %0,129",
+ &all_regs_rtx[s0], plen, 1);
+ for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
+ avr_asm_len ("sbci %0,255", &all_regs_rtx[sn], plen, 1);
+ avr_asm_len ("\n0:", NULL, plen, 0);
+ frac_rounded = true;
+ }
else if (use_src && overlap)
- {
- avr_asm_len ("clr __tmp_reg__" CR_TAB
- "sbrc %1,0" CR_TAB "dec __tmp_reg__", xop, plen, 1);
- sn = src.regno;
- if (sn < s0)
- {
- avr_asm_len ("add %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
- have_carry = true;
- }
- while (++sn < s0)
- avr_asm_len ("adc %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
- if (have_carry)
- avr_asm_len ("clt" CR_TAB "bld __tmp_reg__,7" CR_TAB
- "adc %0,__tmp_reg__",
- &all_regs_rtx[s0], plen, 1);
- else
- avr_asm_len ("lsr __tmp_reg" CR_TAB "add %0,__tmp_reg__",
- &all_regs_rtx[s0], plen, 2);
- for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
- avr_asm_len ("adc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
- frac_rounded = true;
- }
+ {
+ avr_asm_len ("clr __tmp_reg__" CR_TAB
+ "sbrc %1,0" CR_TAB
+ "dec __tmp_reg__", xop, plen, 1);
+ sn = src.regno;
+ if (sn < s0)
+ {
+ avr_asm_len ("add %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
+ have_carry = true;
+ }
+
+ while (++sn < s0)
+ avr_asm_len ("adc %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
+
+ if (have_carry)
+ avr_asm_len ("clt" CR_TAB
+ "bld __tmp_reg__,7" CR_TAB
+ "adc %0,__tmp_reg__",
+ &all_regs_rtx[s0], plen, 1);
+ else
+ avr_asm_len ("lsr __tmp_reg" CR_TAB
+ "add %0,__tmp_reg__",
+ &all_regs_rtx[s0], plen, 2);
+ for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
+ avr_asm_len ("adc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ frac_rounded = true;
+ }
else if (overlap)
- {
- bool use_src
- = (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], s0)
- && (IN_RANGE (s0, dest.regno, dest.regno_msb)
- || reg_unused_after (insn, all_regs_rtx[s0])));
- xop[2] = all_regs_rtx[s0];
- unsigned sn = src.regno;
- if (!use_src || sn == s0)
- avr_asm_len ("mov __tmp_reg__,%2", xop, plen, 1);
- /* We need to consider to-be-discarded bits
- if the value is negative. */
- if (sn < s0)
- {
- avr_asm_len ("tst %0" CR_TAB "brpl 0f",
- &all_regs_rtx[src.regno_msb], plen, 2);
- /* Test to-be-discarded bytes for any nozero bits.
- ??? Could use OR or SBIW to test two registers at once. */
- if (sn < s0)
- avr_asm_len ("cp %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
- while (++sn < s0)
- avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
- /* Set bit 0 in __tmp_reg__ if any of the lower bits was set. */
- if (use_src)
- avr_asm_len ("breq 0f" CR_TAB
- "ori %2,1" "\n0:\t" "mov __tmp_reg__,%2",
- xop, plen, 3);
- else
- avr_asm_len ("breq 0f" CR_TAB
- "set" CR_TAB "bld __tmp_reg__,0\n0:",
- xop, plen, 3);
- }
- lsb_in_tmp_reg = true;
- }
+ {
+ bool use_src
+ = (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], s0)
+ && (IN_RANGE (s0, dest.regno, dest.regno_msb)
+ || reg_unused_after (insn, all_regs_rtx[s0])));
+ xop[2] = all_regs_rtx[s0];
+ unsigned sn = src.regno;
+ if (!use_src || sn == s0)
+ avr_asm_len ("mov __tmp_reg__,%2", xop, plen, 1);
+ /* We need to consider to-be-discarded bits
+ if the value is negative. */
+ if (sn < s0)
+ {
+ avr_asm_len ("tst %0" CR_TAB
+ "brpl 0f",
+ &all_regs_rtx[src.regno_msb], plen, 2);
+ /* Test to-be-discarded bytes for any nonzero bits.
+ ??? Could use OR or SBIW to test two registers at once. */
+ if (sn < s0)
+ avr_asm_len ("cp %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+
+ while (++sn < s0)
+ avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ /* Set bit 0 in __tmp_reg__ if any of the lower bits was set. */
+ if (use_src)
+ avr_asm_len ("breq 0f" CR_TAB
+ "ori %2,1"
+ "\n0:\t" "mov __tmp_reg__,%2",
+ xop, plen, 3);
+ else
+ avr_asm_len ("breq 0f" CR_TAB
+ "set" CR_TAB
+ "bld __tmp_reg__,0\n0:",
+ xop, plen, 3);
+ }
+ lsb_in_tmp_reg = true;
+ }
}
/* Step 1: Clear bytes at the low end and copy payload bits from source
{
/* We are going to override the sign bit. If we sign-extend,
store the sign in the Carry flag. This is not needed if
- the destination will be ASHIFT is the remainder because
+ the destination will be ASHIFT in the remainder because
the ASHIFT will set Carry without extra instruction. */
avr_asm_len ("lsl %0", &all_regs_rtx[src.regno_msb], plen, 1);
avr_asm_len ("cpc __zero_reg__,%0", &all_regs_rtx[sn++], plen, 1);
/* Overflow goes with set carry. Clear carry otherwise. */
- avr_asm_len ("brvs 0f" CR_TAB "clc\n0:", NULL, plen, 2);
+ avr_asm_len ("brvs 0f" CR_TAB
+ "clc\n0:", NULL, plen, 2);
}
/* Likewise, when converting from accumulator types to integer, we
need to round up negative values. */
/* Fall back to use __zero_reg__ as a temporary. */
avr_asm_len ("dec __zero_reg__", NULL, plen, 1);
if (have_carry)
- avr_asm_len ("clt" CR_TAB "bld __zero_reg__,7", NULL, plen, 2);
+ avr_asm_len ("clt" CR_TAB
+ "bld __zero_reg__,7", NULL, plen, 2);
else
avr_asm_len ("lsr __zero_reg__", NULL, plen, 1);
- avr_asm_len ((have_carry && lsb_in_tmp_reg
- ? "adc __tmp_reg__,__zero_reg__"
- : have_carry ? "adc %2,__zero_reg__"
- : lsb_in_tmp_reg ? "add __tmp_reg__,__zero_reg__"
- : "add %2,__zero_reg__"),
+ avr_asm_len (have_carry && lsb_in_tmp_reg
+ ? "adc __tmp_reg__,__zero_reg__"
+ : have_carry ? "adc %2,__zero_reg__"
+ : lsb_in_tmp_reg ? "add __tmp_reg__,__zero_reg__"
+ : "add %2,__zero_reg__",
xop, plen, 1);
avr_asm_len ("eor __zero_reg__,__zero_reg__", NULL, plen, 1);
}
- for (d0 = dest.regno + zero_bytes;
+
+ for (d0 = dest.regno + zero_bytes;
d0 <= dest.regno_msb - sign_bytes; d0++)
avr_asm_len ("adc %0,__zero_reg__", &all_regs_rtx[d0], plen, 1);
- avr_asm_len (lsb_in_tmp_reg
- ? "\n0:\t" "lsl __tmp_reg__" : "\n0:\t" "lsl %2",
+
+ avr_asm_len (lsb_in_tmp_reg
+ ? "\n0:\t" "lsl __tmp_reg__"
+ : "\n0:\t" "lsl %2",
xop, plen, 1);
}
else if (MAY_CLOBBER (s0))
gcc_assert (size <= MAX_SIZE);
/* Generate list of subreg moves. */
for (i = 0; i < size; i++)
- {
+ {
int from = i;
int to = (from + offset) % size;
move[i].src = simplify_gen_subreg (move_mode, operands[1],
- mode, from * move_size);
+ mode, from * move_size);
move[i].dst = simplify_gen_subreg (move_mode, operands[0],
- mode, to * move_size);
- move[i].links = -1;
- }
+ mode, to * move_size);
+ move[i].links = -1;
+ }
/* Mark dependence where a dst of one move is the src of another move.
The first move is a conflict as it must wait until second is
performed. We ignore moves to self - we catch this later. */
the length need not/must not be adjusted for these insns.
It is easier to state this in an insn attribute "adjust_len" than
to clutter up code here... */
-
+
if (JUMP_TABLE_DATA_P (insn) || recog_memoized (insn) == -1)
{
return len;
else
addr_attr = lookup_attribute ("address", attr);
if (io_low_attr
- || (io_attr && addr_attr &&
- low_io_address_operand (GEN_INT (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (addr_attr)))), QImode)))
+ || (io_attr && addr_attr
+ && low_io_address_operand
+ (GEN_INT (TREE_INT_CST_LOW
+ (TREE_VALUE (TREE_VALUE (addr_attr)))), QImode)))
SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_IO_LOW;
if (io_attr || io_low_attr)
SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_IO;
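For reference, a user-level sketch of the variable attributes this code
recognizes (avr-gcc's "io", "io_low" and "address" attributes; the
addresses are illustrative only):

  volatile char port_b __attribute__((io_low (0x18)));  /* SBI/CBI ok  */
  volatile char ucsr_a __attribute__((io (0x2b)));      /* IN/OUT only */

  void set_bit0 (void)
  {
    port_b |= 1;   /* low I/O range, so this can be a single SBI */
  }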
32, 33, 34, 35
};
- /*
- Select specific register allocation order. Tiny Core (attiny4/5/9/10/20/40)
- devices has only 16 registers, so different allocation order should be used
- */
- const int *order = (TARGET_ORDER_1 ? (AVR_TINY ? tiny_order_1 : order_1) :
- TARGET_ORDER_2 ? (AVR_TINY ? tiny_order_0 : order_2) :
- (AVR_TINY ? tiny_order_0 : order_0));
+ /* Select specific register allocation order.
+ Tiny Core (ATtiny4/5/9/10/20/40) devices have only 16 registers,
+ so different allocation order should be used. */
+
+ const int *order = (TARGET_ORDER_1 ? (AVR_TINY ? tiny_order_1 : order_1)
+ : TARGET_ORDER_2 ? (AVR_TINY ? tiny_order_0 : order_2)
+ : (AVR_TINY ? tiny_order_0 : order_0));
for (i = 0; i < ARRAY_SIZE (order_0); ++i)
reg_alloc_order[i] = order[i];
}
static void
-avr_conditional_register_usage(void) {
+avr_conditional_register_usage (void)
+{
+ if (AVR_TINY)
+ {
+ unsigned int i;
- if (AVR_TINY) {
- unsigned int i;
+ const int tiny_reg_alloc_order[] = {
+ 24, 25,
+ 22, 23,
+ 30, 31,
+ 26, 27,
+ 28, 29,
+ 21, 20, 19, 18,
+ 16, 17,
+ 32, 33, 34, 35,
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+ };
- const int tiny_reg_alloc_order[] = {
- 24, 25,
- 22, 23,
- 30, 31,
- 26, 27,
- 28, 29,
- 21, 20, 19, 18,
- 16, 17,
- 32, 33, 34, 35,
- 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
- };
+ /* Set R0-R17 as fixed registers. Reset R0-R17 in call used register list
+ - R0-R15 are not available in Tiny Core devices
+ - R16 and R17 are fixed registers. */
- /* Set R0-R17 as fixed registers. Reset R0-R17 in call used register list
- - R0-R15 are not available in Tiny Core devices
- - R16 and R17 are fixed registers
- */
- for (i = 0; i <= 17; i++) {
- fixed_regs[i] = 1;
- call_used_regs[i] = 1;
- }
+ for (i = 0; i <= 17; i++)
+ {
+ fixed_regs[i] = 1;
+ call_used_regs[i] = 1;
+ }
- /* Set R18 to R21 as callee saved registers
- - R18, R19, R20 and R21 are the callee saved registers in Tiny Core devices
- */
- for (i = 18; i <= LAST_CALLEE_SAVED_REG; i++) {
- call_used_regs[i] = 0;
- }
+ /* Set R18 to R21 as callee saved registers
+ - R18, R19, R20 and R21 are the callee saved registers in
+ Tiny Core devices.  */
- /*update register allocation order for Tiny Core devices */
- for (i=0; i < ARRAY_SIZE (tiny_reg_alloc_order); i++) {
- reg_alloc_order[i] = tiny_reg_alloc_order[i];
- }
+ for (i = 18; i <= LAST_CALLEE_SAVED_REG; i++)
+ {
+ call_used_regs[i] = 0;
+ }
- CLEAR_HARD_REG_SET(reg_class_contents[(int)ADDW_REGS]);
- CLEAR_HARD_REG_SET(reg_class_contents[(int)NO_LD_REGS]);
- }
+ /* Update register allocation order for Tiny Core devices.  */
+
+ for (i = 0; i < ARRAY_SIZE (tiny_reg_alloc_order); i++)
+ {
+ reg_alloc_order[i] = tiny_reg_alloc_order[i];
+ }
+
+ CLEAR_HARD_REG_SET (reg_class_contents[(int) ADDW_REGS]);
+ CLEAR_HARD_REG_SET (reg_class_contents[(int) NO_LD_REGS]);
+ }
}
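A user-level illustration of the Tiny calling convention set up above
(a sketch; the actual register choice is up to the allocator): with only
r18..r21 callee-saved, a value that must survive a call is either kept
in one of those four registers or spilled.

  extern char step (char);

  char chain (char x)
  {
    /* x lives across the first call; on Tiny it may be parked in
       r18..r21, which the prologue/epilogue then save and restore. */
    return step (step (x));
  }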
/* Implement `TARGET_HARD_REGNO_SCRATCH_OK'. */
avr_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
HOST_WIDE_INT size = int_size_in_bytes (type);
- HOST_WIDE_INT ret_size_limit = AVR_TINY ? 4 : 8;
+ HOST_WIDE_INT ret_size_limit = AVR_TINY ? 4 : 8;
- /* In avr, there are 8 return registers. But, for Tiny Core
- (attiny4/5/9/10/20/40) devices, only 4 registers available.
- Return true if size is unknown or greater than the limit */
- if ((size == -1) || (size > ret_size_limit))
- {
- return true;
- }
+ /* On AVR, there are 8 return registers.  But for Tiny Core
+ (ATtiny4/5/9/10/20/40) devices, only 4 registers are available.
+ Return true if size is unknown or greater than the limit. */
+
+ if (size == -1 || size > ret_size_limit)
+ {
+ return true;
+ }
else
- {
- return false;
- }
+ {
+ return false;
+ }
}
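A quick user-level sketch of the limit above (illustrative types only):

  struct s4 { char a, b, c, d; };        /* 4 bytes: registers on all AVRs */
  struct s6 { char a, b, c, d, e, f; };  /* 6 bytes: memory on Tiny only   */

  struct s4 f4 (void) { struct s4 r = { 1, 2, 3, 4 }; return r; }
  struct s6 f6 (void) { struct s6 r = { 0 }; return r; }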
{
addr_space_t as_old = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
addr_space_t as_new = TYPE_ADDR_SPACE (TREE_TYPE (type));
-
+
if (avr_log.progmem)
avr_edump ("%?: type = %t\nexpr = %t\n\n", type, expr);
#define ITYP(T) \
lang_hooks.types.type_for_size (TYPE_PRECISION (T), TYPE_UNSIGNED (T))
-
+
#define FX_FTYPE_FX(fx) \
tree fx##r_ftype_##fx##r \
= build_function_type_list (node_##fx##r, node_##fx##r, NULL); \
tree fx##k_ftype_##fx##k_int \
= build_function_type_list (node_##fx##k, node_##fx##k, \
integer_type_node, NULL)
-
+
#define INT_FTYPE_FX(fx) \
tree int_ftype_##fx##r \
= build_function_type_list (integer_type_node, node_##fx##r, NULL); \
/* Warn about odd rounding. Rounding points >= FBIT will have
no effect. */
-
+
if (TREE_CODE (CALL_EXPR_ARG (exp, 1)) != INTEGER_CST)
break;
])
(define_constants
- [ (TMP_REGNO_TINY 16) ; r16 is temp register for AVR_TINY
- (ZERO_REGNO_TINY 17) ; r17 is zero register for AVR_TINY
+ [(TMP_REGNO_TINY 16) ; r16 is temp register for AVR_TINY
+ (ZERO_REGNO_TINY 17) ; r17 is zero register for AVR_TINY
])
(define_c_enum "unspec"
DONE;
}
- /* AVRTC-579
- if the source operand expression is out of range for 'lds' instruction
- copy source operand expression to register
- For tiny core, LDS instruction's memory access range limited to 0x40..0xbf
- */
- if (!tiny_valid_direct_memory_access_range(src,<MODE>mode))
+ // AVRTC-579
+ // If the source operand expression is out of range for the LDS instruction,
+ // copy the source operand expression to a register.
+ // For the Tiny core, LDS memory accesses are limited to 0x40..0xbf.
+
+ if (!tiny_valid_direct_memory_access_range (src, <MODE>mode))
{
- rtx srcx = XEXP(src,0);
- operands[1] = src = replace_equiv_address (src,copy_to_mode_reg (GET_MODE(srcx),srcx));
- emit_move_insn(dest,src);
+ rtx srcx = XEXP (src, 0);
+ operands[1] = src
+   = replace_equiv_address (src, copy_to_mode_reg (GET_MODE (srcx), srcx));
+ emit_move_insn (dest, src);
DONE;
}
- /* AVRTC-579
- if the destination operand expression is out of range for 'sts' instruction
- copy destination operand expression to register
- For tiny core, STS instruction's memory access range limited to 0x40..0xbf
- */
- if (!tiny_valid_direct_memory_access_range(dest,<MODE>mode))
- {
- rtx destx = XEXP(dest,0);
- operands[0] = dest = replace_equiv_address (dest,copy_to_mode_reg (GET_MODE(destx),destx));
- emit_move_insn(dest,src);
- DONE;
- }
+ // AVRTC-579
+ // If the destination operand expression is out of range for the STS
+ // instruction, copy the destination operand expression to a register.
+ // For the Tiny core, STS memory accesses are limited to 0x40..0xbf.
+ if (!tiny_valid_direct_memory_access_range (dest, <MODE>mode))
+ {
+ rtx destx = XEXP (dest, 0);
+ operands[0] = dest
+   = replace_equiv_address (dest, copy_to_mode_reg (GET_MODE (destx), destx));
+ emit_move_insn (dest, src);
+ DONE;
+ }
})
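A user-level sketch of what the two fix-ups above handle (assuming the
AVR "address" variable attribute; 0x100 is an illustrative address):

  /* 0x100 is outside the Tiny LDS/STS window 0x40..0xbf, so the
     expander copies the address into a pointer register and the access
     is emitted as LDI/LDI + LD rather than LDS.  */
  volatile char flag __attribute__((address (0x100)));

  char read_flag (void)
  {
    return flag;
  }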
;;========================================================================
[(set (match_operand:ALL1 0 "nonimmediate_operand" "=r ,d ,Qm ,r ,q,r,*r")
(match_operand:ALL1 1 "nox_general_operand" "r Y00,n Ynn,r Y00,Qm,r,q,i"))]
"(register_operand (operands[0], <MODE>mode)
- || reg_or_0_operand (operands[1], <MODE>mode)) &&
- /* skip if operands are out of lds/sts memory access range(0x40..0xbf)
- though access range is checked during define_expand, it is required
- here to avoid merging rtls during combine pass */
- tiny_valid_direct_memory_access_range(operands[0],QImode) &&
- tiny_valid_direct_memory_access_range(operands[1],QImode)"
+ || reg_or_0_operand (operands[1], <MODE>mode))
+ /* Skip if operands are out of lds/sts memory access range (0x40..0xbf).
+ Though the access range is checked during define_expand, it is required
+ here to avoid merging RTXes during combine pass. */
+ && tiny_valid_direct_memory_access_range (operands[0], QImode)
+ && tiny_valid_direct_memory_access_range (operands[1], QImode)"
{
return output_movqi (insn, operands, NULL);
}
[(set (match_operand:ALL2 0 "nonimmediate_operand" "=r,r ,r,m ,d,*r,q,r")
(match_operand:ALL2 1 "nox_general_operand" "r,Y00,m,r Y00,i,i ,r,q"))]
"(register_operand (operands[0], <MODE>mode)
- || reg_or_0_operand (operands[1], <MODE>mode)) &&
- /* skip if operands are out of lds/sts memory access range(0x40..0xbf)
- though access range is checked during define_expand, it is required
- here to avoid merging rtls during combine pass */
- tiny_valid_direct_memory_access_range(operands[0],HImode) &&
- tiny_valid_direct_memory_access_range(operands[1],HImode)"
+ || reg_or_0_operand (operands[1], <MODE>mode))
+ /* Skip if operands are out of lds/sts memory access range (0x40..0xbf).
+ Though the access range is checked during define_expand, it is required
+ here to avoid merging RTXes during combine pass. */
+ && tiny_valid_direct_memory_access_range (operands[0], HImode)
+ && tiny_valid_direct_memory_access_range (operands[1], HImode)"
{
return output_movhi (insn, operands, NULL);
}
[(set (match_operand:ALL4 0 "nonimmediate_operand" "=r,r ,r ,Qm ,!d,r")
(match_operand:ALL4 1 "nox_general_operand" "r,Y00,Qm,r Y00,i ,i"))]
"(register_operand (operands[0], <MODE>mode)
- || reg_or_0_operand (operands[1], <MODE>mode)) &&
- /* skip if operands are out of lds/sts memory access range(0x40..0xbf)
- though access range is checked during define_expand, it is required
- here to avoid merging rtls during combine pass */
- tiny_valid_direct_memory_access_range(operands[0],SImode) &&
- tiny_valid_direct_memory_access_range(operands[1],SImode)"
+ || reg_or_0_operand (operands[1], <MODE>mode))
+ /* Skip if operands are out of lds/sts memory access range (0x40..0xbf).
+ Though the access range is checked during define_expand, it is required
+ here to avoid merging RTXes during combine pass. */
+ && tiny_valid_direct_memory_access_range (operands[0], SImode)
+ && tiny_valid_direct_memory_access_range (operands[1], SImode)"
{
return output_movsisf (insn, operands, NULL);
}
[(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r ,Qm,!d,r")
(match_operand:SF 1 "nox_general_operand" "r,G,Qm,rG,F ,F"))]
"(register_operand (operands[0], SFmode)
- || reg_or_0_operand (operands[1], SFmode)) &&
- /* skip if operands are out of lds/sts memory access range(0x40..0xbf)
- though access range is checked during define_expand, it is required
- here to avoid merging rtls during combine pass */
- tiny_valid_direct_memory_access_range(operands[0],SFmode) &&
- tiny_valid_direct_memory_access_range(operands[1],SFmode)"
+ || reg_or_0_operand (operands[1], SFmode))
+ /* Skip if operands are out of lds/sts memory access range (0x40..0xbf).
+ Though the access range is checked during define_expand, it is required
+ here to avoid merging RTXes during combine pass. */
+ && tiny_valid_direct_memory_access_range (operands[0], SFmode)
+ && tiny_valid_direct_memory_access_range (operands[1], SFmode)"
{
return output_movsisf (insn, operands, NULL);
}
DONE;
}
- /* For small constants we can do better by extending them on the fly.
- The constant can be loaded in one instruction and the widening
- multiplication is shorter. First try the unsigned variant because it
- allows constraint "d" instead of "a" for the signed version. */
+ /* For small constants we can do better by extending them on the fly.
+ The constant can be loaded in one instruction and the widening
+ multiplication is shorter.  First try the unsigned variant because it
+ allows constraint "d" instead of "a" for the signed version. */
if (s9_operand (operands[2], HImode))
{
swap %0\;lsl %0\;adc %0,__zero_reg__
swap %0\;lsl %0\;adc %0,__zero_reg__\;lsl %0\;adc %0,__zero_reg__
bst %0,0\;ror %0\;bld %0,7
- "
+ " ; empty
[(set_attr "length" "2,4,4,1,3,5,3,0")
(set_attr "cc" "set_n,set_n,clobber,none,set_n,set_n,clobber,none")])
}
else
{
- operands[7] = gen_rtx_PLUS (HImode, operands[6],
+ operands[7] = gen_rtx_PLUS (HImode, operands[6],
gen_rtx_LABEL_REF (VOIDmode, operands[3]));
operands[8] = const0_rtx;
operands[10] = operands[6];
(clobber (match_scratch:QI 2 "=&d"))]
""
"ldi %2,lo8(%0)
- 1: dec %2
+1: dec %2
brne 1b"
[(set_attr "length" "3")
(set_attr "cc" "clobber")])
(clobber (match_scratch:HI 2 "=&w,&d"))]
""
"@
- ldi %A2,lo8(%0)\;ldi %B2,hi8(%0)\;1: sbiw %A2,1\;brne 1b
- ldi %A2,lo8(%0)\;ldi %B2,hi8(%0)\;1: subi %A2,1\;sbci %B2,0\;brne 1b"
+ ldi %A2,lo8(%0)\;ldi %B2,hi8(%0)\n1: sbiw %A2,1\;brne 1b
+ ldi %A2,lo8(%0)\;ldi %B2,hi8(%0)\n1: subi %A2,1\;sbci %B2,0\;brne 1b"
[(set_attr "length" "4,5")
(set_attr "isa" "no_tiny,tiny")
(set_attr "cc" "clobber")])
"ldi %2,lo8(%0)
ldi %3,hi8(%0)
ldi %4,hlo8(%0)
- 1: subi %2,1
+1: subi %2,1
sbci %3,0
sbci %4,0
brne 1b"
ldi %3,hi8(%0)
ldi %4,hlo8(%0)
ldi %5,hhi8(%0)
- 1: subi %2,1
+1: subi %2,1
sbci %3,0
sbci %4,0
sbci %5,0