introduces additional code in LIM and increases reg pressure. */
maybe_set_param_value (PARAM_ALLOW_STORE_DATA_RACES, 1,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ global_options.x_param_values,
+ global_options_set.x_param_values);
/* Unwind tables currently require a frame pointer for correctness,
see toplev.c:process_options(). */
if (cfun->machine->is_OS_task + cfun->machine->is_OS_main
+ (cfun->machine->is_signal || cfun->machine->is_interrupt) > 1)
error_at (loc, "function attributes %qs, %qs and %qs are mutually"
- " exclusive", "OS_task", "OS_main", isr);
+ " exclusive", "OS_task", "OS_main", isr);
/* 'naked' will hide effects of 'OS_task' and 'OS_main'. */
/* Can only return this function's return address. Others not supported. */
if (count)
- return NULL;
+ return NULL;
if (AVR_3_BYTE_PC)
{
r = gen_rtx_PLUS (Pmode, tem, r);
r = gen_frame_mem (Pmode, memory_address (Pmode, r));
r = gen_rtx_ROTATE (HImode, r, GEN_INT (8));
- return r;
+ return r;
}
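The gen_rtx_ROTATE (HImode, r, GEN_INT (8)) above is just RTL for a byte
swap of the 16-bit value loaded from the return-address slot.  A minimal
C sketch of that operation, under the assumption stated here that the
rotate only exchanges the two bytes (swap_bytes16 is an illustrative
name, not a function in avr.c):

  #include <stdint.h>

  /* Rotating a 16-bit value by 8 bits, in either direction, exchanges
     its two bytes; that is what the ROTATE rtx above expresses.  */
  static inline uint16_t
  swap_bytes16 (uint16_t x)
  {
    return (uint16_t) ((x << 8) | (x >> 8));
  }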
/* Return 1 if the function epilogue is just a single "ret". */
static bool
avr_cannot_modify_jumps_p (void)
{
-
/* Naked Functions must not have any instructions after
their epilogue, see PR42240 */
else if (code == 'b')
{
if (GET_CODE (addr) != PLUS)
- fatal_insn ("bad address, not (reg+disp):", addr);
+ fatal_insn ("bad address, not (reg+disp):", addr);
avr_print_operand_address (file, VOIDmode, XEXP (addr, 0));
}
fatal_insn ("bad address, not post_inc or pre_dec:", addr);
if (code == 'p')
- /* X, Y, Z */
+ /* X, Y, Z */
avr_print_operand_address (file, VOIDmode, XEXP (addr, 0));
else
avr_print_operand (file, XEXP (addr, 0), 0); /* r26, r28, r30 */
return avr_out_lpm (insn, xop, plen);
}
- gcc_assert (2 == GET_MODE_SIZE (GET_MODE (dest)));
-
if (REG_P (dest))
{
if (REG_P (src)) /* mov r,r */
}
if (GET_CODE (x) == PLUS
- && REG_P (XEXP (x, 0))
- && CONST_INT_P (XEXP (x, 1)))
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
{
/* memory access by reg+disp */
"ldd %B0,Y+63" CR_TAB
"sbiw r28,%o1-62", op, plen, -4)
- : avr_asm_len ("subi r28,lo8(-%o1)" CR_TAB
+ : avr_asm_len ("subi r28,lo8(-%o1)" CR_TAB
"sbci r29,hi8(-%o1)" CR_TAB
"ld %A0,Y" CR_TAB
"ldd %B0,Y+1" CR_TAB
if (reg_base == reg_src)
{
- /* "ld r26,-X" is undefined */
+ /* "ld r26,-X" is undefined */
if (reg_unused_after (insn, base))
{
return *l = 7, ("mov __tmp_reg__, %B1" CR_TAB
l = &dummy;
gcc_assert (4 == GET_MODE_SIZE (GET_MODE (dest)));
+
if (REG_P (dest))
{
if (REG_P (src)) /* mov r,r */
const char *templ;
if (src == CONST0_RTX (GET_MODE (dest)))
- operands[1] = zero_reg_rtx;
+ operands[1] = zero_reg_rtx;
templ = out_movsi_mr_r (insn, operands, real_l);
TINY_SBIW (%I1, %J1, 1) CR_TAB
"ld %A0,%b1" CR_TAB
"mov %B0,__tmp_reg__", op, plen, -8);
- }
+ }
else
{
avr_asm_len (TINY_ADIW (%I1, %J1, %o1) CR_TAB
"ldd %A0,%A1" CR_TAB
"mov %B0,__tmp_reg__", op, plen, -4);
- return avr_asm_len ("ldd %A0,%A1" CR_TAB
- "ldd %B0,%B1" CR_TAB
- "ldd %C0,%C1", op, plen, -3);
+ return avr_asm_len ("ldd %A0,%A1" CR_TAB
+ "ldd %B0,%B1" CR_TAB
+ "ldd %C0,%C1", op, plen, -3);
}
else if (GET_CODE (base) == PRE_DEC) /* (--R) */
return avr_asm_len ("ld %C0,%1" CR_TAB
TINY_ADIW (%I0, %J0, %o0) CR_TAB
"st %b0,__tmp_reg__", op, plen, -4);
}
- else
+ else
{
avr_asm_len (TINY_ADIW (%I0, %J0, %o0) CR_TAB
"st %b0,%1", op, plen, -3);
}
if (!reg_unused_after (insn, XEXP (x, 0)))
- avr_asm_len (TINY_SBIW (%I0, %J0, %o0), op, plen, 2);
+ avr_asm_len (TINY_SBIW (%I0, %J0, %o0), op, plen, 2);
return "";
}
}
return !mem_volatile_p && reg_unused_after (insn, base)
- ? avr_asm_len ("st %0+,%A1" CR_TAB
- "st %0,%B1", op, plen, -2)
- : avr_asm_len (TINY_ADIW (%E0, %F0, 1) CR_TAB
- "st %0,%B1" CR_TAB
- "st -%0,%A1", op, plen, -4);
+ ? avr_asm_len ("st %0+,%A1" CR_TAB
+ "st %0,%B1", op, plen, -2)
+ : avr_asm_len (TINY_ADIW (%E0, %F0, 1) CR_TAB
+ "st %0,%B1" CR_TAB
+ "st -%0,%A1", op, plen, -4);
}
static const char*
&& reg_unused_after (insn, xreg))
{
return AVR_TINY
- ? avr_asm_len (TINY_ADIW (%A0, %B0, %n1), xop, plen, 2)
- : avr_asm_len ("adiw %0,%n1", xop, plen, 1);
+ ? avr_asm_len (TINY_ADIW (%A0, %B0, %n1), xop, plen, 2)
+ : avr_asm_len ("adiw %0,%n1", xop, plen, 1);
}
}
int max_len = 10; /* If larger than this, always use a loop. */
if (count <= 0)
- return;
+ return;
if (count < 8 && !scratch)
use_zero_reg = true;
fatal_insn ("bad shift insn:", insn);
if (second_label)
- avr_asm_len ("rjmp 2f", op, plen, 1);
+ avr_asm_len ("rjmp 2f", op, plen, 1);
avr_asm_len ("1:", op, plen, 0);
avr_asm_len (templ, op, plen, t_len);
xop[3] = all_regs_rtx[dest.regno_msb];
avr_asm_len ("ldi %3,127", xop, plen, 1);
avr_asm_len ((have_carry && lsb_in_tmp_reg ? "adc __tmp_reg__,%3"
- : have_carry ? "adc %2,%3"
- : lsb_in_tmp_reg ? "add __tmp_reg__,%3"
- : "add %2,%3"),
+ : have_carry ? "adc %2,%3"
+ : lsb_in_tmp_reg ? "add __tmp_reg__,%3"
+ : "add %2,%3"),
xop, plen, 1);
}
else
"lsl __tmp_reg__", &all_regs_rtx[s0], plen, 2);
sign_in_carry = true;
- }
+ }
gcc_assert (sign_in_carry + msb_in_carry + lsb_in_carry <= 1);
/* Create RTL split patterns for byte sized rotate expressions. This
- produces a series of move instructions and considers overlap situations.
- Overlapping non-HImode operands need a scratch register. */
+ produces a series of move instructions and considers overlap situations.
+ Overlapping non-HImode operands need a scratch register. */
bool
avr_rotate_bytes (rtx operands[])
{
- machine_mode mode = GET_MODE (operands[0]);
- bool overlapped = reg_overlap_mentioned_p (operands[0], operands[1]);
- bool same_reg = rtx_equal_p (operands[0], operands[1]);
- int num = INTVAL (operands[2]);
- rtx scratch = operands[3];
- /* Work out if byte or word move is needed. Odd byte rotates need QImode.
- Word move if no scratch is needed, otherwise use size of scratch. */
- machine_mode move_mode = QImode;
- int move_size, offset, size;
-
- if (num & 0xf)
- move_mode = QImode;
- else if ((mode == SImode && !same_reg) || !overlapped)
- move_mode = HImode;
- else
- move_mode = GET_MODE (scratch);
-
- /* Force DI rotate to use QI moves since other DI moves are currently split
- into QI moves so forward propagation works better. */
- if (mode == DImode)
- move_mode = QImode;
- /* Make scratch smaller if needed. */
- if (SCRATCH != GET_CODE (scratch)
- && HImode == GET_MODE (scratch)
- && QImode == move_mode)
- scratch = simplify_gen_subreg (move_mode, scratch, HImode, 0);
-
- move_size = GET_MODE_SIZE (move_mode);
- /* Number of bytes/words to rotate. */
- offset = (num >> 3) / move_size;
- /* Number of moves needed. */
- size = GET_MODE_SIZE (mode) / move_size;
- /* Himode byte swap is special case to avoid a scratch register. */
- if (mode == HImode && same_reg)
- {
- /* HImode byte swap, using xor. This is as quick as using scratch. */
- rtx src, dst;
- src = simplify_gen_subreg (move_mode, operands[1], mode, 0);
- dst = simplify_gen_subreg (move_mode, operands[0], mode, 1);
- if (!rtx_equal_p (dst, src))
- {
- emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
- emit_move_insn (src, gen_rtx_XOR (QImode, src, dst));
- emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
- }
- }
- else
- {
+ machine_mode mode = GET_MODE (operands[0]);
+ bool overlapped = reg_overlap_mentioned_p (operands[0], operands[1]);
+ bool same_reg = rtx_equal_p (operands[0], operands[1]);
+ int num = INTVAL (operands[2]);
+ rtx scratch = operands[3];
+ /* Work out if byte or word move is needed. Odd byte rotates need QImode.
+ Word move if no scratch is needed, otherwise use size of scratch. */
+ machine_mode move_mode = QImode;
+ int move_size, offset, size;
+
+ if (num & 0xf)
+ move_mode = QImode;
+ else if ((mode == SImode && !same_reg) || !overlapped)
+ move_mode = HImode;
+ else
+ move_mode = GET_MODE (scratch);
+
+ /* Force DI rotate to use QI moves since other DI moves are currently split
+ into QI moves so forward propagation works better. */
+ if (mode == DImode)
+ move_mode = QImode;
+ /* Make scratch smaller if needed. */
+ if (SCRATCH != GET_CODE (scratch)
+ && HImode == GET_MODE (scratch)
+ && QImode == move_mode)
+ scratch = simplify_gen_subreg (move_mode, scratch, HImode, 0);
+
+ move_size = GET_MODE_SIZE (move_mode);
+ /* Number of bytes/words to rotate. */
+ offset = (num >> 3) / move_size;
+ /* Number of moves needed. */
+ size = GET_MODE_SIZE (mode) / move_size;
+ /* HImode byte swap is a special case to avoid a scratch register. */
+ if (mode == HImode && same_reg)
+ {
+ /* HImode byte swap, using xor. This is as quick as using scratch. */
+ rtx src, dst;
+ src = simplify_gen_subreg (move_mode, operands[1], mode, 0);
+ dst = simplify_gen_subreg (move_mode, operands[0], mode, 1);
+ if (!rtx_equal_p (dst, src))
+ {
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ emit_move_insn (src, gen_rtx_XOR (QImode, src, dst));
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ }
+ }
+ else
+ {
#define MAX_SIZE 8 /* GET_MODE_SIZE (DImode) / GET_MODE_SIZE (QImode) */
- /* Create linked list of moves to determine move order. */
- struct {
- rtx src, dst;
- int links;
- } move[MAX_SIZE + 8];
- int blocked, moves;
-
- gcc_assert (size <= MAX_SIZE);
- /* Generate list of subreg moves. */
- for (int i = 0; i < size; i++)
- {
- int from = i;
- int to = (from + offset) % size;
- move[i].src = simplify_gen_subreg (move_mode, operands[1],
- mode, from * move_size);
- move[i].dst = simplify_gen_subreg (move_mode, operands[0],
- mode, to * move_size);
- move[i].links = -1;
- }
- /* Mark dependence where a dst of one move is the src of another move.
- The first move is a conflict as it must wait until second is
- performed. We ignore moves to self - we catch this later. */
- if (overlapped)
- for (int i = 0; i < size; i++)
- if (reg_overlap_mentioned_p (move[i].dst, operands[1]))
- for (int j = 0; j < size; j++)
- if (j != i && rtx_equal_p (move[j].src, move[i].dst))
- {
- /* The dst of move i is the src of move j. */
- move[i].links = j;
- break;
- }
-
- blocked = -1;
- moves = 0;
- /* Go through move list and perform non-conflicting moves. As each
- non-overlapping move is made, it may remove other conflicts
- so the process is repeated until no conflicts remain. */
- do
- {
- blocked = -1;
- moves = 0;
- /* Emit move where dst is not also a src or we have used that
- src already. */
- for (int i = 0; i < size; i++)
- if (move[i].src != NULL_RTX)
- {
- if (move[i].links == -1
- || move[move[i].links].src == NULL_RTX)
- {
- moves++;
- /* Ignore NOP moves to self. */
- if (!rtx_equal_p (move[i].dst, move[i].src))
- emit_move_insn (move[i].dst, move[i].src);
-
- /* Remove conflict from list. */
- move[i].src = NULL_RTX;
- }
- else
- blocked = i;
- }
+ /* Create linked list of moves to determine move order. */
+ struct {
+ rtx src, dst;
+ int links;
+ } move[MAX_SIZE + 8];
+ int blocked, moves;
+
+ gcc_assert (size <= MAX_SIZE);
+ /* Generate list of subreg moves. */
+ for (int i = 0; i < size; i++)
+ {
+ int from = i;
+ int to = (from + offset) % size;
+ move[i].src = simplify_gen_subreg (move_mode, operands[1],
+ mode, from * move_size);
+ move[i].dst = simplify_gen_subreg (move_mode, operands[0],
+ mode, to * move_size);
+ move[i].links = -1;
+ }
+ /* Mark dependence where a dst of one move is the src of another move.
+ The first move is a conflict as it must wait until second is
+ performed. We ignore moves to self - we catch this later. */
+ if (overlapped)
+ for (int i = 0; i < size; i++)
+ if (reg_overlap_mentioned_p (move[i].dst, operands[1]))
+ for (int j = 0; j < size; j++)
+ if (j != i && rtx_equal_p (move[j].src, move[i].dst))
+ {
+ /* The dst of move i is the src of move j. */
+ move[i].links = j;
+ break;
+ }
- /* Check for deadlock. This is when no moves occurred and we have
- at least one blocked move. */
- if (moves == 0 && blocked != -1)
- {
- /* Need to use scratch register to break deadlock.
- Add move to put dst of blocked move into scratch.
- When this move occurs, it will break chain deadlock.
- The scratch register is substituted for real move. */
-
- gcc_assert (SCRATCH != GET_CODE (scratch));
-
- move[size].src = move[blocked].dst;
- move[size].dst = scratch;
- /* Scratch move is never blocked. */
- move[size].links = -1;
- /* Make sure we have valid link. */
- gcc_assert (move[blocked].links != -1);
- /* Replace src of blocking move with scratch reg. */
- move[move[blocked].links].src = scratch;
- /* Make dependent on scratch move occurring. */
- move[blocked].links = size;
- size=size+1;
- }
- }
- while (blocked != -1);
- }
- return true;
+ blocked = -1;
+ moves = 0;
+ /* Go through move list and perform non-conflicting moves. As each
+ non-overlapping move is made, it may remove other conflicts
+ so the process is repeated until no conflicts remain. */
+ do
+ {
+ blocked = -1;
+ moves = 0;
+ /* Emit move where dst is not also a src or we have used that
+ src already. */
+ for (int i = 0; i < size; i++)
+ if (move[i].src != NULL_RTX)
+ {
+ if (move[i].links == -1
+ || move[move[i].links].src == NULL_RTX)
+ {
+ moves++;
+ /* Ignore NOP moves to self. */
+ if (!rtx_equal_p (move[i].dst, move[i].src))
+ emit_move_insn (move[i].dst, move[i].src);
+
+ /* Remove conflict from list. */
+ move[i].src = NULL_RTX;
+ }
+ else
+ blocked = i;
+ }
+
+ /* Check for deadlock. This is when no moves occurred and we have
+ at least one blocked move. */
+ if (moves == 0 && blocked != -1)
+ {
+ /* Need to use scratch register to break deadlock.
+ Add move to put dst of blocked move into scratch.
+ When this move occurs, it will break chain deadlock.
+ The scratch register is substituted for real move. */
+
+ gcc_assert (SCRATCH != GET_CODE (scratch));
+
+ move[size].src = move[blocked].dst;
+ move[size].dst = scratch;
+ /* Scratch move is never blocked. */
+ move[size].links = -1;
+ /* Make sure we have valid link. */
+ gcc_assert (move[blocked].links != -1);
+ /* Replace src of blocking move with scratch reg. */
+ move[move[blocked].links].src = scratch;
+ /* Make dependent on scratch move occurring. */
+ move[blocked].links = size;
+ size = size + 1;
+ }
+ }
+ while (blocked != -1);
+ }
+ return true;
}
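The avr_rotate_bytes hunk above only reindents the function, but its two
core ideas are worth spelling out: the three-XOR in-place swap used for
the HImode same-register case, and breaking a cyclic chain of byte moves
by parking one byte in a scratch location.  Below is a self-contained C
sketch of both, operating on a plain byte array instead of RTL subregs.
The names xor_swap and rotate_bytes are illustrative only, and the sketch
walks each permutation cycle directly rather than replaying the
move[].links worklist the real code uses; it is not the avr.c algorithm,
just the underlying technique.

  #include <stdint.h>
  #include <stdio.h>

  /* 1) In-place swap of two bytes via three XORs, as in the
        "HImode byte swap, using xor" branch above.  The self-swap
        guard matters: XORing a byte with itself would zero it.  */
  static void
  xor_swap (uint8_t *a, uint8_t *b)
  {
    if (a != b)
      {
        *a ^= *b;
        *b ^= *a;
        *a ^= *b;
      }
  }

  /* 2) Rotate SIZE bytes upward by OFFSET positions using single-byte
        moves.  Each cycle of the permutation is broken by parking its
        first byte in SCRATCH, which is the role the scratch register
        plays in the deadlock case of avr_rotate_bytes.  */
  static void
  rotate_bytes (uint8_t *reg, int size, int offset)
  {
    int moved = 0;

    offset = ((offset % size) + size) % size;
    if (offset == 0)
      return;

    for (int start = 0; moved < size; start++)
      {
        uint8_t scratch = reg[start];
        int dst = start;

        for (;;)
          {
            int src = (dst - offset + size) % size;
            if (src == start)
              break;
            reg[dst] = reg[src];   /* Pull each byte from its source.  */
            dst = src;
            moved++;
          }
        reg[dst] = scratch;        /* Close the cycle with the parked byte.  */
        moved++;
      }
  }

  int
  main (void)
  {
    uint8_t hi[2] = { 0x12, 0x34 };
    uint8_t si[4] = { 1, 2, 3, 4 };

    xor_swap (&hi[0], &hi[1]);
    rotate_bytes (si, 4, 1);

    printf ("%02x %02x\n", (unsigned) hi[0], (unsigned) hi[1]); /* 34 12 */
    printf ("%d %d %d %d\n", si[0], si[1], si[2], si[3]);       /* 4 1 2 3 */
    return 0;
  }

The worklist in the real code exists because the moves are RTL insns
emitted one at a time and a scratch register is only needed when every
remaining move is blocked; the sketch only shows the shared
permutation-with-one-spare idea.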
&& SYMBOL_REF_P ((symbol = XEXP (mem, 0)))
&& (SYMBOL_REF_FLAGS (symbol) & (SYMBOL_FLAG_IO | SYMBOL_FLAG_ADDRESS)))
{
-
if (!local_p)
{
fprintf (stream, "\t.globl\t");
&& TREE_CODE (decl) != FUNCTION_DECL
&& MEM_P (rtl)
&& SYMBOL_REF_P (XEXP (rtl, 0)))
- {
+ {
rtx sym = XEXP (rtl, 0);
tree type = TREE_TYPE (decl);
tree attr = DECL_ATTRIBUTES (decl);
17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
0, 1,
32, 33, 34, 35
- };
+ };
static const int tiny_order_0[] = {
20, 21,
22, 23,
17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
0, 1,
32, 33, 34, 35
- };
+ };
static const int tiny_order_1[] = {
22, 23,
24, 25,
17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
1, 0,
32, 33, 34, 35
- };
+ };
/* Select specific register allocation order.
Tiny Core (ATtiny4/5/9/10/20/40) devices have only 16 registers,
: (AVR_TINY ? tiny_order_0 : order_0));
for (size_t i = 0; i < ARRAY_SIZE (order_0); ++i)
- reg_alloc_order[i] = order[i];
+ reg_alloc_order[i] = order[i];
}
*total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 5 : 4);
}
- if (mode == DImode)
- *total *= 2;
+ if (mode == DImode)
+ *total *= 2;
- return true;
+ return true;
default:
return false;
emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, QImode),
avr_mem_clobber()));
cycles -= cycles_used;
- }
+ }
while (cycles >= 2)
{