? (DECL_ATTRIBUTES (decl)) \
: TYPE_ATTRIBUTES (TREE_TYPE (decl))
-/* Set to 1 by expand_prologue() when the function is an interrupt handler. */
-int current_function_interrupt;
+/* Set to true by expand_prologue() when the function is an
+ interrupt handler. */
+bool current_function_interrupt;
tree sh_deferred_function_attributes;
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
static void sh_option_override (void);
static void sh_override_options_after_change (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
-static rtx_insn *frame_insn (rtx);
+static rtx_insn* emit_frame_insn (rtx);
static rtx push (int);
static void pop (int);
-static void push_regs (HARD_REG_SET *, int);
+static void push_regs (HARD_REG_SET* mask, bool interrupt_handler);
static int calc_live_regs (HARD_REG_SET *);
static HOST_WIDE_INT rounded_frame_size (int);
static bool sh_frame_pointer_required (void);
static bool sh_cannot_substitute_mem_equiv_p (rtx);
static bool sh_legitimize_address_displacement (rtx *, rtx *, machine_mode);
static int scavenge_reg (HARD_REG_SET *s);
-struct save_schedule_s;
static rtx sh_struct_value_rtx (tree, int);
static rtx sh_function_value (const_tree, const_tree, bool);
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
-/* These are NULLed out on non-SH5 in TARGET_OPTION_OVERRIDE. */
-#undef TARGET_ASM_UNALIGNED_DI_OP
-#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
-#undef TARGET_ASM_ALIGNED_DI_OP
-#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
-
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sh_option_override
sh_cpu = PROCESSOR_SH4A;
}
- /* Only the sh64-elf assembler fully supports .quad properly. */
- targetm.asm_out.aligned_op.di = NULL;
- targetm.asm_out.unaligned_op.di = NULL;
-
 /* User/privileged mode is supported only on SH3* and SH4*.
Disable it for everything else. */
if (!TARGET_SH3 && TARGET_USERMODE)
if (mode == Pmode || mode == ptr_mode)
{
- rtx op0, op1, opc;
- enum tls_model tls_kind;
-
- op0 = operands[0];
- op1 = operands[1];
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+ rtx opc;
if (GET_CODE (op1) == CONST
&& GET_CODE (XEXP (op1, 0)) == PLUS
&& (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
else
opc = NULL_RTX;
+ enum tls_model tls_kind;
+
if (! reload_in_progress && ! reload_completed
&& (tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
{
emit_use (gen_rtx_REG (SImode, PIC_REG));
if (flag_schedule_insns)
emit_insn (gen_blockage ());
- }
+ }
switch (tls_kind)
{
{
rtx t_reg = get_t_reg_rtx ();
enum rtx_code oldcode = code;
- machine_mode mode;
/* First need a compare insn. */
switch (code)
if (code != oldcode)
std::swap (op0, op1);
- mode = GET_MODE (op0);
+ machine_mode mode = GET_MODE (op0);
if (mode == VOIDmode)
mode = GET_MODE (op1);
unspec_caller_rtx_p (rtx pat)
{
rtx base, offset;
- int i;
-
split_const (pat, &base, &offset);
+
if (GET_CODE (base) == UNSPEC)
{
if (XINT (base, 1) == UNSPEC_CALLER)
return true;
- for (i = 0; i < XVECLEN (base, 0); i++)
+ for (int i = 0; i < XVECLEN (base, 0); i++)
if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
return true;
}
static bool
sh_cannot_copy_insn_p (rtx_insn *insn)
{
- rtx pat;
-
if (!reload_completed || !flag_pic)
return false;
if (asm_noperands (insn) >= 0)
return false;
- pat = PATTERN (insn);
+ rtx pat = PATTERN (insn);
if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == USE)
return false;
static rtx_code_label *
add_constant (rtx x, machine_mode mode, rtx last_value)
{
- int i;
rtx_code_label *lab, *new_rtx;
label_ref_list_t ref, newref;
/* First see if we've already got it. */
- for (i = 0; i < pool_size; i++)
+ for (int i = 0; i < pool_size; i++)
{
if (x->code == pool_vector[i].value->code
&& mode == pool_vector[i].mode)
dump_table (rtx_insn *start, rtx_insn *barrier)
{
rtx_insn *scan = barrier;
- int i;
bool need_align = true;
rtx lab;
label_ref_list_t ref;
/* Do two passes, first time dump out the HI sized constants. */
- for (i = 0; i < pool_size; i++)
+ for (int i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
need_align = false;
- for (i = 0; i < pool_size; i++)
+ for (int i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
pool_size = 0;
}
- for (i = 0; i < pool_size; i++)
+ for (int i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
if (! reg_part)
return NULL_RTX;
reg = XEXP (reg_part, 0);
- for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
+ for (int i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
{
part = XVECEXP (pattern, 0, i);
if (part == reg_part || GET_CODE (part) == CLOBBER)
static bool
noncall_uses_reg (rtx reg, rtx_insn *insn, rtx *set)
{
- rtx pattern, reg2;
-
*set = NULL_RTX;
- reg2 = sfunc_uses_reg (insn);
+ rtx reg2 = sfunc_uses_reg (insn);
if (reg2 && REGNO (reg2) == REGNO (reg))
{
- pattern = single_set (insn);
+ rtx pattern = single_set (insn);
if (pattern
&& REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
{
/* We don't use rtx_equal_p because we don't care if the mode is
different. */
- pattern = single_set (insn);
+ rtx pattern = single_set (insn);
if (pattern
&& REG_P (SET_DEST (pattern))
&& REGNO (reg) == REGNO (SET_DEST (pattern)))
return true;
}
- pattern = PATTERN (insn);
+ rtx pattern = PATTERN (insn);
if (GET_CODE (pattern) == PARALLEL)
{
- int i;
-
- for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
+ for (int i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
return true;
pattern = XVECEXP (pattern, 0, 0);
{
enum rtx_code code;
const char *fmt;
- int i, used = 0;
+ int used = 0;
if (! x)
return used;
fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
{
if (fmt[i] == 'E')
{
- int j;
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
used |= regs_used (XVECEXP (x, i, j), is_dest);
}
else if (fmt[i] == 'e')
{
int dead = 0;
rtx_insn *prev = prev_nonnote_insn (jump);
- rtx dest;
/* First, check if we already have an instruction that satisfies our need. */
if (prev && NONJUMP_INSN_P (prev) && ! prev->deleted ())
}
/* We can't use JUMP_LABEL here because it might be undefined
when not optimizing. */
- dest = XEXP (SET_SRC (PATTERN (jump)), 0);
+ rtx dest = XEXP (SET_SRC (PATTERN (jump)), 0);
/* If the branch is out of range, try to find a scratch register for it. */
if (optimize
&& (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
for (scan = jump; (scan = PREV_INSN (scan)); )
{
- enum rtx_code code;
-
if (scan->deleted ())
continue;
- code = GET_CODE (scan);
+ rtx_code code = GET_CODE (scan);
if (code == CODE_LABEL || code == JUMP_INSN)
break;
if (code == INSN
for (used = dead = 0, scan = JUMP_LABEL_AS_INSN (jump);
(scan = NEXT_INSN (scan)); )
{
- enum rtx_code code;
-
if (scan->deleted ())
continue;
- code = GET_CODE (scan);
+ rtx_code code = GET_CODE (scan);
if (INSN_P (scan))
{
used |= regs_used (PATTERN (scan), 0);
int address;
};
-static void gen_far_branch (struct far_branch *);
enum mdep_reorg_phase_e mdep_reorg_phase;
+
static void
gen_far_branch (struct far_branch *bp)
{
rtx_insn *insn = bp->insert_place;
rtx_jump_insn *jump;
rtx_code_label *label = gen_label_rtx ();
- int ok;
emit_label_after (label, insn);
if (bp->far_label)
JUMP_LABEL (jump) = pat;
}
- ok = invert_jump (as_a <rtx_jump_insn *> (insn), label, 1);
+ bool ok = invert_jump (as_a <rtx_jump_insn *> (insn), label, 1);
gcc_assert (ok);
/* If we are branching around a jump (rather than a return), prevent
int
barrier_align (rtx_insn *barrier_or_label)
{
- rtx next, pat;
-
if (! barrier_or_label)
return 0;
&& PREV_INSN (barrier_or_label)
&& JUMP_TABLE_DATA_P (PREV_INSN (barrier_or_label)))
{
- pat = PATTERN (PREV_INSN (barrier_or_label));
+ rtx pat = PATTERN (PREV_INSN (barrier_or_label));
/* If this is a very small table, we want to keep the alignment after
the table to the minimum for proper code alignment. */
return ((optimize_size
? 1 : align_jumps_log);
}
- next = next_active_insn (barrier_or_label);
+ rtx next = next_active_insn (barrier_or_label);
if (! next)
return 0;
- pat = PATTERN (next);
+ rtx pat = PATTERN (next);
if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
/* This is a barrier in front of a constant table. */
get_dest_uid (rtx label, int max_uid)
{
rtx_insn *dest = next_real_insn (label);
- int dest_uid;
+
if (! dest)
/* This can happen for an undefined label. */
return 0;
- dest_uid = INSN_UID (dest);
+ int dest_uid = INSN_UID (dest);
/* If this is a newly created branch redirection blocking instruction,
we cannot index the branch_uid or insn_addresses arrays with its
uid. But then, we won't need to, because the actual destination is
if (TARGET_RELAX)
{
- rtx note;
-
- note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
- if (note)
+ if (rtx note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX))
{
- rtx pattern;
-
- pattern = PATTERN (insn);
+ rtx pattern = PATTERN (insn);
if (GET_CODE (pattern) == PARALLEL)
pattern = XVECEXP (pattern, 0, 0);
switch (GET_CODE (pattern))
const char *
output_jump_label_table (void)
{
- int i;
-
if (pool_size)
{
fprintf (asm_out_file, "\t.align 2\n");
- for (i = 0; i < pool_size; i++)
+ for (int i = 0; i < pool_size; i++)
{
pool_node *p = &pool_vector[i];
output_stack_adjust (int size, rtx reg, int epilogue_p,
HARD_REG_SET *live_regs_mask, bool frame_p)
{
- rtx_insn *(*emit_fn) (rtx) = frame_p ? &frame_insn : &emit_insn;
+ rtx_insn *(*emit_fn) (rtx) = frame_p ? &emit_frame_insn : &emit_insn;
if (size)
{
HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
}
}
-/* Emit the specified insn and mark it as frame related.
- FIXME: Rename this to emit_frame_insn. */
+/* Emit the specified insn and mark it as frame related. */
static rtx_insn *
-frame_insn (rtx x)
+emit_frame_insn (rtx x)
{
rtx_insn *insn = emit_insn (x);
RTX_FRAME_RELATED_P (insn) = 1;
else
x = gen_push (gen_rtx_REG (SImode, rn));
- x = frame_insn (x);
+ x = emit_frame_insn (x);
add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
return x;
}
/* Generate code to push the regs specified in the mask. */
static void
-push_regs (HARD_REG_SET *mask, int interrupt_handler)
+push_regs (HARD_REG_SET *mask, bool interrupt_handler)
{
- int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
- int skip_fpscr = 0;
+ bool skip_fpscr = false;
/* Push PR last; this gives better latencies after the prologue, and
candidates for the return delay slot when there are no general
registers pushed. */
- for (; i < FIRST_PSEUDO_REGISTER; i++)
+ for (int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
+ i < FIRST_PSEUDO_REGISTER; i++)
{
/* If this is an interrupt handler, and the SZ bit varies,
and we have to push any floating point register, we need
push (FPSCR_REG);
COMPL_HARD_REG_SET (unsaved, *mask);
fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
- skip_fpscr = 1;
+ skip_fpscr = true;
}
if (i != PR_REG
&& (i != FPSCR_REG || ! skip_fpscr)
{
unsigned int count = 0;
- for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
+ for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
if (TEST_HARD_REG_BIT (*mask, i))
count++;
else
insns. */
emit_insn (gen_blockage ());
x = gen_movml_push_banked (sp_reg);
- x = frame_insn (x);
- for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
+ x = emit_frame_insn (x);
+ for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
{
mem = gen_rtx_MEM (SImode, plus_constant (Pmode, sp_reg, i * 4));
reg = gen_rtx_REG (SImode, i);
emit_insn (gen_blockage ());
}
else
- for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
+ for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
if (TEST_HARD_REG_BIT (*mask, i))
push (i);
}
calc_live_regs (HARD_REG_SET *live_regs_mask)
{
unsigned int reg;
- int count;
tree attrs;
bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
bool nosave_low_regs;
- int pr_live, has_call;
attrs = DECL_ATTRIBUTES (current_function_decl);
interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
target_flags &= ~MASK_FPU_SINGLE;
/* If we can save a lot of saves by switching to double mode, do that. */
else if (TARGET_FPU_DOUBLE && TARGET_FMOVD && TARGET_FPU_SINGLE)
- for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
+ for (int count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
&& (! call_really_used_regs[reg]
|| interrupt_handler)
break;
}
- {
- rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
- pr_live = (pr_initial
+
+ rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
+ bool pr_live = (pr_initial
? (!REG_P (pr_initial)
|| REGNO (pr_initial) != (PR_REG))
: df_regs_ever_live_p (PR_REG));
- /* For Shcompact, if not optimizing, we end up with a memory reference
- using the return address pointer for __builtin_return_address even
- though there is no actual need to put the PR register on the stack. */
- pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
- }
+ /* For Shcompact, if not optimizing, we end up with a memory reference
+ using the return address pointer for __builtin_return_address even
+ though there is no actual need to put the PR register on the stack. */
+ pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
+
/* Force PR to be live if the prologue has to call the SHmedia
argument decoder or register saver. */
- has_call = pr_live;
+ bool has_call = pr_live;
+
+ int count;
for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
{
if (reg == PR_REG
return ((size + pushed + align - 1) & -align) - pushed;
}
-/* Choose a call-clobbered target-branch register that remains
- unchanged along the whole function. We set it up as the return
- value in the prologue. */
-int
-sh_media_register_for_return (void)
-{
- int regno;
- int tr0_used;
-
- if (! crtl->is_leaf)
- return -1;
- if (lookup_attribute ("interrupt_handler",
- DECL_ATTRIBUTES (current_function_decl)))
- return -1;
- if (sh_cfun_interrupt_handler_p ())
- return -1;
-
- tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
-
- for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
- if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
- return regno;
-
- return -1;
-}
-
-/* The maximum registers we need to save are:
- - 62 general purpose registers (r15 is stack pointer, r63 is zero)
- - 32 floating point registers (for each pair, we save none,
- one single precision value, or a double precision value).
- - 8 target registers
- - add 1 entry for a delimiter. */
-#define MAX_SAVED_REGS (62+32+8)
-
-typedef struct save_entry_s
-{
- unsigned char reg;
- unsigned char mode;
- short offset;
-} save_entry;
-
-#define MAX_TEMPS 4
-
-/* There will be a delimiter entry with VOIDmode both at the start and the
- end of a filled in schedule. The end delimiter has the offset of the
- save with the smallest (i.e. most negative) offset. */
-typedef struct save_schedule_s
-{
- save_entry entries[MAX_SAVED_REGS + 2];
- int temps[MAX_TEMPS+1];
-} save_schedule;
-
/* Expand code for the function prologue. */
void
sh_expand_prologue (void)
{
- HARD_REG_SET live_regs_mask;
- int d, i;
- int d_rounding = 0;
int save_flags = target_flags;
- int pretend_args;
- int stack_usage;
tree sp_switch_attr
= lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
/* We have pretend args if we had an object sent partially in registers
and partially on the stack, e.g. a large structure. */
- pretend_args = crtl->args.pretend_args_size;
+ int pretend_args = crtl->args.pretend_args_size;
if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
&& (NPARM_REGS(SImode)
> crtl->args.info.arg_count[(int) SH_ARG_INT]))
pretend_args = 0;
- output_stack_adjust (-pretend_args
- - crtl->args.info.stack_regs * 8,
- stack_pointer_rtx, 0, NULL, true);
- stack_usage = pretend_args + crtl->args.info.stack_regs * 8;
+ output_stack_adjust (-pretend_args, stack_pointer_rtx, 0, NULL, true);
+ int stack_usage = pretend_args;
/* Emit the code for SETUP_VARARGS. */
if (cfun->stdarg)
if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
{
/* Push arg regs as if they'd been provided by caller in stack. */
- for (i = 0; i < NPARM_REGS(SImode); i++)
+ for (int i = 0; i < NPARM_REGS(SImode); i++)
{
int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
/* The argument specifies a variable holding the address of the
stack the interrupt function should switch to/from at entry/exit. */
tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
- const char *s
- = ggc_strdup (TREE_STRING_POINTER (arg));
+ const char* s = ggc_strdup (TREE_STRING_POINTER (arg));
rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
lab = add_constant (sp_switch, SImode, 0);
emit_insn (gen_sp_switch_1 (newsrc));
}
- d = calc_live_regs (&live_regs_mask);
+ HARD_REG_SET live_regs_mask;
+ int d = calc_live_regs (&live_regs_mask);
/* ??? Maybe we could save some switching if we can move a mode switch
that already happens to be at the function start into the prologue. */
if (target_flags != save_flags && ! current_function_interrupt)
target_flags = save_flags;
- output_stack_adjust (-rounded_frame_size (d) + d_rounding,
+ output_stack_adjust (-rounded_frame_size (d),
stack_pointer_rtx, 0, NULL, true);
- stack_usage += rounded_frame_size (d) - d_rounding;
+ stack_usage += rounded_frame_size (d);
if (frame_pointer_needed)
- frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
+ emit_frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
/* If we are profiling, make sure no instructions are scheduled before
the call to mcount. Similarly if some call instructions are swapped
void
sh_expand_epilogue (bool sibcall_p)
{
- HARD_REG_SET live_regs_mask;
- int d, i;
- int d_rounding = 0;
-
int save_flags = target_flags;
- int frame_size, save_size;
- int fpscr_deferred = 0;
+ bool fpscr_deferred = false;
int e = sibcall_p ? -1 : 1;
- d = calc_live_regs (&live_regs_mask);
+ HARD_REG_SET live_regs_mask;
+ int d = calc_live_regs (&live_regs_mask);
- save_size = d;
- frame_size = rounded_frame_size (d);
+ int save_size = d;
+ int frame_size = rounded_frame_size (d);
if (frame_pointer_needed)
{
occur after the SP adjustment and clobber data in the local
frame. */
emit_insn (gen_blockage ());
- frame_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
+ emit_frame_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
}
else if (frame_size)
{
{
unsigned int count = 0;
- for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
+ for (int i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
if (TEST_HARD_REG_BIT (live_regs_mask, i))
count++;
else
emit_insn (gen_blockage ());
}
else
- for (i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
+ for (int i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
if (TEST_HARD_REG_BIT (live_regs_mask, i))
pop (i);
else
last_reg = FIRST_PSEUDO_REGISTER;
- for (i = 0; i < last_reg; i++)
+ for (int i = 0; i < last_reg; i++)
{
int j = (FIRST_PSEUDO_REGISTER - 1) - i;
if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
&& hard_reg_set_intersect_p (live_regs_mask,
reg_class_contents[DF_REGS]))
- fpscr_deferred = 1;
+ fpscr_deferred = true;
/* For an ISR with RESBANK attribute assigned, don't pop
following registers, R0-R14, MACH, MACL and GBR. */
else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
emit_insn (gen_toggle_sz ());
target_flags = save_flags;
- output_stack_adjust (crtl->args.pretend_args_size
- + save_size + d_rounding
- + crtl->args.info.stack_regs * 8,
+ output_stack_adjust (crtl->args.pretend_args_size + save_size,
stack_pointer_rtx, e, NULL, true);
if (crtl->calls_eh_return)
HARD_REG_SET live_regs_mask;
int d = calc_live_regs (&live_regs_mask);
- /* If pr_reg isn't life, we can set it (or the register given in
- sh_media_register_for_return) directly. */
+ /* If pr_reg isn't life, we can set it directly. */
if (! TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
{
rtx rr = gen_rtx_REG (SImode, PR_REG);
int bufsize, regno;
alias_set_type alias_set;
- if (! TARGET_SH2E && ! TARGET_SH4)
+ if (!TARGET_FPU_ANY)
{
error ("__builtin_saveregs not supported by this subtarget");
return const0_rtx;
sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
gimple_seq *post_p ATTRIBUTE_UNUSED)
{
- HOST_WIDE_INT size, rsize;
- tree tmp, pptr_type_node;
+ tree tmp;
tree addr, lab_over = NULL, result = NULL;
- bool pass_by_ref;
tree eff_type;
- if (!VOID_TYPE_P (type))
- pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
- else
- pass_by_ref = false;
+ const bool pass_by_ref =
+ !VOID_TYPE_P (type)
+ && targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
if (pass_by_ref)
type = build_pointer_type (type);
- size = int_size_in_bytes (type);
- rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
- pptr_type_node = build_pointer_type (ptr_type_node);
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ HOST_WIDE_INT rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+ tree pptr_type_node = build_pointer_type (ptr_type_node);
if ((TARGET_SH2E || TARGET_SH4)
&& ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
{
tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
- int pass_as_float;
tree lab_false;
tree member;
}
}
+ bool pass_as_float;
if (TARGET_FPU_DOUBLE)
{
pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
if (ca->force_mem)
- ca->force_mem = 0;
+ ca->force_mem = false;
if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
{
int
initial_elimination_offset (int from, int to)
{
- int regs_saved;
- int regs_saved_rounding = 0;
- int total_saved_regs_space;
- int total_auto_space;
+ const int regs_saved_rounding = 0;
int save_flags = target_flags;
HARD_REG_SET live_regs_mask;
- regs_saved = calc_live_regs (&live_regs_mask);
+ int regs_saved = calc_live_regs (&live_regs_mask);
- total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
+ int total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
target_flags = save_flags;
- total_saved_regs_space = regs_saved + regs_saved_rounding;
+ int total_saved_regs_space = regs_saved + regs_saved_rounding;
if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
- return total_saved_regs_space + total_auto_space
- + crtl->args.info.byref_regs * 8;
+ return total_saved_regs_space + total_auto_space;
if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
- return total_saved_regs_space + total_auto_space
- + crtl->args.info.byref_regs * 8;
+ return total_saved_regs_space + total_auto_space;
/* Initial gap between fp and sp is 0. */
if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
void
sh_fix_range (const char *const_str)
{
- int i, first, last;
- char *str, *dash, *comma;
-
/* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
REG2 are either register names or register numbers. The effect
of this option is to mark the registers in the range from REG1 to
REG2 as ``fixed'' so they won't be used by the compiler. */
- i = strlen (const_str);
- str = (char *) alloca (i + 1);
- memcpy (str, const_str, i + 1);
+ char* str = strcpy ((char*)alloca (strlen (const_str) + 1), const_str);
while (1)
{
- dash = strchr (str, '-');
+ char* dash = strchr (str, '-');
if (!dash)
{
warning (0, "value of -mfixed-range must have form REG1-REG2");
return;
}
*dash = '\0';
- comma = strchr (dash + 1, ',');
+ char* comma = strchr (dash + 1, ',');
if (comma)
*comma = '\0';
- first = decode_reg_name (str);
+ int first = decode_reg_name (str);
if (first < 0)
{
warning (0, "unknown register name: %s", str);
return;
}
- last = decode_reg_name (dash + 1);
+ int last = decode_reg_name (dash + 1);
if (last < 0)
{
warning (0, "unknown register name: %s", dash + 1);
return;
}
- for (i = first; i <= last; ++i)
+ for (int i = first; i <= last; ++i)
fixed_regs[i] = call_used_regs[i] = 1;
if (!comma)
static void
sh_insert_attributes (tree node, tree *attributes)
{
- tree attrs;
-
if (TREE_CODE (node) != FUNCTION_DECL)
return;
/* Append the attributes to the deferred attributes. */
*sh_deferred_function_attributes_tail = *attributes;
- attrs = sh_deferred_function_attributes;
+ tree attrs = sh_deferred_function_attributes;
if (!attrs)
return;
int
sh2a_get_function_vector_number (rtx x)
{
- int num;
- tree list, t;
-
if ((GET_CODE (x) == SYMBOL_REF)
&& (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
{
- t = SYMBOL_REF_DECL (x);
+ tree t = SYMBOL_REF_DECL (x);
if (TREE_CODE (t) != FUNCTION_DECL)
return 0;
- list = SH_ATTRIBUTES (t);
- while (list)
- {
- if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
- {
- num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
- return num;
- }
-
- list = TREE_CHAIN (list);
- }
+ for (tree list = SH_ATTRIBUTES (t); list; list = TREE_CHAIN (list))
+ if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+ return TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
return 0;
}
td = TREE_TYPE (td);
if (td == error_mark_node)
return false;
- return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
- != NULL_TREE);
+ return lookup_attribute ("renesas", TYPE_ATTRIBUTES (td)) != NULL_TREE;
}
/* True if __attribute__((renesas)) or -mrenesas, for the current
bool
sh2a_function_vector_p (tree func)
{
- tree list;
if (TREE_CODE (func) != FUNCTION_DECL)
return false;
- list = SH_ATTRIBUTES (func);
- while (list)
- {
- if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
- return true;
+ for (tree list = SH_ATTRIBUTES (func); list; list = TREE_CHAIN (list))
+ if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
+ return true;
- list = TREE_CHAIN (list);
- }
return false;
}
bool
fp_zero_operand (rtx op)
{
- const REAL_VALUE_TYPE *r;
-
if (GET_MODE (op) != SFmode)
return false;
- r = CONST_DOUBLE_REAL_VALUE (op);
+ const REAL_VALUE_TYPE* r = CONST_DOUBLE_REAL_VALUE (op);
return real_equal (r, &dconst0) && ! REAL_VALUE_MINUS_ZERO (*r);
}
branch_dest (rtx branch)
{
rtx dest = SET_SRC (PATTERN (branch));
- int dest_uid;
if (GET_CODE (dest) == IF_THEN_ELSE)
dest = XEXP (dest, 1);
- dest = XEXP (dest, 0);
- dest_uid = INSN_UID (dest);
- return INSN_ADDRESSES (dest_uid);
+
+ return INSN_ADDRESSES (INSN_UID (XEXP (dest, 0)));
}
\f
/* Return nonzero if REG is not used after INSN.
bool
reg_unused_after (rtx reg, rtx_insn *insn)
{
- enum rtx_code code;
- rtx set;
-
/* If the reg is set by this instruction, then it is safe for our
case. Disregard the case where this is a store to memory, since
we are checking a register used in the store address. */
- set = single_set (insn);
+ rtx set = single_set (insn);
if (set && !MEM_P (SET_DEST (set))
&& reg_overlap_mentioned_p (reg, SET_DEST (set)))
return true;
while ((insn = NEXT_INSN (insn)))
{
- rtx set;
if (!INSN_P (insn))
continue;
- code = GET_CODE (insn);
+ rtx_code code = GET_CODE (insn);
#if 0
/* If this is a label that existed before reload, then the register
else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
{
rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
- int i;
- int retval = 0;
+ bool retval = false;
- for (i = 0; i < seq->len (); i++)
+ for (int i = 0; i < seq->len (); i++)
{
rtx_insn *this_insn = seq->insn (i);
rtx set = single_set (this_insn);
&& reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
return false;
}
- if (retval == 1)
+ if (retval)
return true;
else if (code == JUMP_INSN)
return false;
}
- set = single_set (insn);
+ rtx set = single_set (insn);
if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
return false;
if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
return !MEM_P (SET_DEST (set));
- if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ if (set == NULL && reg_overlap_mentioned_p (reg, PATTERN (insn)))
return false;
if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
static void
emit_fpu_switch (rtx scratch, int index)
{
- rtx src;
-
if (fpscr_values == NULL)
{
- tree t;
-
- t = build_index_type (integer_one_node);
+ tree t = build_index_type (integer_one_node);
t = build_array_type (integer_type_node, t);
t = build_decl (BUILTINS_LOCATION,
VAR_DECL, get_identifier ("__fpscr_values"), t);
fpscr_values = t;
}
- src = DECL_RTL (fpscr_values);
+ rtx src = DECL_RTL (fpscr_values);
if (!can_create_pseudo_p ())
{
emit_move_insn (scratch, XEXP (src, 0));
{
enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
- rtx addr_reg;
- addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
+ rtx addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
emit_fpu_switch (addr_reg, fp_mode == norm_mode);
}
static bool
sequence_insn_p (rtx_insn *insn)
{
- rtx_insn *prev, *next;
-
- prev = PREV_INSN (insn);
+ rtx_insn* prev = PREV_INSN (insn);
if (prev == NULL)
return false;
- next = NEXT_INSN (prev);
+ rtx_insn* next = NEXT_INSN (prev);
if (next == NULL)
return false;
bool
nonpic_symbol_mentioned_p (rtx x)
{
- const char *fmt;
- int i;
-
if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
|| GET_CODE (x) == PC)
return true;
|| XINT (x, 1) == UNSPEC_GOTOFFFUNCDESC))
return false;
- fmt = GET_RTX_FORMAT (GET_CODE (x));
- for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ const char* fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
{
if (fmt[i] == 'E')
{
- int j;
- for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
return true;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
@GOTOFF in `reg'. */
rtx
-legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
- rtx reg)
+legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED, rtx reg)
{
if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
return orig;
static rtx
sh_delegitimize_address (rtx orig_x)
{
- rtx x, y;
-
orig_x = delegitimize_mem_from_attrs (orig_x);
- x = orig_x;
+ rtx x = orig_x;
if (MEM_P (x))
x = XEXP (x, 0);
if (GET_CODE (x) == CONST)
{
- y = XEXP (x, 0);
+ rtx y = XEXP (x, 0);
if (GET_CODE (y) == UNSPEC)
{
if (XINT (y, 1) == UNSPEC_GOT
static rtx
mark_constant_pool_use (rtx x)
{
- rtx_insn *insn, *lab;
- rtx pattern;
-
if (x == NULL_RTX)
return x;
/* Get the first label in the list of labels for the same constant
and delete another labels in the list. */
- lab = as_a <rtx_insn *> (x);
- for (insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
+ rtx_insn* lab = as_a <rtx_insn*> (x);
+ for (rtx_insn* insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
{
if (!LABEL_P (insn)
|| LABEL_REFS (insn) != NEXT_INSN (insn))
as_a<rtx_insn *> (insn)->set_deleted ();
/* Mark constants in a window. */
- for (insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn; insn = NEXT_INSN (insn))
+ for (rtx_insn* insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn;
+ insn = NEXT_INSN (insn))
{
if (!NONJUMP_INSN_P (insn))
continue;
- pattern = PATTERN (insn);
+ rtx pattern = PATTERN (insn);
if (GET_CODE (pattern) != UNSPEC_VOLATILE)
continue;
if (REG_NOTE_KIND (link) == 0)
{
- enum attr_type type;
- rtx dep_set;
-
if (recog_memoized (insn) < 0
|| recog_memoized (dep_insn) < 0)
return cost;
- dep_set = single_set (dep_insn);
+ rtx dep_set = single_set (dep_insn);
/* The latency that we specify in the scheduling description refers
to the actual output, not to an auto-increment register; for that,
}
if (TARGET_HARD_SH4 && !TARGET_SH4_300)
{
- enum attr_type dep_type = get_attr_type (dep_insn);
-
+ attr_type dep_type = get_attr_type (dep_insn);
+ attr_type type;
if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
cost--;
else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
else if (TARGET_SH4_300)
{
/* Stores need their input register two cycles later. */
+ attr_type type;
if (dep_set && cost >= 1
&& ((type = get_attr_type (insn)) == TYPE_STORE
|| type == TYPE_PSTORE
static short
find_insn_regmode_weight (rtx insn, machine_mode mode)
{
- short reg_weight = 0;
- rtx x;
-
/* Increment weight for each register born here. */
- x = PATTERN (insn);
- reg_weight += find_set_regmode_weight (x, mode);
+ rtx x = PATTERN (insn);
+ short reg_weight = find_set_regmode_weight (x, mode);
if (GET_CODE (x) == PARALLEL)
{
int j;
static int
find_r0_life_regions (basic_block b)
{
- rtx_insn *end, *insn;
- rtx pset;
- rtx r0_reg;
- int live;
+ bool live;
int set;
int death = 0;
if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
{
set = 1;
- live = 1;
+ live = true;
}
else
{
set = 0;
- live = 0;
+ live = false;
}
- insn = BB_HEAD (b);
- end = BB_END (b);
- r0_reg = gen_rtx_REG (SImode, R0_REG);
+ rtx_insn* insn = BB_HEAD (b);
+ rtx_insn* end = BB_END (b);
+ rtx r0_reg = gen_rtx_REG (SImode, R0_REG);
while (1)
{
if (INSN_P (insn))
if (find_regno_note (insn, REG_DEAD, R0_REG))
{
death++;
- live = 0;
+ live = false;
}
+
+ rtx pset;
if (!live
&& (pset = single_set (insn))
&& reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
&& !find_regno_note (insn, REG_UNUSED, R0_REG))
{
set++;
- live = 1;
+ live = true;
}
}
if (insn == end)
return tramp;
}
-/* FIXME: This is overly conservative. A SHcompact function that
- receives arguments ``by reference'' will have them stored in its
- own stack frame, so it must not pass pointers or references to
- these arguments to other functions by means of sibling calls. */
/* If PIC, we cannot make sibling calls to global functions
because the PLT requires r12 to be live. */
static bool
for (int i = 1; i <= 3; i++, nop++)
{
- tree arg;
- machine_mode opmode, argmode;
- tree optype;
-
if (! signature_args[signature][i])
break;
- arg = CALL_EXPR_ARG (exp, i - 1);
+ tree arg = CALL_EXPR_ARG (exp, i - 1);
if (arg == error_mark_node)
return const0_rtx;
+
+ machine_mode opmode;
+ tree optype;
if (signature_args[signature][i] & 8)
{
opmode = ptr_mode;
opmode = insn_data[icode].operand[nop].mode;
optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
}
- argmode = TYPE_MODE (TREE_TYPE (arg));
+
+ machine_mode argmode = TYPE_MODE (TREE_TYPE (arg));
if (argmode != opmode)
arg = build1 (NOP_EXPR, optype, arg);
op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
return function_symbol_result (sym, lab);
}
-/* Find the number of a general purpose register in S. */
+/* Find the number of the first general purpose register in S that
+   is set.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
- int r;
- for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
+ for (int r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
if (TEST_HARD_REG_BIT (*s, r))
return r;
return -1;
rtx op0 = operands[2];
rtx op1 = operands[3];
rtx result = target;
- HOST_WIDE_INT val;
if (!REG_P (op0) || REGNO (op0) != T_REG
|| !CONST_INT_P (op1))
return false;
if (!REG_P (result))
result = gen_reg_rtx (SImode);
- val = INTVAL (op1);
+ HOST_WIDE_INT val = INTVAL (op1);
if ((code == EQ && val == 1) || (code == NE && val == 0))
emit_insn (gen_movt (result, get_t_reg_rtx ()));
else if ((code == EQ && val == 0) || (code == NE && val == 1))
static rtx
extract_sfunc_addr (rtx insn)
{
- rtx pattern, part = NULL_RTX;
- int len, i;
-
- pattern = PATTERN (insn);
- len = XVECLEN (pattern, 0);
- for (i = 0; i < len; i++)
+ rtx pattern = PATTERN (insn);
+ const int len = XVECLEN (pattern, 0);
+ for (int i = 0; i < len; i++)
{
- part = XVECEXP (pattern, 0, i);
+ rtx part = XVECEXP (pattern, 0, i);
if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
&& GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
return XEXP (part, 0);
{
pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
pcum->free_single_fp_reg = 0;
- pcum->stack_regs = 0;
- pcum->byref_regs = 0;
- pcum->byref = 0;
- pcum->outgoing = (n_named_args == -1) ? 0 : 1;
+ pcum->outgoing = n_named_args != -1;
- /* XXX - Should we check TARGET_HITACHI here ??? */
- pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
+ /* FIXME: Should we check TARGET_HITACHI here ??? */
+ pcum->renesas_abi = sh_attr_renesas_p (fntype);
if (fntype)
{
else
{
pcum->arg_count [(int) SH_ARG_INT] = 0;
- pcum->prototype_p = FALSE;
+ pcum->prototype_p = false;
if (mode != VOIDmode)
{
/* If the default ABI is the Renesas ABI then all library
&& TARGET_FPU_DOUBLE)));
}
else
- pcum->force_mem = FALSE;
+ pcum->force_mem = false;
}
}
static void
sh_conditional_register_usage (void)
{
- int regno;
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
+ for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno ++)
if (! VALID_REGISTER_P (regno))
fixed_regs[regno] = call_used_regs[regno] = 1;
/* R8 and R9 are call-clobbered on SH5, but not on earlier SH ABIs. */
call_really_used_regs[MACL_REG] = 0;
}
- for (regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
+ for (int regno = FIRST_GENERAL_REG; regno <= LAST_GENERAL_REG; regno++)
if (! fixed_regs[regno] && call_really_used_regs[regno])
SET_HARD_REG_BIT (reg_class_contents[SIBCALL_REGS], regno);
bool
sh_can_use_simple_return_p (void)
{
- HARD_REG_SET live_regs_mask;
- int d;
-
if (! reload_completed || frame_pointer_needed)
return false;
return false;
/* Finally, allow for pr save. */
- d = calc_live_regs (&live_regs_mask);
+ HARD_REG_SET live_regs_mask;
+ int d = calc_live_regs (&live_regs_mask);
if (rounded_frame_size (d) > 4)
return false;