+2015-06-06  Richard Sandiford  <richard.sandiford@arm.com>
+
+ * emit-rtl.c, expr.c, gcse.c, optabs.c, optabs.h, print-rtl.c,
+ rtl.h, bb-reorder.c, builtins.c, calls.c, cfgbuild.c, cfgexpand.c,
+ cfgrtl.c, cilk-common.c, config/i386/i386.md, cse.c, dwarf2cfi.c,
+ except.c, final.c, function.c, gcse-common.c, genemit.c,
+ haifa-sched.c, ifcvt.c, jump.c, loop-invariant.c, loop-iv.c,
+ lra-constraints.c, lra.c, reload1.c, resource.c, rtlanal.c,
+ sched-deps.c, sched-ebb.c, sel-sched-ir.c, sel-sched.c,
+ shrink-wrap.c, stmt.c, store-motion.c: Replace rtx base types with
+ more derived ones.
+
2015-06-06  Mikhail Maltsev  <maltsevm@gmail.com>

	* combine.c (combine_split_insns): Remove cast.
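All of the hunks below follow one mechanical pattern, so a sketch of the
rtl.h class hierarchy they rely on may help (simplified; accessors and
GTY markup elided -- see rtl.h for the real definitions).  "rtx" is a
pointer to the erased base class rtx_def, and the derived classes add no
data members, only static type checking:

    struct rtx_def { /* code, mode, flags, operand union, ...  */ };
    class rtx_insn : public rtx_def {};         /* any insn in a chain  */
    class rtx_code_label : public rtx_insn {};  /* CODE_LABEL           */
    class rtx_note : public rtx_insn {};        /* NOTE                 */
    class rtx_jump_insn : public rtx_insn {};   /* JUMP_INSN            */
    class rtx_call_insn : public rtx_insn {};   /* CALL_INSN            */
    class rtx_expr_list : public rtx_def {};    /* EXPR_LIST node       */
    class rtx_insn_list : public rtx_def {};    /* INSN_LIST node       */

Declaring each variable with the most derived type that fits lets
insn-only APIs (INSN_UID, NEXT_INSN, emit_insn_after, ...) type-check
statically instead of via runtime-checked casts.  It also explains the
NULL_RTX -> NULL changes in several hunks: NULL_RTX is ((rtx) 0), a
base-class pointer that cannot implicitly convert down to rtx_insn *,
so derived pointers are initialized and returned as plain NULL.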
int
get_uncond_jump_length (void)
{
- rtx_insn *label, *jump;
int length;
start_sequence ();
- label = emit_label (gen_label_rtx ());
- jump = emit_jump_insn (gen_jump (label));
+ rtx_code_label *label = emit_label (gen_label_rtx ());
+ rtx_insn *jump = emit_jump_insn (gen_jump (label));
length = get_attr_min_length (jump);
end_sequence ();
{
eh_landing_pad new_lp;
basic_block new_bb, last_bb, post_bb;
- rtx_insn *new_label, *jump;
- rtx post_label;
+ rtx_insn *jump;
unsigned new_partition;
edge_iterator ei;
edge e;
LABEL_PRESERVE_P (new_lp->landing_pad) = 1;
/* Put appropriate instructions in new bb. */
- new_label = emit_label (new_lp->landing_pad);
+ rtx_code_label *new_label = emit_label (new_lp->landing_pad);
expand_dw2_landing_pad_for_region (old_lp->region);
post_bb = BLOCK_FOR_INSN (old_lp->landing_pad);
post_bb = single_succ (post_bb);
- post_label = block_label (post_bb);
+ rtx_code_label *post_label = block_label (post_bb);
jump = emit_jump_insn (gen_jump (post_label));
JUMP_LABEL (jump) = post_label;
{
basic_block src = e->src;
basic_block dest = e->dest;
- rtx label;
- rtx_insn *new_jump;
+ rtx_jump_insn *new_jump;
if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
continue;
/* Make sure dest has a label. */
- label = block_label (dest);
+ rtx_code_label *label = block_label (dest);
/* Nothing to do for non-fallthru edges. */
if (src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
bool cond_jump_crosses;
int invert_worked;
rtx_insn *old_jump;
- rtx fall_thru_label;
+ rtx_code_label *fall_thru_label;
FOR_EACH_BB_FN (cur_bb, cfun)
{
saved on entry to this function. So we migrate the
call to the first insn of this function. */
rtx temp;
- rtx seq;
start_sequence ();
temp = expand_builtin_apply_args_1 ();
- seq = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
apply_args_value = temp;
rtx buf_addr = expand_expr (CALL_EXPR_ARG (exp, 0), subtarget,
VOIDmode, EXPAND_NORMAL);
tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 1), 0);
- rtx label_r = label_rtx (label);
+ rtx_insn *label_r = label_rtx (label);
/* This is copied from the handling of non-local gotos. */
expand_builtin_setjmp_setup (buf_addr, label_r);
if (validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
{
tree label = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
- rtx label_r = label_rtx (label);
+ rtx_insn *label_r = label_rtx (label);
expand_builtin_setjmp_receiver (label_r);
return const0_rtx;
cumulative_args_t args_so_far ATTRIBUTE_UNUSED)
{
rtx rounded_stack_size_rtx = GEN_INT (rounded_stack_size);
- rtx_insn *call_insn;
rtx call, funmem;
int already_popped = 0;
HOST_WIDE_INT n_popped
gcc_unreachable ();
/* Find the call we just emitted. */
- call_insn = last_call_insn ();
+ rtx_call_insn *call_insn = last_call_insn ();
 /* Some targets create a fresh MEM instead of reusing the one provided
    above.  Set its MEM_EXPR.  */
if (flag_ipa_ra)
{
- rtx last, datum = orgfun;
+ rtx datum = orgfun;
gcc_assert (GET_CODE (datum) == SYMBOL_REF);
- last = last_call_insn ();
+ rtx_call_insn *last = last_call_insn ();
add_reg_note (last, REG_CALL_DECL, datum);
}
if (lp)
{
- rtx label = lp->landing_pad;
+ rtx_insn *label = lp->landing_pad;
/* During initial rtl generation, use the post_landing_pad. */
if (label == NULL)
for (i = 0; i < nlabels; ++i)
{
tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
- rtx r;
+ rtx_insn *r;
/* If asm goto has any labels in the fallthru basic block, use
a label that we emit immediately after the asm goto. Expansion
may insert further instructions into the same basic block after
static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
-static rtx label_for_bb (basic_block);
static void fixup_reorder_chain (void);
void verify_insn_chain (void);
/* Or replace possibly complicated jump insn by simple jump insn. */
else
{
- rtx target_label = block_label (target);
+ rtx_code_label *target_label = block_label (target);
rtx_insn *barrier;
rtx label;
rtx_jump_table_data *table;
{
rtvec vec;
int j;
- rtx new_label = block_label (new_bb);
+ rtx_code_label *new_label = block_label (new_bb);
if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
{
int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
- rtx new_label, note;
+ rtx note;
if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
return false;
- new_label = block_label (new_bb);
+ rtx_code_label *new_label = block_label (new_bb);
for (i = 0; i < n; ++i)
{
}
else
{
- rtx label = block_label (target);
+ rtx_code_label *label = block_label (target);
emit_jump_insn_after_setloc (gen_jump (label), BB_END (jump_block), loc);
JUMP_LABEL (BB_END (jump_block)) = label;
LABEL_NUSES (label)++;
/* Locate or create a label for a given basic block. */
-static rtx
+static rtx_insn *
label_for_bb (basic_block bb)
{
- rtx label = BB_HEAD (bb);
+ rtx_insn *label = BB_HEAD (bb);
if (!LABEL_P (label))
{
void
expand_builtin_cilk_detach (tree exp)
{
- rtx insn;
+ rtx_insn *insn;
tree fptr = get_frame_arg (exp);
if (fptr == NULL_TREE)
(clobber (reg:CC FLAGS_REG))])]
"TARGET_QIMODE_MATH"
{
- rtx div, mod, insn;
+ rtx div, mod;
rtx tmp0, tmp1;
tmp0 = gen_reg_rtx (HImode);
/* Extract remainder from AH. */
tmp1 = gen_rtx_SIGN_EXTRACT (QImode, tmp0, GEN_INT (8), GEN_INT (8));
- insn = emit_move_insn (operands[3], tmp1);
+ rtx_insn *insn = emit_move_insn (operands[3], tmp1);
mod = gen_rtx_MOD (QImode, operands[1], operands[2]);
set_unique_reg_note (insn, REG_EQUAL, mod);
(clobber (reg:CC FLAGS_REG))])]
"TARGET_QIMODE_MATH"
{
- rtx div, mod, insn;
+ rtx div, mod;
rtx tmp0, tmp1;
tmp0 = gen_reg_rtx (HImode);
/* Extract remainder from AH. */
tmp1 = gen_rtx_ZERO_EXTRACT (SImode, tmp0, GEN_INT (8), GEN_INT (8));
tmp1 = simplify_gen_subreg (QImode, tmp1, SImode, 0);
- insn = emit_move_insn (operands[3], tmp1);
+ rtx_insn *insn = emit_move_insn (operands[3], tmp1);
mod = gen_rtx_UMOD (QImode, operands[1], operands[2]);
set_unique_reg_note (insn, REG_EQUAL, mod);
and hope for the best. */
if (n_sets == 1)
{
- rtx_insn *new_rtx;
+ rtx_jump_insn *new_rtx;
rtx note;
new_rtx = emit_jump_insn_before (gen_jump (XEXP (src, 0)), insn);
set_live_p (rtx set, rtx_insn *insn ATTRIBUTE_UNUSED, /* Only used with HAVE_cc0. */
int *counts)
{
- rtx tem;
+ rtx_insn *tem;
if (set_noop_p (set))
;
int num = dwarf2out_cfi_label_num;
const char *label = dwarf2out_cfi_label ();
dw_cfi_ref xcfi;
- rtx tmp;
/* Set the location counter to the new label. */
xcfi = new_cfi ();
xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
vec_safe_push (fde->dw_fde_cfi, xcfi);
- tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
+ rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
NOTE_LABEL_NUMBER (tmp) = num;
}
int probability;
rtx_insn *insn_last, *insn;
int njumps = 0;
- rtx call_insn = NULL_RTX;
+ rtx_insn *call_insn = NULL;
/* We're not good at redistributing frame information. */
if (RTX_FRAME_RELATED_P (trial))
rtx_insn *(*make_raw) (rtx))
{
rtx_insn *after = safe_as_a <rtx_insn *> (uncast_after);
- rtx last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
+ rtx_insn *last = emit_pattern_after_noloc (pattern, after, NULL, make_raw);
if (pattern == NULL_RTX || !loc)
- return safe_as_a <rtx_insn *> (last);
+ return last;
after = NEXT_INSN (after);
while (1)
break;
after = NEXT_INSN (after);
}
- return safe_as_a <rtx_insn *> (last);
+ return last;
}
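Declaring LAST with the callee's real return type is what makes the two
safe_as_a calls above redundant.  For reference, the cast helpers from
is-a.h that this series removes from call sites behave roughly like this
(usage sketch; x stands for some erased rtx):

    if (is_a <rtx_insn *> (x))                  /* predicate only       */
      { /* ... */ }
    rtx_insn *i1 = as_a <rtx_insn *> (x);       /* checked cast; x must
                                                   be a nonnull insn    */
    rtx_insn *i2 = safe_as_a <rtx_insn *> (x);  /* checked cast; null x
                                                   is passed through    */

Once a value carries the derived type from its point of creation, none
of these is needed.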
/* Insert PATTERN after AFTER. MAKE_RAW indicates how to turn PATTERN
machine_mode unwind_word_mode = targetm.unwind_word_mode ();
machine_mode filter_mode = targetm.eh_return_filter_mode ();
eh_landing_pad lp;
- rtx mem, fc, before, exc_ptr_reg, filter_reg;
+ rtx mem, fc, exc_ptr_reg, filter_reg;
rtx_insn *seq;
- rtx first_reachable_label;
basic_block bb;
eh_region r;
edge e;
/* Jump to one of the directly reachable regions. */
disp_index = 0;
- first_reachable_label = NULL;
+ rtx_code_label *first_reachable_label = NULL;
/* If there's exactly one call site in the function, don't bother
generating a switch statement. */
seq2 = get_insns ();
end_sequence ();
- before = label_rtx (lp->post_landing_pad);
+ rtx_insn *before = label_rtx (lp->post_landing_pad);
bb = emit_to_new_bb_before (seq2, before);
e = make_edge (bb, bb->next_bb, EDGE_FALLTHRU);
e->count = bb->count;
{
if (lp)
{
- rtx lab = lp->landing_pad;
+ rtx_code_label *lab = lp->landing_pad;
if (lab && LABEL_P (lab))
(*callback) (lab);
}
rtx_insn *first_no_action_insn = NULL;
int call_site = 0;
int cur_sec = 0;
- rtx section_switch_note = NULL_RTX;
+ rtx_insn *section_switch_note = NULL;
rtx_insn *first_no_action_insn_before_switch = NULL;
rtx_insn *last_no_action_insn_before_switch = NULL;
int saved_call_site_base = call_site_base;
eh_region region;
bool nothrow;
int this_action;
- rtx this_landing_pad;
+ rtx_code_label *this_landing_pad;
insn = iter;
if (NONJUMP_INSN_P (insn)
if (this_action >= 0)
this_landing_pad = lp->landing_pad;
else
- this_landing_pad = NULL_RTX;
+ this_landing_pad = NULL;
 /* Differing actions or landing pads imply a change in call-site
    info, which implies some EH_REGION note should be emitted.  */
fprintf (out, "(nil),");
if (lp->post_landing_pad)
{
- rtx lab = label_rtx (lp->post_landing_pad);
+ rtx_insn *lab = label_rtx (lp->post_landing_pad);
fprintf (out, "%i%s}", INSN_UID (lab),
NOTE_P (lab) ? "(del)" : "");
}
if ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing)
{
- rtx reg, insn;
+ rtx reg;
op0 = adjust_address (op0, mode, 0);
/* We've already validated the memory, and we're creating a
reg = gen_reg_rtx (mode);
/* Nor can the insn generator. */
- insn = GEN_FCN (icode) (reg, op0);
+ rtx_insn *insn = GEN_FCN (icode) (reg, op0);
emit_insn (insn);
return reg;
}
int
insn_current_reference_address (rtx_insn *branch)
{
- rtx dest, seq;
+ rtx dest;
int seq_uid;
if (! INSN_ADDRESSES_SET_P ())
return 0;
- seq = NEXT_INSN (PREV_INSN (branch));
+ rtx_insn *seq = NEXT_INSN (PREV_INSN (branch));
seq_uid = INSN_UID (seq);
if (!JUMP_P (branch))
/* This can happen for example on the PA; the objective is to know the
}
else
t = op1;
- rtx pat = gen_extend_insn (op0, t, promoted_nominal_mode,
- data->passed_mode, unsignedp);
+ rtx_insn *pat = gen_extend_insn (op0, t, promoted_nominal_mode,
+ data->passed_mode, unsignedp);
emit_insn (pat);
insns = get_insns ();
void
expand_function_end (void)
{
- rtx clobber_after;
-
/* If arg_pointer_save_area was referenced only from a nested
function, we will not have initialized it yet. Do that now. */
if (arg_pointer_save_area && ! crtl->arg_pointer_save_area_init)
 We delay actual code generation until after the current_function_value_rtx
 is computed.  */
- clobber_after = get_last_insn ();
+ rtx_insn *clobber_after = get_last_insn ();
/* Output the label for the actual return from the function. */
emit_label (return_label);
certainly doesn't fall thru into the exit block. */
if (!BARRIER_P (clobber_after))
{
- rtx seq;
-
start_sequence ();
clobber_return_register ();
- seq = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
emit_insn_after (seq, clobber_after);
if (! EXIT_IGNORE_STACK
&& cfun->calls_alloca)
{
- rtx tem = 0, seq;
+ rtx tem = 0;
start_sequence ();
emit_stack_save (SAVE_FUNCTION, &tem);
- seq = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
emit_insn_before (seq, parm_birth_insn);
if (! crtl->arg_pointer_save_area_init)
{
- rtx seq;
-
/* Save the arg pointer at the beginning of the function. The
generated stack slot may not be a valid memory address, so we
have to check it and fix it if necessary. */
start_sequence ();
emit_move_insn (validize_mem (copy_rtx (ret)),
crtl->args.internal_arg_pointer);
- seq = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
push_topmost_sequence ();
static void
emit_use_return_register_into_block (basic_block bb)
{
- rtx seq;
- rtx_insn *insn;
start_sequence ();
use_return_register ();
- seq = get_insns ();
+ rtx_insn *seq = get_insns ();
end_sequence ();
- insn = BB_END (bb);
+ rtx_insn *insn = BB_END (bb);
if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, PATTERN (insn)))
insn = prev_cc0_setter (insn);
void
emit_return_into_block (bool simple_p, basic_block bb)
{
- rtx jump, pat;
- jump = emit_jump_insn_after (gen_return_pattern (simple_p), BB_END (bb));
- pat = PATTERN (jump);
+ rtx_jump_insn *jump = emit_jump_insn_after (gen_return_pattern (simple_p),
+ BB_END (bb));
+ rtx pat = PATTERN (jump);
if (GET_CODE (pat) == PARALLEL)
pat = XVECEXP (pat, 0, 0);
gcc_assert (ANY_RETURN_P (pat));
{
int i;
basic_block bb;
- rtx label;
edge_iterator ei;
edge e;
auto_vec<basic_block> src_bbs (EDGE_COUNT (last_bb->preds));
if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
src_bbs.quick_push (e->src);
- label = BB_HEAD (last_bb);
+ rtx_insn *label = BB_HEAD (last_bb);
FOR_EACH_VEC_ELT (src_bbs, i, bb)
{
void
canon_list_insert (rtx dest, const_rtx x ATTRIBUTE_UNUSED, void *data)
{
- rtx dest_addr, insn;
+ rtx dest_addr;
int bb;
modify_pair pair;
dest_addr = get_addr (XEXP (dest, 0));
dest_addr = canon_rtx (dest_addr);
- insn = ((struct gcse_note_stores_info *)data)->insn;
+ rtx_insn *insn = ((struct gcse_note_stores_info *)data)->insn;
bb = BLOCK_FOR_INSN (insn)->index;
pair.dest = dest;
static void update_ld_motion_stores (struct gcse_expr *);
static void clear_modify_mem_tables (void);
static void free_modify_mem_tables (void);
-static rtx gcse_emit_move_after (rtx, rtx, rtx_insn *);
static bool is_too_expensive (const char *);
#define GNEW(T) ((T *) gmalloc (sizeof (T)))
/* Emit move from SRC to DEST noting the equivalence with expression computed
in INSN. */
-static rtx
+static rtx_insn *
gcse_emit_move_after (rtx dest, rtx src, rtx_insn *insn)
{
rtx_insn *new_rtx;
rtx pat = PATTERN (insn);
rtx src = SET_SRC (pat);
rtx reg = expr->reaching_reg;
- rtx copy;
/* If we've already copied it, continue. */
if (expr->reaching_reg == src)
fprintf (dump_file, "\n");
}
- copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
+ rtx_insn *copy = gen_move_insn (reg, copy_rtx (SET_SRC (pat)));
emit_insn_before (copy, insn);
SET_SRC (pat) = reg;
df_insn_rescan (insn);
printf (" rtx operand%d;\n", i);
for (; i <= stats.max_scratch_opno; i++)
printf (" rtx operand%d ATTRIBUTE_UNUSED;\n", i);
- printf (" rtx _val = 0;\n");
+ printf (" rtx_insn *_val = 0;\n");
printf (" start_sequence ();\n");
/* The fourth operand of DEFINE_EXPAND is some code to be executed
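The genemit.c hunk above changes the temporary in which every generated
gen_* body accumulates its result.  Assuming the generated functions'
return types are strengthened in step (which the optabs.c hunks below
rely on when returning GEN_FCN (icode) (...) as rtx_insn *), the emitted
code for a define_expand now has roughly this shape ("foo" is a made-up
pattern name and the body stands in for the pattern's real preparation
statements):

    rtx_insn *
    gen_foo (rtx operand0, rtx operand1)
    {
      rtx_insn *_val = 0;
      start_sequence ();
      emit_move_insn (operand0, operand1);  /* placeholder pattern body */
      _val = get_insns ();                  /* returns rtx_insn *       */
      end_sequence ();
      return _val;
    }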
Between these two blocks recovery blocks will be emitted. */
basic_block single, empty;
- rtx_insn *x;
- rtx label;
/* If the fallthrough edge to exit we've found is from the block we've
created before, don't do anything more. */
make_single_succ_edge (empty, EXIT_BLOCK_PTR_FOR_FN (cfun),
EDGE_FALLTHRU);
- label = block_label (empty);
- x = emit_jump_insn_after (gen_jump (label), BB_END (single));
+ rtx_code_label *label = block_label (empty);
+ rtx_jump_insn *x = emit_jump_insn_after (gen_jump (label),
+ BB_END (single));
JUMP_LABEL (x) = label;
LABEL_NUSES (label)++;
haifa_init_insn (x);
basic_block
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
- rtx label;
rtx_insn *barrier;
basic_block rec;
barrier = get_last_bb_insn (before_recovery);
gcc_assert (BARRIER_P (barrier));
- label = emit_label_after (gen_label_rtx (), barrier);
+ rtx_insn *label = emit_label_after (gen_label_rtx (), barrier);
rec = create_basic_block (label, label, before_recovery);
sched_create_recovery_edges (basic_block first_bb, basic_block rec,
basic_block second_bb)
{
- rtx label;
- rtx jump;
int edge_flags;
/* This is fixing of incoming edge. */
edge_flags = 0;
make_edge (first_bb, rec, edge_flags);
- label = block_label (second_bb);
- jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
+ rtx_code_label *label = block_label (second_bb);
+ rtx_jump_insn *jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
JUMP_LABEL (jump) = label;
LABEL_NUSES (label)++;
basic_block else_bb = else_edge->dest;
basic_block other_bb, trap_bb;
rtx_insn *trap, *jump;
- rtx cond, seq;
+ rtx cond;
rtx_insn *cond_earliest;
enum rtx_code code;
}
/* Attempt to generate the conditional trap. */
- seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
- copy_rtx (XEXP (cond, 1)),
- TRAP_CODE (PATTERN (trap)));
+ rtx_insn *seq = gen_cond_trap (code, copy_rtx (XEXP (cond, 0)),
+ copy_rtx (XEXP (cond, 1)),
+ TRAP_CODE (PATTERN (trap)));
if (seq == NULL)
return FALSE;
else if (trap_bb == then_bb)
{
rtx lab;
- rtx_insn *newjump;
lab = JUMP_LABEL (jump);
- newjump = emit_jump_insn_after (gen_jump (lab), jump);
+ rtx_jump_insn *newjump = emit_jump_insn_after (gen_jump (lab), jump);
LABEL_NUSES (lab) += 1;
JUMP_LABEL (newjump) = lab;
emit_barrier_after (newjump);
if (GET_MODE_CLASS (mode) == MODE_CC || CC0_P (arg0))
{
- const_rtx prev;
/* Try to search for the comparison to determine the real mode.
This code is expensive, but with sane machine description it
will be never used, since REVERSIBLE_CC_MODE will return true
- /* These CONST_CAST's are okay because prev_nonnote_insn just
-    returns its argument and we assign it to a const_rtx
-    variable.  */
- for (prev = prev_nonnote_insn (CONST_CAST_RTX (insn));
+ /* This CONST_CAST is okay because prev_nonnote_insn just
+    returns its argument and the loop only reads the insns
+    it walks over.  */
+ for (rtx_insn *prev = prev_nonnote_insn (CONST_CAST_RTX (insn));
   prev != 0 && !LABEL_P (prev);
-  prev = prev_nonnote_insn (CONST_CAST_RTX (prev)))
+  prev = prev_nonnote_insn (prev))
{
const_rtx set = set_of (arg0, prev);
if (set && GET_CODE (set) == SET
if (simple && REG_P (dest) && DF_REG_DEF_COUNT (REGNO (dest)) > 1)
{
df_ref use;
- rtx ref;
unsigned int i = REGNO (dest);
struct df_insn_info *insn_info;
df_ref def_rec;
for (use = DF_REG_USE_CHAIN (i); use; use = DF_REF_NEXT_REG (use))
{
- ref = DF_REF_INSN (use);
+ rtx_insn *ref = DF_REF_INSN (use);
insn_info = DF_INSN_INFO_GET (ref);
FOR_EACH_INSN_INFO_DEF (def_rec, insn_info)
for (pnote = &cond_list; *pnote; pnote = pnote_next)
{
- rtx note = *pnote;
+ rtx_expr_list *note = *pnote;
rtx old_cond = XEXP (note, 0);
pnote_next = (rtx_expr_list **)&XEXP (note, 1);
/* Likewise for the conditions. */
for (pnote = &cond_list; *pnote; pnote = pnote_next)
{
- rtx note = *pnote;
+ rtx_expr_list *note = *pnote;
rtx old_cond = XEXP (note, 0);
pnote_next = (rtx_expr_list **)&XEXP (note, 1);
rtx_insn **before, rtx_insn **after)
{
int i, in;
- rtx new_in_reg, new_out_reg, reg, clobber;
+ rtx new_in_reg, new_out_reg, reg;
machine_mode inmode, outmode;
rtx in_rtx = *curr_id->operand_loc[ins[0]];
rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out];
NEW_OUT_REG living above. We add clobber clause for
this. This is just a temporary clobber. We can remove
it at the end of LRA work. */
- clobber = emit_clobber (new_out_reg);
+ rtx_insn *clobber = emit_clobber (new_out_reg);
LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1;
LRA_SUBREG_P (new_in_reg) = 1;
if (GET_CODE (in_rtx) == SUBREG)
|| reg_renumber[src_regno] >= 0)
{
bool before_p;
- rtx use_insn = curr_insn;
+ rtx_insn *use_insn = curr_insn;
before_p = (JUMP_P (curr_insn)
|| (CALL_P (curr_insn) && reg->type == OP_IN));
/* Emit insn x = y + z. Return NULL if we failed to do it.
Otherwise, return the insn. We don't use gen_add3_insn as it might
clobber CC. */
-static rtx
+static rtx_insn *
emit_add3_insn (rtx x, rtx y, rtx z)
{
rtx_insn *last;
if (have_addptr3_insn (x, y, z))
{
- rtx insn = gen_addptr3_insn (x, y, z);
+ rtx_insn *insn = gen_addptr3_insn (x, y, z);
/* If the target provides an "addptr" pattern it hopefully does
for a reason. So falling back to the normal add would be
/* Emit insn x = x + y. Return the insn. We use gen_add2_insn as the
last resort. */
-static rtx
+static rtx_insn *
emit_add2_insn (rtx x, rtx y)
{
- rtx insn;
-
- insn = emit_add3_insn (x, x, y);
+ rtx_insn *insn = emit_add3_insn (x, x, y);
if (insn == NULL_RTX)
{
insn = gen_add2_insn (x, y);
rtx a1, a2, base, index, disp, scale, index_scale;
bool ok_p;
- rtx add3_insn = emit_add3_insn (x, y, z);
+ rtx_insn *add3_insn = emit_add3_insn (x, y, z);
old = max_reg_num ();
if (add3_insn != NULL)
;
adding the address segment to register. */
lra_assert (x != y && x != z);
emit_move_insn (x, y);
- rtx insn = emit_add2_insn (x, z);
+ rtx_insn *insn = emit_add2_insn (x, z);
lra_assert (insn != NULL_RTX);
}
else
/* Generate x = index_scale; x = x + base. */
lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
emit_move_insn (x, index_scale);
- rtx insn = emit_add2_insn (x, base);
+ rtx_insn *insn = emit_add2_insn (x, base);
lra_assert (insn != NULL_RTX);
}
else if (scale == NULL_RTX)
delete_insns_since (last);
/* Generate x = disp; x = x + base. */
emit_move_insn (x, disp);
- rtx add2_insn = emit_add2_insn (x, base);
+ rtx_insn *add2_insn = emit_add2_insn (x, base);
lra_assert (add2_insn != NULL_RTX);
}
/* Generate x = x + index. */
if (index != NULL_RTX)
{
- rtx insn = emit_add2_insn (x, index);
+ rtx_insn *insn = emit_add2_insn (x, index);
lra_assert (insn != NULL_RTX);
}
}
ok_p = false;
if (recog_memoized (move_insn) >= 0)
{
- rtx insn = emit_add2_insn (x, disp);
+ rtx_insn *insn = emit_add2_insn (x, disp);
if (insn != NULL_RTX)
{
insn = emit_add2_insn (x, base);
delete_insns_since (last);
/* Generate x = disp; x = x + base; x = x + index_scale. */
emit_move_insn (x, disp);
- rtx insn = emit_add2_insn (x, base);
+ rtx_insn *insn = emit_add2_insn (x, base);
lra_assert (insn != NULL_RTX);
insn = emit_add2_insn (x, index_scale);
lra_assert (insn != NULL_RTX);
if (optab_handler (mov_optab, mode) != CODE_FOR_nothing
|| ! rtx_equal_p (target, xtarget))
{
- rtx temp = emit_move_insn (target, xtarget);
+ rtx_insn *temp = emit_move_insn (target, xtarget);
set_dst_reg_note (temp, REG_EQUAL,
gen_rtx_fmt_ee (optab_to_code (binoptab),
/* Generate and return an insn body to add Y to X. */
-rtx
+rtx_insn *
gen_add2_insn (rtx x, rtx y)
{
enum insn_code icode = optab_handler (add_optab, GET_MODE (x));
/* Generate and return an insn body to add r1 and c,
storing the result in r0. */
-rtx
+rtx_insn *
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
enum insn_code icode = optab_handler (add_optab, GET_MODE (r0));
|| !insn_operand_matches (icode, 0, r0)
|| !insn_operand_matches (icode, 1, r1)
|| !insn_operand_matches (icode, 2, c))
- return NULL_RTX;
+ return NULL;
return GEN_FCN (icode) (r0, r1, c);
}
/* Generate and return an insn body to add Y to X. */
-rtx
+rtx_insn *
gen_addptr3_insn (rtx x, rtx y, rtx z)
{
enum insn_code icode = optab_handler (addptr3_optab, GET_MODE (x));
/* Generate and return an insn body to subtract Y from X. */
-rtx
+rtx_insn *
gen_sub2_insn (rtx x, rtx y)
{
enum insn_code icode = optab_handler (sub_optab, GET_MODE (x));
/* Generate and return an insn body to subtract r1 and c,
storing the result in r0. */
-rtx
+rtx_insn *
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
enum insn_code icode = optab_handler (sub_optab, GET_MODE (r0));
|| !insn_operand_matches (icode, 0, r0)
|| !insn_operand_matches (icode, 1, r1)
|| !insn_operand_matches (icode, 2, c))
- return NULL_RTX;
+ return NULL;
return GEN_FCN (icode) (r0, r1, c);
}
/* Generate the body of an insn to extend Y (with mode MFROM)
into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
-rtx
+rtx_insn *
gen_extend_insn (rtx x, rtx y, machine_mode mto,
machine_mode mfrom, int unsignedp)
{
/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
CODE. Return 0 on failure. */
-rtx
+rtx_insn *
gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode)
{
machine_mode mode = GET_MODE (op1);
enum insn_code icode;
- rtx insn;
+ rtx_insn *insn;
rtx trap_rtx;
if (mode == VOIDmode)
prepare_cmp_insn (op1, op2, code, NULL_RTX, false, OPTAB_DIRECT,
&trap_rtx, &mode);
if (!trap_rtx)
- insn = NULL_RTX;
+ insn = NULL;
else
insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1),
tcode);
maybe_expand_insn (enum insn_code icode, unsigned int nops,
struct expand_operand *ops)
{
- rtx pat = maybe_gen_insn (icode, nops, ops);
+ rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
{
emit_insn (pat);
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
struct expand_operand *ops)
{
- rtx pat = maybe_gen_insn (icode, nops, ops);
+ rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
{
emit_jump_insn (pat);
/* Create but don't emit one rtl instruction to perform certain operations.
Modes must match; operands must meet the operation's predicates.
Likewise for subtraction and for just copying. */
-extern rtx gen_add2_insn (rtx, rtx);
-extern rtx gen_add3_insn (rtx, rtx, rtx);
+extern rtx_insn *gen_add2_insn (rtx, rtx);
+extern rtx_insn *gen_add3_insn (rtx, rtx, rtx);
extern int have_add2_insn (rtx, rtx);
-extern rtx gen_addptr3_insn (rtx, rtx, rtx);
+extern rtx_insn *gen_addptr3_insn (rtx, rtx, rtx);
extern int have_addptr3_insn (rtx, rtx, rtx);
-extern rtx gen_sub2_insn (rtx, rtx);
-extern rtx gen_sub3_insn (rtx, rtx, rtx);
+extern rtx_insn *gen_sub2_insn (rtx, rtx);
+extern rtx_insn *gen_sub3_insn (rtx, rtx, rtx);
extern int have_sub2_insn (rtx, rtx);
/* Return the INSN_CODE to use for an extend operation. */
/* Generate the body of an insn to extend Y (with mode MFROM)
into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
-extern rtx gen_extend_insn (rtx, rtx, machine_mode,
- machine_mode, int);
+extern rtx_insn *gen_extend_insn (rtx, rtx, machine_mode, machine_mode, int);
/* Return the insn_code for a FLOAT_EXPR. */
enum insn_code can_float_p (machine_mode, machine_mode, int);
extern void init_sync_libfuncs (int max);
/* Generate a conditional trap instruction. */
-extern rtx gen_cond_trap (enum rtx_code, rtx, rtx, rtx);
+extern rtx_insn *gen_cond_trap (enum rtx_code, rtx, rtx, rtx);
/* Return true if target supports vector operations for VEC_PERM_EXPR. */
extern bool can_vec_perm_p (machine_mode, bool, const unsigned char *);
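With the optabs.h declarations strengthened, callers no longer need a
cast before handing the result to insn-only APIs.  A hypothetical caller
(dest, src and offset are made-up same-mode operands):

    rtx_insn *add = gen_add3_insn (dest, src, offset);
    if (add != NULL)
      emit_insn (add);
    else
      {
        /* The add pattern's predicates rejected the operands; fall
           back, e.g. by forcing them into registers first.  */
      }

Whether the expansion succeeded is still signalled by a null return,
as in the gen_add3_insn hunk above.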
and then call debug_rtx_list to print it, using DEBUG_RTX_COUNT.
The found insn is returned to enable further debugging analysis. */
-DEBUG_FUNCTION const_rtx
+DEBUG_FUNCTION const rtx_insn *
debug_rtx_find (const rtx_insn *x, int uid)
{
while (x != 0 && INSN_UID (x) != uid)
|| reg_equiv_invariant (REGNO (SET_DEST (set)))))
{
unsigned regno = REGNO (SET_DEST (set));
- rtx init = reg_equiv_init (regno);
+ rtx_insn_list *init = reg_equiv_init (regno);
if (init)
{
rtx t = eliminate_regs_1 (SET_SRC (set), VOIDmode, insn,
for (j = 0; j < reload_n_operands; j++)
{
- rtx x = emit_insn_after (outaddr_address_reload_insns[j], insn);
+ rtx_insn *x = emit_insn_after (outaddr_address_reload_insns[j], insn);
x = emit_insn_after (output_address_reload_insns[j], x);
x = emit_insn_after (output_reload_insns[j], x);
emit_insn_after (other_output_reload_insns[j], x);
unsigned int i;
struct target_info *tinfo = NULL;
rtx_insn *insn;
- rtx jump_insn = 0;
rtx jump_target;
HARD_REG_SET scratch;
struct resources set, needed;
CLEAR_RESOURCE (&set);
CLEAR_RESOURCE (&needed);
- jump_insn = find_dead_or_set_registers (target, res, &jump_target, 0,
- set, needed);
+ rtx_insn *jump_insn = find_dead_or_set_registers (target, res, &jump_target,
+ 0, set, needed);
/* If we hit an unconditional branch, we have another way of finding out
what is live: we can see what is live at the branch target and include
extern void debug_rtx (const_rtx);
extern void debug_rtx_list (const rtx_insn *, int);
extern void debug_rtx_range (const rtx_insn *, const rtx_insn *);
-extern const_rtx debug_rtx_find (const rtx_insn *, int);
+extern const rtx_insn *debug_rtx_find (const rtx_insn *, int);
extern void print_mem_expr (FILE *, const_tree);
extern void print_rtl (FILE *, const_rtx);
extern void print_simple_rtl (FILE *, const_rtx);
remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
{
rtx_expr_list *temp = *listp;
- rtx prev = NULL_RTX;
+ rtx_expr_list *prev = NULL;
while (temp)
{
remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
{
rtx_insn_list *temp = *listp;
- rtx prev = NULL;
+ rtx_insn_list *prev = NULL;
while (temp)
{
if (JUMP_P (insn))
{
- rtx next;
- next = next_nonnote_nondebug_insn (insn);
+ rtx_insn *next = next_nonnote_nondebug_insn (insn);
if (next && BARRIER_P (next))
reg_pending_barrier = MOVE_BARRIER;
else
static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
- rtx prev, x;
-
/* INSN forms a group with the previous instruction. */
if (SCHED_GROUP_P (insn))
return true;
part of R, the clobber was added specifically to help us track the
liveness of R. There's no point scheduling the clobber and leaving
INSN behind, especially if we move the clobber to another block. */
- prev = prev_nonnote_nondebug_insn (insn);
+ rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
if (prev
&& INSN_P (prev)
&& BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
&& GET_CODE (PATTERN (prev)) == CLOBBER)
{
- x = XEXP (PATTERN (prev), 0);
+ rtx x = XEXP (PATTERN (prev), 0);
if (set_of (x, insn))
return true;
}
&& BB_END (last_bb) == insn);
{
- rtx x;
-
- x = NEXT_INSN (insn);
+ rtx_insn *x = NEXT_INSN (insn);
if (e)
gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
else
}
else
{
- insn_t note;
-
- note = bb_note (bb);
+ rtx_note *note = bb_note (bb);
head = next_nonnote_insn (note);
if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb))
static void
return_bb_to_pool (basic_block bb)
{
- rtx note = bb_note (bb);
+ rtx_note *note = bb_note (bb);
gcc_assert (NOTE_BASIC_BLOCK (note) == bb
&& bb->aux == NULL);
find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
{
int seqno;
- rtx next;
/* Check if we are about to insert bookkeeping copy before a jump, and use
jump's seqno for the copy; otherwise, use JOIN_POINT's seqno. */
- next = NEXT_INSN (place_to_insert);
+ rtx_insn *next = NEXT_INSN (place_to_insert);
if (INSN_P (next)
&& JUMP_P (next)
&& BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
static void ATTRIBUTE_UNUSED
move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
{
- insn_t prev_insn, next_insn, note;
+ insn_t prev_insn, next_insn;
gcc_assert (sel_bb_head_p (nop)
&& prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
- note = bb_note (BLOCK_FOR_INSN (nop));
+ rtx_note *note = bb_note (BLOCK_FOR_INSN (nop));
prev_insn = sel_bb_end (prev_bb);
next_insn = NEXT_INSN (nop);
gcc_assert (prev_insn != NULL_RTX
init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
{
int bbi = BLOCK_TO_BB (bb->index);
- insn_t insn, note = bb_note (bb);
+ insn_t insn;
insn_t succ_insn;
succ_iterator si;
+ rtx_note *note = bb_note (bb);
bitmap_set_bit (visited_bbs, bbi);
if (blocks_to_reschedule)
bitmap_clear_bit (blocks_to_reschedule, bb->index);
FOR_EACH_BB_REVERSE_FN (bb, cfun)
{
basic_block copy_bb, tbb;
- rtx_insn *insert_point;
int eflags;
if (!bitmap_clear_bit (&bb_tail, bb->index))
BB_COPY_PARTITION (copy_bb, bb);
}
- insert_point = emit_note_after (NOTE_INSN_DELETED,
- BB_END (copy_bb));
+ rtx_note *insert_point = emit_note_after (NOTE_INSN_DELETED,
+ BB_END (copy_bb));
emit_barrier_after (BB_END (copy_bb));
tbb = bb;
else if (*pdest_bb == NULL)
{
basic_block bb;
- rtx_insn *start;
bb = create_basic_block (NULL, NULL, exit_pred);
BB_COPY_PARTITION (bb, e->src);
- start = emit_jump_insn_after (gen_simple_return (),
- BB_END (bb));
+ rtx_jump_insn *start = emit_jump_insn_after (gen_simple_return (),
+ BB_END (bb));
JUMP_LABEL (start) = simple_return_rtx;
emit_barrier_after (start);
int i, ncases;
struct case_node *n;
rtx *labelvec;
- rtx fallback_label = label_rtx (case_list->code_label);
+ rtx_insn *fallback_label = label_rtx (case_list->code_label);
rtx_code_label *table_label = gen_label_rtx ();
bool has_gaps = false;
edge default_edge = stmt_bb ? EDGE_SUCC (stmt_bb, 0) : NULL;
/* Returns a list of registers mentioned in X.
FIXME: A regset would be prettier and less expensive. */
-static rtx
+static rtx_expr_list *
extract_mentioned_regs (rtx x)
{
- rtx mentioned_regs = NULL;
+ rtx_expr_list *mentioned_regs = NULL;
subrtx_var_iterator::array_type array;
FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
{