+2014-08-28 David Malcolm <dmalcolm@redhat.com>
+
+ * rtx-classes-status.txt (TODO): NEXT_INSN/PREV_INSN are done.
+
2014-08-27 Sebastian Pop <s.pop@samsung.com>

	* config/isl.m4 (_ISL_CHECK_CT_PROG): Removed.

+2014-08-28 David Malcolm <dmalcolm@redhat.com>
+
+ * rtl.h (RTX_PREV): Added checked casts to uses of PREV_INSN and
+ NEXT_INSN.
+ (PREV_INSN): Strengthen param from const_rtx to const rtx_insn *.
+ (NEXT_INSN): Likewise.
+ (JUMP_LABEL_AS_INSN): Add a "const" modifier to param.
+ (reg_used_between_p): Strengthen params 2 and 3 from const_rtx to
+ const rtx_insn *.
+ (no_labels_between_p): Likewise for both params.
+
+ * config/aarch64/aarch64.c (aarch64_output_casesi): Add a checked
+ cast when using NEXT_INSN on operands[2].
+ * config/alpha/alpha.c (alpha_set_memflags): Strengthen local
+ "insn" from rtx to rtx_insn *, adding a checked cast.
+ (alpha_handle_trap_shadows): Strengthen locals "i", "n" from rtx to
+ rtx_insn *.
+ * config/arc/arc-protos.h (arc_ccfsm_record_condition): Likewise
+ for third param.
+ (arc_text_label): Likewise for param "insn".
+ * config/arc/arc.c (arc_expand_epilogue): Likewise for local
+ "insn".
+ (arc_ccfsm_record_condition): Likewise for param "jump".
+ (arc_text_label): Likewise for local "label".
+ * config/arc/arc.md (doloop_begin_i): Likewise for local "scan".
+ Introduce a local "seq" via a dyn_cast to rtx_sequence *, and use
+ a method for typesafety. Add a checked cast.
+ * config/arc/constraints.md (Clb): Add a checked cast when getting
+ the CODE_LABEL from a LABEL_REF.
+ * config/arm/arm.c (require_pic_register): Strengthen locals
+ "seq", "insn" from rtx to rtx_insn *.
+ (create_fix_barrier): Likewise for locals "selected", "next".
+ (thumb1_reorg): Likewise for locals "prev", "insn".
+ (arm_expand_prologue): Likewise for local "last".
+ (thumb1_output_casesi): Add a checked cast when using NEXT_INSN on
+ operands[0].
+ (thumb2_output_casesi): Likewise for operands[2].
+ * config/avr/avr-log.c (avr_log_vadump): Within 'L' case,
+ strengthen local "insn" from rtx to rtx_insn *.
+ * config/bfin/bfin.c (find_next_insn_start): Likewise for return
+ type and param "insn".
+ (find_prev_insn_start): Likewise.
+ (hwloop_optimize): Likewise for locals "insn", "last_insn",
+ "prev".
+	(gen_one_bundle): Likewise for local "t".
+ (find_load): Likewise for param "insn".
+ (workaround_speculation): Likewise for locals "insn", "next",
+ "target", "next_tgt".
+ * config/c6x/c6x.c (assign_reservations): Likewise for both params
+ and for locals "insn", "within", "last".
+ (count_unit_reqs): Likewise for params "head", "tail" and local
+ "insn".
+ (try_rename_operands): Likewise for params "head", "tail".
+ (reshuffle_units): Likewise for locals "head", "tail", "insn".
+ (struct c6x_sched_context): Likewise for fields
+ "last_scheduled_insn", "last_scheduled_iter0".
+ (init_sched_state): Replace NULL_RTX with NULL.
+ (reorg_split_calls): Strengthen local "new_cycle_first" from rtx
+ to rtx_insn *.
+ (undo_split_delayed_nonbranch): Likewise for param and for local
+ "prev".
+ (conditionalize_after_sched): Likewise for local "insn".
+ (bb_earliest_end_cycle): Likewise.
+ (filter_insns_above): Likewise for locals "insn", "next".
+ (hwloop_optimize): Remove redundant checked cast.
+ (hwloop_fail): Strengthen local "t" from rtx to rtx_insn *.
+ * config/cris/cris.c (cris_initial_frame_pointer_offset): Replace
+ NULL_RTX with NULL.
+ (cris_simple_epilogue): Likewise.
+ (cris_expand_prologue): Likewise.
+ (cris_expand_epilogue): Likewise.
+ * config/frv/frv.c (frv_function_contains_far_jump): Strengthen
+ local "insn" from rtx to rtx_insn *.
+ (frv_ifcvt_modify_tests): Likewise for locals "last_insn", "insn".
+ (struct frv_packet_group): Likewise for the elements within array
+ fields "insns", "sorted", and for field "nop".
+ (frv_packet): Likewise for the elements within array field
+ "insns".
+ (frv_add_insn_to_packet): Likewise for param "insn".
+ (frv_insert_nop_in_packet): Likewise for param "insn" and local
+ "last".
+ (frv_for_each_packet): Likewise for locals "insn", "next_insn".
+ (frv_sort_insn_group_1): Likewise for local "insn".
+ (frv_optimize_membar_local): Likewise.
+ (frv_align_label): Likewise for locals "x", "last", "barrier",
+ "label".
+ * config/ia64/ia64.c (last_scheduled_insn): Likewise for this
+ local.
+ (ia64_sched_init): Likewise for local "insn".
+ (scheduled_good_insn): Likewise for param "last".
+ (struct _ia64_sched_context): Likewise for field
+ "last_scheduled_insn".
+ (ia64_init_sched_context): Replace NULL_RTX with NULL.
+	(struct bundle_state): Strengthen field "insn" from rtx to
+	rtx_insn *.
+ (issue_nops_and_insn): Likewise for param "insn".
+ (get_next_important_insn): Likewise for return type and both
+ params.
+ (ia64_add_bundle_selector_before): Likewise for param "insn".
+ (bundling): Likewise for params "prev_head_insn", "tail" and
+ locals "insn", "next_insn", "b". Eliminate top-level local rtx
+	"nop" in favor of new locals rtx "nop_pat" and rtx_insn * "nop".
+ * config/iq2000/iq2000-protos.h (iq2000_fill_delay_slot):
+ Strengthen final param from rtx to rtx_insn *.
+ (iq2000_move_1word): Likewise for second param.
+ * config/iq2000/iq2000.c (iq2000_fill_delay_slot): Likewise for
+ param "cur_insn" and local "next_insn".
+ (iq2000_move_1word): Likewise for param "insn".
+ * config/iq2000/iq2000.md (insn before ADDR_DIFF_VEC): Add checked
+ casts when using NEXT_INSN on operands[1].
+ * config/m32c/m32c.c (m32c_function_needs_enter): Strengthen local
+ "insn" from rtx to rtx_insn *.
+ * config/m68k/m68k.c (m68k_jump_table_ref_p): Split out uses of
+	"x", introducing local rtx_insn * "insn" when working with the
+ CODE_LABEL of the LABEL_REF.
+ (m68k_sched_md_init_global): Strengthen local "insn" from rtx to
+ rtx_insn *.
+ * config/mcore/mcore-protos.h (mcore_is_dead): Likewise for first
+ param.
+	* config/mcore/mcore.c (mcore_is_dead): Likewise for param "first"
+	and local "insn".
+	(emit_new_cond_insn): Likewise for return type.
+	(conditionalize_block): Likewise for return type, param, and
+	locals "insn", "blk_1_br", "end_blk_2_insn", "start_blk_3_lab",
+	"newinsn".
+ (conditionalize_optimization): Likewise for local "insn".
+ * config/mep/mep.c (mep_jmp_return_reorg): Add checked cast when
+ using NEXT_INSN.
+ * config/microblaze/microblaze.md: Add checked casts when using
+ NEXT_INSN.
+ * config/mips/mips.c (mips_expand_prologue): Eliminate top-level
+	rtx "insn" in favor of various more tightly-scoped rtx "insn"
+	and rtx_insn * "insn".
+ * config/mips/mips.md (casesi_internal_mips16_<mode>): Add a
+ checked cast when using NEXT_INSN on operands[2].
+ * config/mn10300/mn10300.c (mn10300_insert_setlb_lcc): Strengthen
+ local "insn" from rtx to rtx_insn *.
+ * config/nds32/nds32-fp-as-gp.c (nds32_fp_as_gp_check_available):
+ Likewise.
+ * config/nds32/nds32-md-auxiliary.c (nds32_output_casesi_pc_relative):
+ Add a checked cast when using NEXT_INSN on operands[1].
+ * config/pa/pa-protos.h (pa_following_call): Strengthen param from
+ rtx to rtx_insn *.
+ (pa_output_cbranch): Likewise for final param.
+ (pa_output_lbranch): Likewise for second param.
+ (pa_output_bb): Likewise for third param.
+ (pa_output_bvb): Likewise.
+ (pa_output_dbra): Likewise for second param.
+ (pa_output_movb): Likewise.
+ (pa_output_parallel_movb): Likewise.
+ (pa_output_parallel_addb): Likewise.
+ (pa_output_millicode_call): Likewise for first param.
+ (pa_output_mul_insn): Likewise for second param.
+ (pa_output_div_insn): Likewise for third param.
+ (pa_output_mod_insn): Likewise for second param.
+ (pa_jump_in_call_delay): Likewise for param.
+ * config/pa/pa.c (pa_output_mul_insn): Likewise for param "insn".
+ (pa_output_div_insn): Likewise.
+ (pa_output_mod_insn): Likewise.
+ (pa_output_cbranch): Likewise.
+ (pa_output_lbranch): Likewise.
+ (pa_output_bb): Likewise.
+ (pa_output_bvb): Likewise.
+ (pa_output_dbra): Likewise.
+ (pa_output_movb): Likewise.
+ (pa_output_millicode_call): Likewise; use method of rtx_sequence *
+ to simplify and for typesafety.
+ (pa_output_call): Use method of rtx_sequence *.
+ (forward_branch_p): Strengthen param "insn" from rtx to rtx_insn *.
+ (pa_jump_in_call_delay): Likewise.
+ (pa_output_parallel_movb): Likewise.
+ (pa_output_parallel_addb): Likewise.
+ (pa_following_call): Likewise.
+ (pa_combine_instructions): Likewise for locals "anchor",
+ "floater".
+ (pa_can_combine_p): Likewise for params "anchor", "floater" and
+ locals "start", "end".
+ * config/picochip/picochip.c (picochip_reset_vliw): Likewise for
+ param "insn" and local "local_insn".
+ (picochip_final_prescan_insn): Likewise for local "local_insn".
+ * config/rs6000/rs6000.c (compute_save_world_info): Likewise for
+ local "insn".
+ (uses_TOC): Likewise.
+ * config/s390/s390.c (get_some_local_dynamic_name): Likewise.
+ (s390_mainpool_finish): Eliminate top-level local rtx "insn",
+	splitting out to more tightly-scoped locals, three as rtx and one as
+ rtx_insn *.
+ (s390_optimize_nonescaping_tx): Strengthen local "tmp" from rtx
+ to rtx_insn *.
+ (s390_emit_prologue): Introduce a local "insn" to be an rtx_insn *
+ where needed.
+	* config/sh/sh-protos.h (barrier_align): Strengthen param from rtx
+ to rtx_insn *.
+ (fixup_addr_diff_vecs): Likewise.
+ (reg_unused_after): Likewise for param 2.
+ (sh_can_redirect_branch): Likewise for both params.
+ (check_use_sfunc_addr): Likewise for param 1.
+ * config/sh/sh.c (fixup_mova): Likewise for local "worker".
+ (find_barrier): Likewise for local "last_got".
+ (gen_block_redirect): Likewise for return type, param "jump" and
+ locals "prev", "scan", "next", "insn".
+ (struct far_branch): Likewise for fields "near_label",
+ "insert_place", "far_label".
+ (gen_far_branch): Likewise for local "jump".
+ (fixup_addr_diff_vecs): Likewise for param "first" and locals
+ "insn", "prev".
+ (barrier_align): Likewise for param and for locals "prev", "x".
+ Introduce local rtx_sequence * "prev_seq" and use insn method for
+ typesafety and clarity.
+ (sh_reorg): Strengthen local "scan" from rtx to rtx_insn *.
+ (get_dest_uid): Likewise for local "dest".
+ (split_branches): Likewise for locals "next", "beyond", "label",
+ "block", "far_label". Add checked casts when assigning to
+ bp->far_label and "far_label".
+ (reg_unused_after): Strengthen param "scan" from rtx to rtx_insn *.
+ (sequence_insn_p): Likewise.
+ (mark_constant_pool_use): Likewise for locals "insn", "lab". Add a
+	loop-scoped rtx "insn" when walking LABEL_REFS.
+ (sh_can_redirect_branch): Strengthen both params from rtx to
+ rtx_insn *.
+ (check_use_sfunc_addr): Likewise for param "insn". Introduce a
+ new local rtx_sequence * "seq" via a dyn_cast, and use a method
+ for clarity and typesafety.
+ * config/sh/sh.md (define_expand "epilogue"): Strengthen local
+ "insn" from rtx to rtx_insn *.
+ (define_insn "casesi_worker_1"): Add a checked cast to rtx_insn *
+ when using NEXT_INSN on the CODE_LABEL in operands[2].
+ (define_insn "casesi_worker_2"): Likewise.
+ (define_insn "casesi_shift_media"): Likewise.
+ (define_insn "casesi_load_media"): Likewise for the CODE_LABEL in
+ operands[3].
+ * config/sh/sh_optimize_sett_clrt.cc (struct ccreg_value):
+ Strengthen field "insn" from rtx to rtx_insn *.
+ (sh_optimize_sett_clrt::execute): Likewise for locals "next_i", "i".
+ (sh_optimize_sett_clrt::find_last_ccreg_values): Likewise for
+ param "start_insn" and local "start_insn".
+ * config/sh/sh_treg_combine.cc (struct set_of_reg): Likewise for
+ field "insn".
+ (find_set_of_reg_bb): Likewise for param "insn".
+ (trace_reg_uses_1): Likewise for param "start_insn" and local "i".
+ (trace_reg_uses): Likewise for param "start_insn".
+ (sh_treg_combine::cbranch_trace): Likewise for field
+ "cbranch_insn".
+ (sh_treg_combine::cbranch_trace::cbranch_trace): Likewise for
+ param "insn".
+ (sh_treg_combine::record_set_of_reg): Likewise for param
+ "start_insn" and local "i".
+ (sh_treg_combine::can_remove_cstore): Likewise for local
+ "prev_insn".
+ (sh_treg_combine::try_optimize_cbranch): Likewise for param
+ "insn".
+ (sh_treg_combine::execute): Likewise for local "i".
+ * config/sparc/sparc-protos.h (empty_delay_slot): Likewise for
+ param.
+ (sparc_check_64): Likewise for second param.
+ * config/sparc/sparc.c (sparc_do_work_around_errata): Likewise for
+ locals "insn", "next". Introduce local rtx_sequence * "seq" via a
+ dyn_cast, using its insn method for typesafety and clarity.
+ (empty_delay_slot): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (set_extends): Likewise.
+ (sparc_check_64): Likewise.
+ * config/stormy16/stormy16.c (xstormy16_split_cbranch): Likewise
+ for locals "seq", "last_insn".
+ (combine_bnp): Likewise for param "insn".
+ (xstormy16_reorg): Likewise for local "insn".
+ * config/v850/v850.c (substitute_ep_register): Likewise for params
+ "first_insn", "last_insn" and local "insn".
+ (v850_reorg): Likewise for fields "first_insn", "last_insn" within
+ elements of "regs" array, and local "insn".
+ * except.c (emit_note_eh_region_end): Likewise for param "insn".
+ * final.c (final_sequence): Strengthen this global from rtx to
+ rtx_sequence *.
+	(shorten_branches): Strengthen locals "rel_lab", "prev" from rtx to
+ rtx_insn *.
+ (final_scan_insn): Update assignment to "final_sequence" to be
+ from "seq", the cast version of "body", for type-safety.
+ * function.c (assign_parm_setup_reg): Strengthen locals "insn",
+ "insns" from rtx to rtx_insn *.
+ (thread_prologue_and_epilogue_insns): Likewise for local "seq".
+ * genattr.c (main): When writing out generated insn-attr.h,
+ strengthen params 1 and 3 of eligible_for_delay,
+ eligible_for_annul_true, eligible_for_annul_false from rtx to
+ rtx_insn *.
+ * genattrtab.c (write_eligible_delay): Likewise when writing out
+	generated insn-attrtab.c; also strengthen local "insn" in the
+	generated functions.
+ * hw-doloop.c (discover_loops): Strengthen local "insn" from rtx
+ to rtx_insn *.
+ * hw-doloop.h (struct GTY hwloop_info_d): Strengthen field
+ "start_label" from rtx to rtx_insn *.
+ * ira.c (decrease_live_ranges_number): Likewise for local "p".
+ (ira_update_equiv_info_by_shuffle_insn): Likewise for param
+ "insns" and local "insn".
+ (validate_equiv_mem): Likewise for param "start" and local "insn".
+ (memref_used_between_p): Likewise for params "start", "end" and
+ local "insn".
+ * ira.h (ira_update_equiv_info_by_shuffle_insn): Likewise for
+ final param.
+ * loop-doloop.c (doloop_optimize): Within region guarded by
+ INSN_P (doloop_pat), introduce a new local rtx_insn *
+ "doloop_insn" via a checked cast, and use it for typesafety,
+ eventually writing the value back into doloop_pat.
+ * output.h (final_sequence): Strengthen this global from rtx to
+ rtx_sequence *.
+ * recog.c (peep2_attempt): Rename param "insn" to "uncast_insn",
+ reintroducing "insn" as an rtx_insn * via a checked cast.
+	Strengthen param "attempt" and local "new_insn" from rtx to
+ rtx_insn *.
+ (peephole2_optimize): Strengthen locals "insn", "attempt" from rtx
+ to rtx_insn *.
+ * ree.c (emit_note_eh_region_end): Likewise for local "insn".
+ * reload1.c (reload_as_needed): Eliminate top-level locals "x" and
+ "p" in favor of more tightly-scoped replacements, sometimes rtx
+ and sometimes rtx_insn *, as appropriate.
+ (delete_output_reload): Eliminate top-level rtx "i1", splitting
+ into two loop-scoped locals, one an rtx, the other an rtx_insn *.
+ * reorg.c (delete_scheduled_jump): Add checked cast. Strengthen
+ local "trial" from rtx to rtx_insn *.
+ (redirect_with_delay_slots_safe_p): Strengthen param "jump" from
+	rtx to rtx_insn *. Strengthen local "pat" from rtx to
+ rtx_sequence * and use methods for clarity and typesafety.
+ (redirect_with_delay_list_safe_p): Strengthen param "jump" from
+	rtx to rtx_insn *. Strengthen local "li" from rtx to
+ rtx_insn_list * and use its methods for clarity and typesafety.
+ (steal_delay_list_from_target): Strengthen param "insn" from rtx
+ to rtx_insn *.
+ (steal_delay_list_from_fallthrough): Likewise.
+ (try_merge_delay_insns): Likewise for param "thread" and locals
+ "trial", "next_trial", "delay_insn".
+ (redundant_insn): Likewise for param "target" and local "trial".
+ (own_thread_p): Likewise for param "thread" and locals
+ "active_insn", "insn".
+ (get_label_before): Likewise for param "insn".
+ (fill_simple_delay_slots): Likewise for local "new_label"; use
+ JUMP_LABEL_AS_INSN as necessary when calling own_thread_p.
+ (label_before_next_insn): Strengthen return type and local "insn"
+ from rtx to rtx_insn *.
+ (relax_delay_slots): Likewise for locals "other", "tmp".
+ (make_return_insns): Likewise for param "first" and locals "insn",
+ "jump_insn", "prev". Move declaration of "pat" to its assignment
+ and strengthen from rtx to rtx_sequence *. Use its methods for
+ clarity and typesafety.
+ * rtlanal.c (no_labels_between_p): Strengthen params from
+ const_rtx to const rtx_insn *. Strengthen local "p" from rtx to
+ rtx_insn *.
+ (reg_used_between_p): Strengthen params "from_insn", "to_insn"
+ from const_rtx to const rtx_insn *.
+ (reg_set_between_p): Rename param "from_insn" to
+ "uncast_from_insn", and reintroduce "from_insn" as a
+ const rtx_insn * via a checked cast.
+ (modified_between_p): Likewise for param "start" as "uncast_start".
+ (tablejump_p): Add a cast when invoking NEXT_INSN on "label".
+ * sel-sched-ir.c (get_seqno_by_preds): Strengthen param and locals
+	"tmp", "head" from rtx to rtx_insn *.
+ (recompute_rev_top_order): Likewise for local "insn".
+ * sel-sched-ir.h (get_seqno_by_preds): Likewise for param.
+ * store-motion.c (build_store_vectors): Likewise for local "insn".
+ Strengthen local "st" from rtx to rtx_insn_list * and use methods
+ for clarity and typesafety.
+ * tree-ssa-loop-ivopts.c (seq_cost): Strengthen param "seq" from
+ rtx to rtx_insn *.
+ (computation_cost): Likewise for local "seq".
+ (get_address_cost): Likewise.
+
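For reference, the recurring call-site pattern this entry applies
across the back ends, as a minimal sketch (the variable names here are
hypothetical, not taken from the patch):

    /* A label rtx extracted from a LABEL_REF is known to be a
       CODE_LABEL, but NEXT_INSN now only accepts an rtx_insn *, so
       the call site must assert that with a checked cast.  */
    rtx x = XEXP (label_ref, 0);
    rtx_insn *label = as_a <rtx_insn *> (x);
    rtx diff_vec = PATTERN (NEXT_INSN (label));

In checking builds, as_a aborts if the operand is not actually an insn;
in release builds it compiles down to a plain cast.
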
2014-08-28 David Malcolm <dmalcolm@redhat.com>

	* rtl.h (tablejump_p): Strengthen first param from const_rtx to
{
char buf[100];
char label[100];
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
int index;
static const char *const patterns[4][2] =
{
void
alpha_set_memflags (rtx seq, rtx ref)
{
- rtx insn;
+ rtx_insn *insn;
if (!MEM_P (ref))
return;
&& !MEM_READONLY_P (ref))
return;
- for (insn = seq; insn; insn = NEXT_INSN (insn))
+ for (insn = as_a <rtx_insn *> (seq); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
else
{
struct shadow_summary shadow;
int trap_pending, exception_nesting;
- rtx i, n;
+ rtx_insn *i, *n;
trap_pending = 0;
exception_nesting = 0;
extern rtx gen_mhi (void);
extern bool arc_branch_size_unknown_p (void);
struct arc_ccfsm;
-extern void arc_ccfsm_record_condition (rtx, bool, rtx, struct arc_ccfsm *);
+extern void arc_ccfsm_record_condition (rtx, bool, rtx_insn *,
+ struct arc_ccfsm *);
extern void arc_expand_prologue (void);
extern void arc_expand_epilogue (int);
extern void arc_init_expanders (void);
extern bool arc_sets_cc_p (rtx insn);
extern int arc_label_align (rtx label);
extern bool arc_need_delay (rtx_insn *insn);
-extern bool arc_text_label (rtx);
+extern bool arc_text_label (rtx_insn *insn);
+
extern int arc_decl_pretend_args (tree decl);
extern bool arc_short_comparison_p (rtx, int);
extern bool arc_epilogue_uses (int regno);
epilogue_done:
if (!TARGET_EPILOGUE_CFI)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
RTX_FRAME_RELATED_P (insn) = 0;
the ccfsm state accordingly.
REVERSE says branch will branch when the condition is false. */
void
-arc_ccfsm_record_condition (rtx cond, bool reverse, rtx jump,
+arc_ccfsm_record_condition (rtx cond, bool reverse, rtx_insn *jump,
struct arc_ccfsm *state)
{
rtx_insn *seq_insn = NEXT_INSN (PREV_INSN (jump));
/* Return true if LABEL is in executable code. */
bool
-arc_text_label (rtx label)
+arc_text_label (rtx_insn *label)
{
rtx_insn *next;
(use (match_operand 4 "const_int_operand" "C_0,X,X"))]
""
{
- rtx scan;
+ rtx_insn *scan;
int len, size = 0;
int n_insns = 0;
rtx loop_start = operands[4];
{
if (!INSN_P (scan))
continue;
- if (GET_CODE (PATTERN (scan)) == SEQUENCE)
- scan = XVECEXP (PATTERN (scan), 0, 0);
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (scan)))
+ scan = seq->insn (0);
if (JUMP_P (scan))
{
if (recog_memoized (scan) != CODE_FOR_doloop_end_i)
n_insns += 2;
if (simplejump_p (scan))
{
- scan = XEXP (SET_SRC (PATTERN (scan)), 0);
+ scan = as_a <rtx_insn *> (XEXP (SET_SRC (PATTERN (scan)), 0));
continue;
}
if (JUMP_LABEL (scan)
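The dyn_cast idiom above subsumes the old GET_CODE (PATTERN (x)) ==
SEQUENCE test; a minimal sketch of the resulting loop shape, assuming
the len/insn accessors of rtx_sequence (the callee name is
hypothetical):

    if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (scan)))
      for (int i = 0; i < seq->len (); i++)
        handle_insn (seq->insn (i));  /* insn (i) returns an
                                         rtx_insn *, no cast needed.  */

A failed dyn_cast yields NULL, so non-SEQUENCE patterns simply skip the
branch.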
(define_constraint "Clb"
"label"
(and (match_code "label_ref")
- (match_test "arc_text_label (XEXP (op, 0))")))
+ (match_test "arc_text_label (as_a <rtx_insn *> (XEXP (op, 0)))")))
(define_constraint "Cal"
"constant for arithmetic/logical operations"
}
else
{
- rtx seq, insn;
+ rtx_insn *seq, *insn;
if (!cfun->machine->pic_reg)
cfun->machine->pic_reg = gen_reg_rtx (Pmode);
rtx_barrier *barrier;
rtx_insn *from = fix->insn;
/* The instruction after which we will insert the jump. */
- rtx selected = NULL;
+ rtx_insn *selected = NULL;
int selected_cost;
/* The address at which the jump instruction will be placed. */
HOST_WIDE_INT selected_address;
CALL_ARG_LOCATION note. */
if (CALL_P (selected))
{
- rtx next = NEXT_INSN (selected);
+ rtx_insn *next = NEXT_INSN (selected);
if (next && NOTE_P (next)
&& NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
selected = next;
{
rtx dest, src;
rtx pat, op0, set = NULL;
- rtx prev, insn = BB_END (bb);
+ rtx_insn *prev, *insn = BB_END (bb);
bool insn_clobbered = false;
while (insn != BB_HEAD (bb) && !NONDEBUG_INSN_P (insn))
{
/* This add can produce multiple insns for a large constant, so we
need to get tricky. */
- rtx last = get_last_insn ();
+ rtx_insn *last = get_last_insn ();
amount = GEN_INT (offsets->saved_args + saved_regs
- offsets->outgoing_args);
const char *
thumb1_output_casesi (rtx *operands)
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[0]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[0])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
const char *
thumb2_output_casesi (rtx *operands)
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
case 'L':
{
- rtx insn = va_arg (ap, rtx);
+ rtx_insn *insn = safe_as_a <rtx_insn *> (va_arg (ap, rtx));
while (insn)
{
/* This function acts like NEXT_INSN, but is aware of three-insn bundles and
skips all subsequent parallel instructions if INSN is the start of such
a group. */
-static rtx
-find_next_insn_start (rtx insn)
+static rtx_insn *
+find_next_insn_start (rtx_insn *insn)
{
if (GET_MODE (insn) == SImode)
{
/* This function acts like PREV_INSN, but is aware of three-insn bundles and
skips all subsequent parallel instructions if INSN is the start of such
a group. */
-static rtx
-find_prev_insn_start (rtx insn)
+static rtx_insn *
+find_prev_insn_start (rtx_insn *insn)
{
insn = PREV_INSN (insn);
gcc_assert (GET_MODE (insn) != SImode);
hwloop_optimize (hwloop_info loop)
{
basic_block bb;
- rtx insn, last_insn;
+ rtx_insn *insn, *last_insn;
rtx loop_init, start_label, end_label;
rtx iter_reg, scratchreg, scratch_init, scratch_init_insn;
rtx lc_reg, lt_reg, lb_reg;
}
else
{
- last_insn = NULL_RTX;
+ last_insn = NULL;
break;
}
}
last_insn = emit_insn_after (gen_forced_nop (), last_insn);
}
- loop->last_insn = safe_as_a <rtx_insn *> (last_insn);
+ loop->last_insn = last_insn;
/* The loop is good for replacement. */
start_label = loop->start_label;
if (loop->incoming_src)
{
- rtx prev = BB_END (loop->incoming_src);
+ rtx_insn *prev = BB_END (loop->incoming_src);
if (vec_safe_length (loop->incoming) > 1
|| !(loop->incoming->last ()->flags & EDGE_FALLTHRU))
{
/* Verify that we really can do the multi-issue. */
if (slot[0])
{
- rtx t = NEXT_INSN (slot[0]);
+ rtx_insn *t = NEXT_INSN (slot[0]);
while (t != slot[1])
{
if (! NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_DELETED)
a three-insn bundle, see if one of them is a load and return that if so.
Return NULL_RTX if the insn does not contain loads. */
static rtx
-find_load (rtx insn)
+find_load (rtx_insn *insn)
{
if (!NONDEBUG_INSN_P (insn))
return NULL_RTX;
static void
workaround_speculation (void)
{
- rtx insn, next;
+ rtx_insn *insn, *next;
rtx last_condjump = NULL_RTX;
int cycles_since_jump = INT_MAX;
int delay_added = 0;
&& (INSN_CODE (insn) == CODE_FOR_cbranch_predicted_taken
|| cbranch_predicted_taken_p (insn)))
{
- rtx target = JUMP_LABEL (insn);
+ rtx_insn *target = JUMP_LABEL_AS_INSN (insn);
rtx label = target;
- rtx next_tgt;
+ rtx_insn *next_tgt;
cycles_since_jump = 0;
for (; target && cycles_since_jump < 3; target = next_tgt)
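JUMP_LABEL_AS_INSN, used above in place of JUMP_LABEL, is the
insn-typed accessor; roughly this (see rtl.h for the exact
definition):

    inline rtx_insn *
    JUMP_LABEL_AS_INSN (const rtx_insn *insn)
    {
      /* safe_as_a tolerates NULL, since a jump may lack a label.  */
      return safe_as_a <rtx_insn *> (JUMP_LABEL (insn));
    }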
/* After scheduling, walk the insns between HEAD and END and assign unit
reservations. */
static void
-assign_reservations (rtx head, rtx end)
+assign_reservations (rtx_insn *head, rtx_insn *end)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
{
unsigned int sched_mask, reserved;
- rtx within, last;
+ rtx_insn *within, *last;
int pass;
int rsrv[2];
int rsrv_count[2][4];
continue;
reserved = 0;
- last = NULL_RTX;
+ last = NULL;
/* Find the last insn in the packet. It has a state recorded for it,
which we can use to determine the units we should be using. */
for (within = insn;
/* Walk the insns between and including HEAD and TAIL, and mark the
resource requirements in the unit_reqs table. */
static void
-count_unit_reqs (unit_req_table reqs, rtx head, rtx tail)
+count_unit_reqs (unit_req_table reqs, rtx_insn *head, rtx_insn *tail)
{
- rtx insn;
+ rtx_insn *insn;
memset (reqs, 0, sizeof (unit_req_table));
We recompute this information locally after our transformation, and keep
it only if we managed to improve the balance. */
static void
-try_rename_operands (rtx head, rtx tail, unit_req_table reqs, rtx insn,
+try_rename_operands (rtx_insn *head, rtx_insn *tail, unit_req_table reqs,
+ rtx insn,
insn_rr_info *info, unsigned int op_mask, int orig_side)
{
enum reg_class super_class = orig_side == 0 ? B_REGS : A_REGS;
static void
reshuffle_units (basic_block loop)
{
- rtx head = BB_HEAD (loop);
- rtx tail = BB_END (loop);
- rtx insn;
+ rtx_insn *head = BB_HEAD (loop);
+ rtx_insn *tail = BB_END (loop);
+ rtx_insn *insn;
unit_req_table reqs;
edge e;
edge_iterator ei;
int delays_finished_at;
/* The following variable value is the last issued insn. */
- rtx last_scheduled_insn;
+ rtx_insn *last_scheduled_insn;
/* The last issued insn that isn't a shadow of another. */
rtx_insn *last_scheduled_iter0;
static void
init_sched_state (c6x_sched_context_t sc)
{
- sc->last_scheduled_insn = NULL_RTX;
+ sc->last_scheduled_insn = NULL;
sc->last_scheduled_iter0 = NULL;
sc->issued_this_cycle = 0;
memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
= INSN_INFO_ENTRY (INSN_UID (last_same_clock)).unit_mask;
if (GET_MODE (insn) == TImode)
{
- rtx new_cycle_first = NEXT_INSN (insn);
+ rtx_insn *new_cycle_first = NEXT_INSN (insn);
while (!NONDEBUG_INSN_P (new_cycle_first)
|| GET_CODE (PATTERN (new_cycle_first)) == USE
|| GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
/* Examine if INSN is the result of splitting a load into a real load and a
shadow, and if so, undo the transformation. */
static void
-undo_split_delayed_nonbranch (rtx insn)
+undo_split_delayed_nonbranch (rtx_insn *insn)
{
int icode = recog_memoized (insn);
enum attr_type type;
- rtx prev_pat, insn_pat, prev;
+ rtx prev_pat, insn_pat;
+ rtx_insn *prev;
if (icode < 0)
return;
conditionalize_after_sched (void)
{
basic_block bb;
- rtx insn;
+ rtx_insn *insn;
FOR_EACH_BB_FN (bb, cfun)
FOR_BB_INSNS (bb, insn)
{
bb_earliest_end_cycle (basic_block bb, rtx ignore)
{
int earliest = 0;
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
{
static void
filter_insns_above (basic_block bb, int max_uid)
{
- rtx insn, next;
+ rtx_insn *insn, *next;
bool prev_ti = false;
int prev_cycle = -1;
require. */
prev = NULL;
n_execute_packets = 0;
- for (insn = as_a <rtx_insn *> (loop->start_label);
+ for (insn = loop->start_label;
insn != loop->loop_end;
insn = NEXT_INSN (insn))
{
emit_insn_before (insn, loop->loop_end);
else
{
- rtx t = loop->start_label;
+ rtx_insn *t = loop->start_label;
while (!NOTE_P (t) || NOTE_KIND (t) != NOTE_INSN_BASIC_BLOCK)
t = NEXT_INSN (t);
emit_insn_after (insn, t);
push_topmost_sequence ();
got_really_used
= reg_used_between_p (pic_offset_table_rtx, get_insns (),
- NULL_RTX);
+ NULL);
pop_topmost_sequence ();
}
{
push_topmost_sequence ();
got_really_used
- = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+ = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL);
pop_topmost_sequence ();
}
it's still used. */
push_topmost_sequence ();
got_really_used
- = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+ = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL);
pop_topmost_sequence ();
}
it's still used. */
push_topmost_sequence ();
got_really_used
- = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL_RTX);
+ = reg_used_between_p (pic_offset_table_rtx, get_insns (), NULL);
pop_topmost_sequence ();
}
static void frv_start_packet_block (void);
static void frv_finish_packet (void (*) (void));
static bool frv_pack_insn_p (rtx);
-static void frv_add_insn_to_packet (rtx);
-static void frv_insert_nop_in_packet (rtx);
+static void frv_add_insn_to_packet (rtx_insn *);
+static void frv_insert_nop_in_packet (rtx_insn *);
static bool frv_for_each_packet (void (*) (void));
static bool frv_sort_insn_group_1 (enum frv_insn_group,
unsigned int, unsigned int,
static int
frv_function_contains_far_jump (void)
{
- rtx insn = get_insns ();
+ rtx_insn *insn = get_insns ();
while (insn != NULL
&& !(JUMP_P (insn)
&& get_attr_far_jump (insn) == FAR_JUMP_YES))
/* Scan all of the blocks for registers that must not be allocated. */
for (j = 0; j < num_bb; j++)
{
- rtx last_insn = BB_END (bb[j]);
- rtx insn = BB_HEAD (bb[j]);
+ rtx_insn *last_insn = BB_END (bb[j]);
+ rtx_insn *insn = BB_HEAD (bb[j]);
unsigned int regno;
if (dump_file)
/* A list of the instructions that belong to this group, in the order
they appear in the rtl stream. */
- rtx insns[ARRAY_SIZE (frv_unit_codes)];
+ rtx_insn *insns[ARRAY_SIZE (frv_unit_codes)];
/* The contents of INSNS after they have been sorted into the correct
assembly-language order. Element X issues to unit X. The list may
contain extra nops. */
- rtx sorted[ARRAY_SIZE (frv_unit_codes)];
+ rtx_insn *sorted[ARRAY_SIZE (frv_unit_codes)];
/* The member of frv_nops[] to use in sorted[]. */
- rtx nop;
+ rtx_insn *nop;
};
/* The current state of the packing pass, implemented by frv_pack_insns. */
struct frv_packet_group groups[NUM_GROUPS];
/* The instructions that make up the current packet. */
- rtx insns[ARRAY_SIZE (frv_unit_codes)];
+ rtx_insn *insns[ARRAY_SIZE (frv_unit_codes)];
unsigned int num_insns;
} frv_packet;
/* Add instruction INSN to the current packet. */
static void
-frv_add_insn_to_packet (rtx insn)
+frv_add_insn_to_packet (rtx_insn *insn)
{
struct frv_packet_group *packet_group;
add to the end. */
static void
-frv_insert_nop_in_packet (rtx insn)
+frv_insert_nop_in_packet (rtx_insn *insn)
{
struct frv_packet_group *packet_group;
- rtx last;
+ rtx_insn *last;
packet_group = &frv_packet.groups[frv_unit_groups[frv_insn_unit (insn)]];
last = frv_packet.insns[frv_packet.num_insns - 1];
static bool
frv_for_each_packet (void (*handle_packet) (void))
{
- rtx insn, next_insn;
+ rtx_insn *insn, *next_insn;
frv_packet.issue_rate = frv_issue_rate ();
unsigned int i;
state_t test_state;
size_t dfa_size;
- rtx insn;
+ rtx_insn *insn;
/* Early success if we've filled all the slots. */
if (lower_slot == upper_slot)
rtx *last_membar)
{
HARD_REG_SET used_regs;
- rtx next_membar, set, insn;
+ rtx next_membar, set;
+ rtx_insn *insn;
bool next_is_end_p;
/* NEXT_IO is the next I/O operation to be performed after the current
frv_align_label (void)
{
unsigned int alignment, target, nop;
- rtx x, last, barrier, label;
+ rtx_insn *x, *last, *barrier, *label;
/* Walk forward to the start of the next packet. Set ALIGNMENT to the
maximum alignment of that packet, LABEL to the last label between
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
-static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
+static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
+ int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);
-static rtx get_next_important_insn (rtx, rtx);
+static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
static bool important_for_bundling_p (rtx);
static bool unknown_for_bundling_p (rtx);
-static void bundling (FILE *, int, rtx, rtx);
+static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
/* The following variable value is the last issued insn. */
-static rtx last_scheduled_insn;
+static rtx_insn *last_scheduled_insn;
/* The following variable value is pointer to a DFA state used as
temporary variable. */
int max_ready ATTRIBUTE_UNUSED)
{
#ifdef ENABLE_CHECKING
- rtx insn;
+ rtx_insn *insn;
if (!sel_sched_p () && reload_completed)
for (insn = NEXT_INSN (current_sched_info->prev_head);
insn = NEXT_INSN (insn))
gcc_assert (!SCHED_GROUP_P (insn));
#endif
- last_scheduled_insn = NULL_RTX;
+ last_scheduled_insn = NULL;
init_insn_group_barriers ();
current_cycle = 0;
/* Returns 1 when a meaningful insn was scheduled between the last group
barrier and LAST. */
static int
-scheduled_good_insn (rtx last)
+scheduled_good_insn (rtx_insn *last)
{
if (last && recog_memoized (last) >= 0)
return 1;
struct _ia64_sched_context
{
state_t prev_cycle_state;
- rtx last_scheduled_insn;
+ rtx_insn *last_scheduled_insn;
struct reg_write_state rws_sum[NUM_REGS];
struct reg_write_state rws_insn[NUM_REGS];
int first_instruction;
if (clean_p)
{
state_reset (sc->prev_cycle_state);
- sc->last_scheduled_insn = NULL_RTX;
+ sc->last_scheduled_insn = NULL;
memset (sc->rws_sum, 0, sizeof (rws_sum));
memset (sc->rws_insn, 0, sizeof (rws_insn));
sc->first_instruction = 1;
/* Unique bundle state number to identify them in the debugging
output */
int unique_num;
- rtx insn; /* corresponding insn, NULL for the 1st and the last state */
+ rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state */
/* number nops before and after the insn */
short before_nops_num, after_nops_num;
int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
static void
issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
- rtx insn, int try_bundle_end_p, int only_bundle_end_p)
+ rtx_insn *insn, int try_bundle_end_p,
+ int only_bundle_end_p)
{
struct bundle_state *curr_state;
/* The following function returns an insn important for insn bundling
followed by INSN and before TAIL. */
-static rtx
-get_next_important_insn (rtx insn, rtx tail)
+static rtx_insn *
+get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
{
for (; insn && insn != tail; insn = NEXT_INSN (insn))
if (important_for_bundling_p (insn))
return insn;
- return NULL_RTX;
+ return NULL;
}
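The NULL_RTX -> NULL replacements in these hunks fall out of the type
change: NULL_RTX is simply the rtx-typed null pointer,

    #define NULL_RTX (rtx) 0

and an rtx does not convert implicitly to the more-derived rtx_insn *,
so once a variable or return type is strengthened, plain NULL is the
null value that still compiles.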
/* True when INSN is unknown, but important, for bundling. */
/* Add a bundle selector TEMPLATE0 before INSN. */
static void
-ia64_add_bundle_selector_before (int template0, rtx insn)
+ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
{
rtx b = gen_bundle_selector (GEN_INT (template0));
EBB. */
static void
-bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
+bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
{
struct bundle_state *curr_state, *next_state, *best_state;
- rtx insn, next_insn;
+ rtx_insn *insn, *next_insn;
int insn_num;
int i, bundle_end_p, only_bundle_end_p, asm_p;
int pos = 0, max_pos, template0, template1;
- rtx b;
- rtx nop;
+ rtx_insn *b;
enum attr_type type;
insn_num = 0;
/* Emit nops after the current insn. */
for (i = 0; i < curr_state->after_nops_num; i++)
{
- nop = gen_nop ();
- emit_insn_after (nop, insn);
+ rtx nop_pat = gen_nop ();
+ rtx_insn *nop = emit_insn_after (nop_pat, insn);
pos--;
gcc_assert (pos >= 0);
if (pos % 3 == 0)
/* Emit nops after the current insn. */
for (i = 0; i < curr_state->before_nops_num; i++)
{
- nop = gen_nop ();
- ia64_emit_insn_before (nop, insn);
- nop = PREV_INSN (insn);
+ rtx nop_pat = gen_nop ();
+ ia64_emit_insn_before (nop_pat, insn);
+ rtx_insn *nop = PREV_INSN (insn);
insn = nop;
pos--;
gcc_assert (pos >= 0);
start_bundle = true;
else
{
- rtx next_insn;
+ rtx_insn *next_insn;
for (next_insn = NEXT_INSN (insn);
next_insn && next_insn != tail;
extern int iq2000_check_split (rtx, enum machine_mode);
extern int iq2000_reg_mode_ok_for_base_p (rtx, enum machine_mode, int);
-extern const char * iq2000_fill_delay_slot (const char *, enum delay_type, rtx *, rtx);
-extern const char * iq2000_move_1word (rtx *, rtx, int);
+extern const char * iq2000_fill_delay_slot (const char *, enum delay_type, rtx *, rtx_insn *);
+extern const char * iq2000_move_1word (rtx *, rtx_insn *, int);
extern HOST_WIDE_INT iq2000_debugger_offset (rtx, HOST_WIDE_INT);
extern void final_prescan_insn (rtx_insn *, rtx *, int);
extern HOST_WIDE_INT compute_frame_size (HOST_WIDE_INT);
const char *
iq2000_fill_delay_slot (const char *ret, enum delay_type type, rtx operands[],
- rtx cur_insn)
+ rtx_insn *cur_insn)
{
rtx set_reg;
enum machine_mode mode;
- rtx next_insn = cur_insn ? NEXT_INSN (cur_insn) : NULL_RTX;
+ rtx_insn *next_insn = cur_insn ? NEXT_INSN (cur_insn) : NULL;
int num_nops;
if (type == DELAY_LOAD || type == DELAY_FCMP)
/* Return the appropriate instructions to move one operand to another. */
const char *
-iq2000_move_1word (rtx operands[], rtx insn, int unsignedp)
+iq2000_move_1word (rtx operands[], rtx_insn *insn, int unsignedp)
{
const char *ret = 0;
rtx op0 = operands[0];
(plus:SI (match_operand:SI 0 "register_operand" "d")
(label_ref:SI (match_operand 1 "" ""))))
(use (label_ref:SI (match_dup 1)))]
- "!(Pmode == DImode) && NEXT_INSN (operands[1]) != 0
- && GET_CODE (PATTERN (NEXT_INSN (operands[1]))) == ADDR_DIFF_VEC"
+ "!(Pmode == DImode) && NEXT_INSN (as_a <rtx_insn *> (operands[1])) != 0
+ && GET_CODE (PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[1])))) == ADDR_DIFF_VEC"
"*
{
return \"j\\t%0\";
static bool
m32c_function_needs_enter (void)
{
- rtx insn;
+ rtx_insn *insn;
struct sequence_stack *seq;
rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
if (GET_CODE (x) != LABEL_REF)
return false;
- x = XEXP (x, 0);
- if (!NEXT_INSN (x) && !PREV_INSN (x))
+ rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
+ if (!NEXT_INSN (insn) && !PREV_INSN (insn))
return true;
- x = next_nonnote_insn (x);
- return x && JUMP_TABLE_DATA_P (x);
+ insn = next_nonnote_insn (insn);
+ return insn && JUMP_TABLE_DATA_P (insn);
}
/* Return true if X is a legitimate address for values of mode MODE.
/* Check that all instructions have DFA reservations and
that all instructions can be issued from a clean state. */
{
- rtx insn;
+ rtx_insn *insn;
state_t state;
state = alloca (state_size ());
- for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
{
if (INSN_P (insn) && recog_memoized (insn) >= 0)
{
extern const char * mcore_output_bseti (rtx, int);
extern const char * mcore_output_cmov (rtx *, int, const char *);
extern char * mcore_output_call (rtx *, int);
-extern int mcore_is_dead (rtx, rtx);
+extern int mcore_is_dead (rtx_insn *, rtx);
extern int mcore_expand_insv (rtx *);
extern bool mcore_expand_block_move (rtx *);
extern const char * mcore_output_andn (rtx, rtx *);
static void layout_mcore_frame (struct mcore_frame *);
static void mcore_setup_incoming_varargs (cumulative_args_t, enum machine_mode, tree, int *, int);
static cond_type is_cond_candidate (rtx);
-static rtx emit_new_cond_insn (rtx, int);
-static rtx conditionalize_block (rtx);
+static rtx_insn *emit_new_cond_insn (rtx, int);
+static rtx_insn *conditionalize_block (rtx_insn *);
static void conditionalize_optimization (void);
static void mcore_reorg (void);
static rtx handle_structs_in_regs (enum machine_mode, const_tree, int);
can ignore subregs by extracting the actual register. BRC */
int
-mcore_is_dead (rtx first, rtx reg)
+mcore_is_dead (rtx_insn *first, rtx reg)
{
- rtx insn;
+ rtx_insn *insn;
/* For mcore, subregs can't live independently of their parent regs. */
if (GET_CODE (reg) == SUBREG)
/* Emit a conditional version of insn and replace the old insn with the
new one. Return the new insn if emitted. */
-static rtx
+static rtx_insn *
emit_new_cond_insn (rtx insn, int cond)
{
rtx c_insn = 0;
delete_insn (insn);
- return c_insn;
+ return as_a <rtx_insn *> (c_insn);
}
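The as_a in the return statement above is the checked downcast from
is-a.h; in outline (the real template is more general than this
sketch):

    template <typename T, typename U>
    inline T
    as_a (U *p)
    {
      gcc_checking_assert (is_a <T> (p));  /* free in release builds */
      return (T) p;
    }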
/* Attempt to change a basic block into a series of conditional insns. This
we can delete the L2 label if NUSES==1 and re-apply the optimization
starting at the last instruction of block 2. This may allow an entire
if-then-else statement to be conditionalized. BRC */
-static rtx
-conditionalize_block (rtx first)
+static rtx_insn *
+conditionalize_block (rtx_insn *first)
{
- rtx insn;
+ rtx_insn *insn;
rtx br_pat;
- rtx end_blk_1_br = 0;
- rtx end_blk_2_insn = 0;
- rtx start_blk_3_lab = 0;
+ rtx_insn *end_blk_1_br = 0;
+ rtx_insn *end_blk_2_insn = 0;
+ rtx_insn *start_blk_3_lab = 0;
int cond;
int br_lab_num;
int blk_size = 0;
for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
insn = NEXT_INSN (insn))
{
- rtx newinsn;
+ rtx_insn *newinsn;
if (INSN_DELETED_P (insn))
continue;
static void
conditionalize_optimization (void)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = conditionalize_block (insn))
continue;
&& (NOTE_P (ret)
|| LABEL_P (ret)
|| GET_CODE (PATTERN (ret)) == USE))
- ret = NEXT_INSN (ret);
+ ret = NEXT_INSN (as_a <rtx_insn *> (ret));
if (ret)
{
(plus:SI (match_operand:SI 0 "register_operand" "d")
(label_ref:SI (match_operand 1 "" ""))))
(use (label_ref:SI (match_dup 1)))]
- "NEXT_INSN (operands[1]) != 0
- && GET_CODE (PATTERN (NEXT_INSN (operands[1]))) == ADDR_DIFF_VEC
+ "NEXT_INSN (as_a <rtx_insn *> (operands[1])) != 0
+ && GET_CODE (PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[1])))) == ADDR_DIFF_VEC
&& flag_pic"
{
output_asm_insn ("addk\t%0,%0,r20",operands);
const struct mips_frame_info *frame;
HOST_WIDE_INT size;
unsigned int nargs;
- rtx insn;
if (cfun->machine->global_pointer != INVALID_REGNUM)
{
/* Build the save instruction. */
mask = frame->mask;
- insn = mips16e_build_save_restore (false, &mask, &offset,
- nargs, step1);
+ rtx insn = mips16e_build_save_restore (false, &mask, &offset,
+ nargs, step1);
RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
mips_frame_barrier ();
size -= step1;
}
/* Allocate the first part of the frame. */
- insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (-step1));
+ rtx insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-step1));
RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
mips_frame_barrier ();
size -= step1;
}
else
{
- insn = gen_add3_insn (stack_pointer_rtx,
- stack_pointer_rtx,
- GEN_INT (-step1));
+ rtx insn = gen_add3_insn (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-step1));
RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
mips_frame_barrier ();
size -= step1;
offset = frame->hard_frame_pointer_offset;
if (offset == 0)
{
- insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
+ rtx insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
}
else if (SMALL_OPERAND (offset))
{
- insn = gen_add3_insn (hard_frame_pointer_rtx,
- stack_pointer_rtx, GEN_INT (offset));
+ rtx insn = gen_add3_insn (hard_frame_pointer_rtx,
+ stack_pointer_rtx, GEN_INT (offset));
RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
}
else
/* We need to search back to the last use of K0 or K1. */
if (cfun->machine->interrupt_handler_p)
{
+ rtx_insn *insn;
for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
if (INSN_P (insn)
&& for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
(clobber (reg:SI MIPS16_T_REGNUM))]
"TARGET_MIPS16_SHORT_JUMP_TABLES"
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
static bool
mn10300_block_contains_call (basic_block block)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (block, insn)
if (CALL_P (insn))
int symbol_count = 0;
int threshold;
- rtx insn;
+ rtx_insn *insn;
/* We check if there already requires prologue.
Note that $gp will be saved in prologue for PIC code generation.
enum machine_mode mode;
rtx diff_vec;
- diff_vec = PATTERN (NEXT_INSN (operands[1]));
+ diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[1])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
extern rtx pa_eh_return_handler_rtx (void);
/* Used in insn-*.c. */
-extern int pa_following_call (rtx);
+extern int pa_following_call (rtx_insn *);
/* Define functions in pa.c and used in insn-output.c. */
extern const char *pa_output_fp_move_double (rtx *);
extern const char *pa_output_block_move (rtx *, int);
extern const char *pa_output_block_clear (rtx *, int);
-extern const char *pa_output_cbranch (rtx *, int, rtx);
-extern const char *pa_output_lbranch (rtx, rtx, int);
-extern const char *pa_output_bb (rtx *, int, rtx, int);
-extern const char *pa_output_bvb (rtx *, int, rtx, int);
-extern const char *pa_output_dbra (rtx *, rtx, int);
-extern const char *pa_output_movb (rtx *, rtx, int, int);
-extern const char *pa_output_parallel_movb (rtx *, rtx);
-extern const char *pa_output_parallel_addb (rtx *, rtx);
+extern const char *pa_output_cbranch (rtx *, int, rtx_insn *);
+extern const char *pa_output_lbranch (rtx, rtx_insn *, int);
+extern const char *pa_output_bb (rtx *, int, rtx_insn *, int);
+extern const char *pa_output_bvb (rtx *, int, rtx_insn *, int);
+extern const char *pa_output_dbra (rtx *, rtx_insn *, int);
+extern const char *pa_output_movb (rtx *, rtx_insn *, int, int);
+extern const char *pa_output_parallel_movb (rtx *, rtx_insn *);
+extern const char *pa_output_parallel_addb (rtx *, rtx_insn *);
extern const char *pa_output_call (rtx_insn *, rtx, int);
extern const char *pa_output_indirect_call (rtx_insn *, rtx);
-extern const char *pa_output_millicode_call (rtx, rtx);
-extern const char *pa_output_mul_insn (int, rtx);
-extern const char *pa_output_div_insn (rtx *, int, rtx);
-extern const char *pa_output_mod_insn (int, rtx);
+extern const char *pa_output_millicode_call (rtx_insn *, rtx);
+extern const char *pa_output_mul_insn (int, rtx_insn *);
+extern const char *pa_output_div_insn (rtx *, int, rtx_insn *);
+extern const char *pa_output_mod_insn (int, rtx_insn *);
extern const char *pa_singlemove_string (rtx *);
extern void pa_output_addr_vec (rtx, rtx);
extern void pa_output_addr_diff_vec (rtx, rtx);
extern int pa_emit_move_sequence (rtx *, enum machine_mode, rtx);
extern int pa_emit_hpdiv_const (rtx *, int);
extern int pa_is_function_label_plus_const (rtx);
-extern int pa_jump_in_call_delay (rtx);
+extern int pa_jump_in_call_delay (rtx_insn *);
extern int pa_fpstore_bypass_p (rtx, rtx);
extern int pa_attr_length_millicode_call (rtx_insn *);
extern int pa_attr_length_call (rtx_insn *, int);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
-static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
-static bool forward_branch_p (rtx);
+static int pa_can_combine_p (rtx, rtx_insn *, rtx_insn *, int, rtx, rtx, rtx);
+static bool forward_branch_p (rtx_insn *);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static void compute_zdepdi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
the proper registers. */
const char *
-pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
+pa_output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx_insn *insn)
{
import_milli (mulI);
return pa_output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
}
const char *
-pa_output_div_insn (rtx *operands, int unsignedp, rtx insn)
+pa_output_div_insn (rtx *operands, int unsignedp, rtx_insn *insn)
{
int divisor;
/* Output a $$rem millicode to do mod. */
const char *
-pa_output_mod_insn (int unsignedp, rtx insn)
+pa_output_mod_insn (int unsignedp, rtx_insn *insn)
{
if (unsignedp)
{
parameters. */
const char *
-pa_output_cbranch (rtx *operands, int negated, rtx insn)
+pa_output_cbranch (rtx *operands, int negated, rtx_insn *insn)
{
static char buf[100];
bool useskip;
bytes for the portable runtime, non-PIC and PIC cases, respectively. */
const char *
-pa_output_lbranch (rtx dest, rtx insn, int xdelay)
+pa_output_lbranch (rtx dest, rtx_insn *insn, int xdelay)
{
rtx xoperands[2];
above. it returns the appropriate output template to emit the branch. */
const char *
-pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
+pa_output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn, int which)
{
static char buf[100];
bool useskip;
branch. */
const char *
-pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn,
+pa_output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx_insn *insn,
int which)
{
static char buf[100];
Note it may perform some output operations on its own before
returning the final output string. */
const char *
-pa_output_dbra (rtx *operands, rtx insn, int which_alternative)
+pa_output_dbra (rtx *operands, rtx_insn *insn, int which_alternative)
{
int length = get_attr_length (insn);
Note it may perform some output operations on its own before
returning the final output string. */
const char *
-pa_output_movb (rtx *operands, rtx insn, int which_alternative,
+pa_output_movb (rtx *operands, rtx_insn *insn, int which_alternative,
int reverse_comparison)
{
int length = get_attr_length (insn);
CALL_DEST is the routine we are calling. */
const char *
-pa_output_millicode_call (rtx insn, rtx call_dest)
+pa_output_millicode_call (rtx_insn *insn, rtx call_dest)
{
int attr_length = get_attr_length (insn);
int seq_length = dbr_sequence_length ();
sequence insn's address. */
if (INSN_ADDRESSES_SET_P ())
{
- seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
+ seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
- INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
 sequence insn's address. This would break the regular call/return
relationship assumed by the table based eh unwinder, so only do that
if the call is not possibly throwing. */
- rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
+ rtx seq_insn = NEXT_INSN (PREV_INSN (final_sequence->insn (0)));
int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
- INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
/* Return TRUE if INSN branches forward. */
static bool
-forward_branch_p (rtx insn)
+forward_branch_p (rtx_insn *insn)
{
rtx lab = JUMP_LABEL (insn);
/* Return 1 if INSN is in the delay slot of a call instruction. */
int
-pa_jump_in_call_delay (rtx insn)
+pa_jump_in_call_delay (rtx_insn *insn)
{
if (! JUMP_P (insn))
/* Output an unconditional move and branch insn. */
const char *
-pa_output_parallel_movb (rtx *operands, rtx insn)
+pa_output_parallel_movb (rtx *operands, rtx_insn *insn)
{
int length = get_attr_length (insn);
/* Output an unconditional add and branch insn. */
const char *
-pa_output_parallel_addb (rtx *operands, rtx insn)
+pa_output_parallel_addb (rtx *operands, rtx_insn *insn)
{
int length = get_attr_length (insn);
the delay slot of the call. */
int
-pa_following_call (rtx insn)
+pa_following_call (rtx_insn *insn)
{
if (! TARGET_JUMP_IN_DELAY)
return 0;
static void
pa_combine_instructions (void)
{
- rtx anchor, new_rtx;
+ rtx_insn *anchor;
+ rtx new_rtx;
/* This can get expensive since the basic algorithm is on the
order of O(n^2) (or worse). Only do it for -O2 or higher
|| (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
&& ! forward_branch_p (anchor)))
{
- rtx floater;
+ rtx_insn *floater;
for (floater = PREV_INSN (anchor);
floater;
/* Anything except a regular INSN will stop our search. */
if (! NONJUMP_INSN_P (floater))
{
- floater = NULL_RTX;
+ floater = NULL;
break;
}
/* Anything except a regular INSN will stop our search. */
if (! NONJUMP_INSN_P (floater))
{
- floater = NULL_RTX;
+ floater = NULL;
break;
}
}
static int
-pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
+pa_can_combine_p (rtx new_rtx, rtx_insn *anchor, rtx_insn *floater,
+ int reversed, rtx dest,
rtx src1, rtx src2)
{
int insn_code_number;
- rtx start, end;
+ rtx_insn *start, *end;
/* Create a PARALLEL with the patterns of ANCHOR and
FLOATER, try to recognize it, then test constraints
/* Reset any information about the current VLIW packing status. */
static void
-picochip_reset_vliw (rtx insn)
+picochip_reset_vliw (rtx_insn *insn)
{
- rtx local_insn = insn;
+ rtx_insn *local_insn = insn;
/* Nothing to do if VLIW scheduling isn't being used. */
if (picochip_schedule_type != DFA_TYPE_SPEED)
picochip_final_prescan_insn (rtx_insn *insn, rtx * opvec ATTRIBUTE_UNUSED,
int num_operands ATTRIBUTE_UNUSED)
{
- rtx local_insn;
+ rtx_insn *local_insn;
picochip_current_prescan_insn = insn;
are none. (This check is expensive, but seldom executed.) */
if (WORLD_SAVE_P (info_ptr))
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
if (CALL_P (insn) && SIBLING_CALL_P (insn))
{
static int
uses_TOC (void)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
if (INSN_P (insn))
static const char *
get_some_local_dynamic_name (void)
{
- rtx insn;
+ rtx_insn *insn;
if (cfun->machine->some_ld_name)
return cfun->machine->some_ld_name;
s390_mainpool_finish (struct constant_pool *pool)
{
rtx base_reg = cfun->machine->base_reg;
- rtx insn;
/* If the pool is empty, we're done. */
if (pool->size == 0)
located in the .rodata section, so we emit it after the function. */
if (TARGET_CPU_ZARCH)
{
- insn = gen_main_base_64 (base_reg, pool->label);
+ rtx insn = gen_main_base_64 (base_reg, pool->label);
insn = emit_insn_after (insn, pool->pool_insn);
INSN_ADDRESSES_NEW (insn, -1);
remove_insn (pool->pool_insn);
else if (INSN_ADDRESSES (INSN_UID (pool->emit_pool_after))
+ pool->size + 8 /* alignment slop */ < 4096)
{
- insn = gen_main_base_31_small (base_reg, pool->label);
+ rtx insn = gen_main_base_31_small (base_reg, pool->label);
insn = emit_insn_after (insn, pool->pool_insn);
INSN_ADDRESSES_NEW (insn, -1);
remove_insn (pool->pool_insn);
{
rtx pool_end = gen_label_rtx ();
- insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
+ rtx insn = gen_main_base_31_large (base_reg, pool->label, pool_end);
insn = emit_jump_insn_after (insn, pool->pool_insn);
JUMP_LABEL (insn) = pool_end;
INSN_ADDRESSES_NEW (insn, -1);
/* Replace all literal pool references. */
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
replace_ltrel_base (&PATTERN (insn));
if (XINT (SET_SRC (pat), 1) == UNSPECV_TBEGIN)
{
- rtx tmp;
+ rtx_insn *tmp;
tbegin_insn = insn;
{
rtx_insn *insns = s390_load_got ();
- for (insn = insns; insn; insn = NEXT_INSN (insn))
+ for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
annotate_constant_pool_refs (&PATTERN (insn));
emit_insn (insns);
extern const char *output_far_jump (rtx_insn *, rtx);
extern rtx sfunc_uses_reg (rtx);
-extern int barrier_align (rtx);
+extern int barrier_align (rtx_insn *);
extern int sh_loop_align (rtx);
extern bool fp_zero_operand (rtx);
extern bool fp_one_operand (rtx);
extern bool gen_shl_sext (rtx, rtx, rtx, rtx);
extern rtx gen_datalabel_ref (rtx);
extern int regs_used (rtx, int);
-extern void fixup_addr_diff_vecs (rtx);
+extern void fixup_addr_diff_vecs (rtx_insn *);
extern int get_dest_uid (rtx, int);
extern void final_prescan_insn (rtx_insn *, rtx *, int);
extern enum tls_model tls_symbolic_operand (rtx, enum machine_mode);
extern bool system_reg_operand (rtx, enum machine_mode);
-extern bool reg_unused_after (rtx, rtx);
+extern bool reg_unused_after (rtx, rtx_insn *);
extern void expand_sf_unop (rtx (*)(rtx, rtx, rtx), rtx *);
extern void expand_sf_binop (rtx (*)(rtx, rtx, rtx, rtx), rtx *);
extern void expand_df_unop (rtx (*)(rtx, rtx, rtx), rtx *);
extern void expand_df_binop (rtx (*)(rtx, rtx, rtx, rtx), rtx *);
extern int sh_insn_length_adjustment (rtx_insn *);
-extern bool sh_can_redirect_branch (rtx, rtx);
+extern bool sh_can_redirect_branch (rtx_insn *, rtx_insn *);
extern void sh_expand_unop_v2sf (enum rtx_code, rtx, rtx);
extern void sh_expand_binop_v2sf (enum rtx_code, rtx, rtx, rtx);
extern bool sh_expand_t_scc (rtx *);
(enum machine_mode, enum machine_mode, enum reg_class);
extern bool sh_small_register_classes_for_mode_p (enum machine_mode);
extern void sh_mark_label (rtx, int);
-extern bool check_use_sfunc_addr (rtx, rtx);
+extern bool check_use_sfunc_addr (rtx_insn *, rtx);
#ifdef HARD_CONST
extern void fpscr_set_from_mem (int, HARD_REG_SET);
static bool mova_p (rtx_insn *);
static rtx_insn *find_barrier (int, rtx_insn *, rtx_insn *);
static bool noncall_uses_reg (rtx, rtx, rtx *);
-static rtx gen_block_redirect (rtx, int, int);
+static rtx_insn *gen_block_redirect (rtx_insn *, int, int);
static void sh_reorg (void);
static void sh_option_override (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
static bool sh_legitimate_constant_p (enum machine_mode, rtx);
static int mov_insn_size (enum machine_mode, bool);
static int mov_insn_alignment_mask (enum machine_mode, bool);
-static bool sequence_insn_p (rtx);
+static bool sequence_insn_p (rtx_insn *);
static void sh_canonicalize_comparison (int *, rtx *, rtx *, bool);
static void sh_canonicalize_comparison (enum rtx_code&, rtx&, rtx&,
enum machine_mode, bool);
}
else
{
- rtx worker = mova;
+ rtx_insn *worker = mova;
rtx lab = gen_label_rtx ();
rtx wpat, wpat0, wpat1, wsrc, target, base, diff;
int si_limit;
int hi_limit;
rtx_insn *orig = from;
- rtx last_got = NULL_RTX;
+ rtx_insn *last_got = NULL;
rtx_insn *last_symoff = NULL;
/* For HImode: range is 510, add 4 because pc counts from address of
instructions. (plus add r0,r12).
Remember if we see one without the other. */
if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
- last_got = last_got ? NULL_RTX : from;
+ last_got = last_got ? NULL : from;
else if (PIC_ADDR_P (src))
- last_got = last_got ? NULL_RTX : from;
+ last_got = last_got ? NULL : from;
/* We must explicitly check the mode, because sometimes the
front end will generate code to load unsigned constants into
pass 1. Pass 2 if a definite blocking insn is needed.
-1 is used internally to avoid deep recursion.
If a blocking instruction is made or recognized, return it. */
-static rtx
-gen_block_redirect (rtx jump, int addr, int need_block)
+static rtx_insn *
+gen_block_redirect (rtx_insn *jump, int addr, int need_block)
{
int dead = 0;
- rtx prev = prev_nonnote_insn (jump);
+ rtx_insn *prev = prev_nonnote_insn (jump);
rtx dest;
/* First, check if we already have an instruction that satisfies our need. */
&& (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
> 4092 + 4098))
{
- rtx scan;
+ rtx_insn *scan;
/* Don't look for the stack pointer as a scratch register,
it would cause trouble if an interrupt occurred. */
unsigned attempt = 0x7fff, used;
break;
}
}
- for (used = dead = 0, scan = JUMP_LABEL (jump);
+ for (used = dead = 0, scan = JUMP_LABEL_AS_INSN (jump);
(scan = NEXT_INSN (scan)); )
{
enum rtx_code code;
if (code == JUMP_INSN)
{
if (jump_left-- && simplejump_p (scan))
- scan = JUMP_LABEL (scan);
+ scan = JUMP_LABEL_AS_INSN (scan);
else
break;
}
else if (optimize && need_block >= 0)
{
- rtx next = next_active_insn (next_active_insn (dest));
+ rtx_insn *next = next_active_insn (next_active_insn (dest));
if (next && JUMP_P (next)
&& GET_CODE (PATTERN (next)) == SET
&& recog_memoized (next) == CODE_FOR_jump_compact)
it should try to schedule instructions from the target of the
branch; simplejump_p fails for indirect jumps even if they have
a JUMP_LABEL. */
- rtx insn = emit_insn_before (gen_indirect_jump_scratch
- (reg, GEN_INT (unspec_bbr_uid++)),
- jump);
+ rtx_insn *insn = emit_insn_before (gen_indirect_jump_scratch
+ (reg, GEN_INT (unspec_bbr_uid++)),
+ jump);
/* ??? We would like this to have the scope of the jump, but that
scope will change when a delay slot insn of an inner scope is added.
Hence, after delay slot scheduling, we'll have to expect
{
/* A label (to be placed) in front of the jump
that jumps to our ultimate destination. */
- rtx near_label;
+ rtx_insn *near_label;
/* Where we are going to insert it if we cannot move the jump any farther,
or the jump itself if we have picked up an existing jump. */
- rtx insert_place;
+ rtx_insn *insert_place;
/* The ultimate destination. */
- rtx far_label;
+ rtx_insn *far_label;
struct far_branch *prev;
/* If the branch has already been created, its address;
else the address of its first prospective user. */
gen_far_branch (struct far_branch *bp)
{
rtx insn = bp->insert_place;
- rtx jump;
+ rtx_insn *jump;
rtx label = gen_label_rtx ();
int ok;
/* Fix up ADDR_DIFF_VECs. */
void
-fixup_addr_diff_vecs (rtx first)
+fixup_addr_diff_vecs (rtx_insn *first)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = first; insn; insn = NEXT_INSN (insn))
{
- rtx vec_lab, pat, prev, prevpat, x, braf_label;
+ rtx vec_lab, pat, prevpat, x, braf_label;
+ rtx_insn *prev;
if (! JUMP_TABLE_DATA_P (insn)
|| GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
vec_lab = XEXP (XEXP (pat, 0), 0);
/* Search the matching casesi_jump_2. */
- for (prev = vec_lab; ; prev = PREV_INSN (prev))
+ for (prev = as_a <rtx_insn *> (vec_lab); ; prev = PREV_INSN (prev))
{
if (!JUMP_P (prev))
continue;
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
a barrier. Return the base 2 logarithm of the desired alignment. */
int
-barrier_align (rtx barrier_or_label)
+barrier_align (rtx_insn *barrier_or_label)
{
rtx next, pat;
/* Skip to the insn before the JUMP_INSN before the barrier under
investigation. */
- rtx prev = prev_real_insn (prev_active_insn (barrier_or_label));
+ rtx_insn *prev = prev_real_insn (prev_active_insn (barrier_or_label));
for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
credit >= 0 && prev && NONJUMP_INSN_P (prev);
if (GET_CODE (PATTERN (prev)) == USE
|| GET_CODE (PATTERN (prev)) == CLOBBER)
continue;
- if (GET_CODE (PATTERN (prev)) == SEQUENCE)
+ if (rtx_sequence *prev_seq = dyn_cast <rtx_sequence *> (PATTERN (prev)))
{
- prev = XVECEXP (PATTERN (prev), 0, 1);
+ prev = prev_seq->insn (1);
if (INSN_UID (prev) == INSN_UID (next))
{
/* Delay slot was filled with insn at jump target. */
}
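
The dyn_cast here replaces the GET_CODE (PATTERN (prev)) == SEQUENCE test: the check and the downcast happen in one step, and the resulting rtx_sequence * exposes insn (1), which returns rtx_insn * where XVECEXP returned a bare rtx. A hedged sketch of this recurring shape (visit_delay_slots and its callback are illustrative names, not functions in this patch):

/* Sketch: test-and-downcast a delay-slot SEQUENCE in one step.  */
static void
visit_delay_slots (rtx_insn *insn, void (*callback) (rtx_insn *))
{
  if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    /* Element 0 is the branch itself; the rest fill its delay slots.  */
    for (int i = 1; i < seq->len (); i++)
      callback (seq->insn (i));
}
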
if (prev && jump_to_label_p (prev))
{
- rtx x;
+ rtx_insn *x;
if (jump_to_next
|| next_real_insn (JUMP_LABEL (prev)) == next
/* If relax_delay_slots() decides NEXT was redundant
|| (prev_nonnote_insn (insn)
== XEXP (MOVA_LABELREF (mova), 0))))
{
- rtx scan;
+ rtx_insn *scan;
int total;
num_mova--;
int
get_dest_uid (rtx label, int max_uid)
{
- rtx dest = next_real_insn (label);
+ rtx_insn *dest = next_real_insn (label);
int dest_uid;
if (! dest)
/* This can happen for an undefined label. */
enum attr_type type = get_attr_type (insn);
if (type == TYPE_CBRANCH)
{
- rtx next, beyond;
+ rtx_insn *next, *beyond;
if (get_attr_length (insn) > 4)
{
rtx src = SET_SRC (PATTERN (insn));
rtx olabel = XEXP (XEXP (src, 1), 0);
int addr = INSN_ADDRESSES (INSN_UID (insn));
- rtx label = 0;
+ rtx_insn *label = 0;
int dest_uid = get_dest_uid (olabel, max_uid);
struct far_branch *bp = uid_branch[dest_uid];
uid_branch[dest_uid] = bp;
bp->prev = far_branch_list;
far_branch_list = bp;
- bp->far_label
- = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
+ bp->far_label = as_a <rtx_insn *> (
+ XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
+ 0));
LABEL_NUSES (bp->far_label)++;
}
else
label = bp->near_label;
if (! label && bp->address - addr >= CONDJUMP_MIN)
{
- rtx block = bp->insert_place;
+ rtx_insn *block = bp->insert_place;
if (GET_CODE (PATTERN (block)) == RETURN)
block = PREV_INSN (block);
else if (type == TYPE_JUMP || type == TYPE_RETURN)
{
int addr = INSN_ADDRESSES (INSN_UID (insn));
- rtx far_label = 0;
+ rtx_insn *far_label = 0;
int dest_uid = 0;
struct far_branch *bp;
if (type == TYPE_JUMP)
{
- far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
+ far_label = as_a <rtx_insn *> (
+ XEXP (SET_SRC (PATTERN (insn)), 0));
dest_uid = get_dest_uid (far_label, max_uid);
if (! dest_uid)
{
We assume REG is a reload reg, and therefore does
not live past labels. It may live past calls or jumps though. */
bool
-reg_unused_after (rtx reg, rtx insn)
+reg_unused_after (rtx reg, rtx_insn *insn)
{
enum rtx_code code;
rtx set;
#endif
static bool
-sequence_insn_p (rtx insn)
+sequence_insn_p (rtx_insn *insn)
{
rtx_insn *prev, *next;
static rtx
mark_constant_pool_use (rtx x)
{
- rtx insn, lab, pattern;
+ rtx_insn *insn, *lab;
+ rtx pattern;
if (x == NULL_RTX)
return x;
/* Get the first label in the list of labels for the same constant
and delete another labels in the list. */
- lab = x;
- for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
+ lab = as_a <rtx_insn *> (x);
+ for (insn = PREV_INSN (lab); insn; insn = PREV_INSN (insn))
{
if (!LABEL_P (insn)
|| LABEL_REFS (insn) != NEXT_INSN (insn))
lab = insn;
}
- for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
+ for (rtx insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
INSN_DELETED_P (insn) = 1;
/* Mark constants in a window. */
- for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
+ for (insn = NEXT_INSN (as_a <rtx_insn *> (x)); insn; insn = NEXT_INSN (insn))
{
if (!NONJUMP_INSN_P (insn))
continue;
of an unconditional jump BRANCH2. We only want to do this if the
resulting branch will have a short displacement. */
bool
-sh_can_redirect_branch (rtx branch1, rtx branch2)
+sh_can_redirect_branch (rtx_insn *branch1, rtx_insn *branch2)
{
if (flag_expensive_optimizations && simplejump_p (branch2))
{
INSN is the use_sfunc_addr instruction, and REG is the register it
guards. */
bool
-check_use_sfunc_addr (rtx insn, rtx reg)
+check_use_sfunc_addr (rtx_insn *insn, rtx reg)
{
/* Search for the sfunc. It should really come right after INSN. */
while ((insn = NEXT_INSN (insn)))
if (! INSN_P (insn))
continue;
- if (GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = XVECEXP (PATTERN (insn), 0, 0);
+ if (rtx_sequence *seq = dyn_cast<rtx_sequence *> (PATTERN (insn)))
+ insn = seq->insn (0);
if (GET_CODE (PATTERN (insn)) != PARALLEL
|| get_attr_type (insn) != TYPE_SFUNC)
continue;
sh_expand_epilogue (true);
if (TARGET_SHCOMPACT)
{
- rtx insn, set;
+ rtx_insn *insn;
+ rtx set;
/* If epilogue clobbers r0, preserve it in macl. */
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
(clobber (match_scratch:SI 3 "=X,1"))]
"TARGET_SH1"
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
(clobber (match_operand:SI 4 "" "=X,1"))]
"TARGET_SH2 && reload_completed && flag_pic"
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
switch (GET_MODE (diff_vec))
UNSPEC_CASESI)))]
"TARGET_SHMEDIA"
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[2]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
(label_ref:DI (match_operand 3 "" ""))] UNSPEC_CASESI)))]
"TARGET_SHMEDIA"
{
- rtx diff_vec = PATTERN (NEXT_INSN (operands[3]));
+ rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[3])));
gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
struct ccreg_value
{
// The insn at which the ccreg value was determined.
- // Might be NULL_RTX if e.g. an unknown value is recorded for an
+ // Might be NULL if e.g. an unknown value is recorded for an
// empty basic block.
- rtx insn;
+ rtx_insn *insn;
// The basic block where the insn was discovered.
basic_block bb;
// Given a start insn and its basic block, recursively determine all
// possible ccreg values in all basic block paths that can lead to the
// start insn.
- void find_last_ccreg_values (rtx start_insn, basic_block bb,
+ void find_last_ccreg_values (rtx_insn *start_insn, basic_block bb,
std::vector<ccreg_value>& values_out,
std::vector<basic_block>& prev_visited_bb) const;
// be optimized.
basic_block bb;
FOR_EACH_BB_REVERSE_FN (bb, fun)
- for (rtx next_i, i = NEXT_INSN (BB_HEAD (bb));
+ for (rtx_insn *next_i, *i = NEXT_INSN (BB_HEAD (bb));
i != NULL_RTX && i != BB_END (bb); i = next_i)
{
next_i = NEXT_INSN (i);
void
sh_optimize_sett_clrt
-::find_last_ccreg_values (rtx start_insn, basic_block bb,
+::find_last_ccreg_values (rtx_insn *start_insn, basic_block bb,
std::vector<ccreg_value>& values_out,
std::vector<basic_block>& prev_visited_bb) const
{
log_msg ("(prev visited [bb %d])", prev_visited_bb.back ()->index);
log_msg ("\n");
- for (rtx i = start_insn; i != NULL_RTX && i != PREV_INSN (BB_HEAD (bb));
+ for (rtx_insn *i = start_insn; i != NULL && i != PREV_INSN (BB_HEAD (bb));
i = PREV_INSN (i))
{
if (!INSN_P (i))
struct set_of_reg
{
- // The insn where the search stopped or NULL_RTX.
- rtx insn;
+ // The insn where the search stopped or NULL.
+ rtx_insn *insn;
// The set rtx of the specified reg if found, NULL_RTX otherwise.
// Notice that the set rtx can also be in a parallel.
// Given a reg rtx and a start insn find the insn (in the same basic block)
// that sets the reg.
static set_of_reg
-find_set_of_reg_bb (rtx reg, rtx insn)
+find_set_of_reg_bb (rtx reg, rtx_insn *insn)
{
set_of_reg result = { insn, NULL_RTX };
- if (!REG_P (reg) || insn == NULL_RTX)
+ if (!REG_P (reg) || insn == NULL)
return result;
- for (result.insn = insn; result.insn != NULL_RTX;
+ for (result.insn = insn; result.insn != NULL;
result.insn = prev_nonnote_insn_bb (result.insn))
{
if (BARRIER_P (result.insn))
// Internal function of trace_reg_uses.
static void
-trace_reg_uses_1 (rtx reg, rtx start_insn, basic_block bb, int& count,
+trace_reg_uses_1 (rtx reg, rtx_insn *start_insn, basic_block bb, int& count,
std::vector<basic_block>& visited_bb, rtx abort_at_insn)
{
if (bb == NULL)
if (end_insn == NULL_RTX)
log_return_void ("[bb %d] end_insn is null\n", bb->index);
- for (rtx i = NEXT_INSN (start_insn); i != end_insn; i = NEXT_INSN (i))
+ for (rtx_insn *i = NEXT_INSN (start_insn); i != end_insn; i = NEXT_INSN (i))
{
if (INSN_P (i))
{
// that insn. If the insn 'abort_at_insn' uses the specified reg, it is also
// counted.
static int
-trace_reg_uses (rtx reg, rtx start_insn, rtx abort_at_insn)
+trace_reg_uses (rtx reg, rtx_insn *start_insn, rtx abort_at_insn)
{
log_msg ("\ntrace_reg_uses\nreg = ");
log_rtx (reg);
// A ccreg trace for a conditional branch.
struct cbranch_trace
{
- rtx cbranch_insn;
+ rtx_insn *cbranch_insn;
branch_condition_type_t cbranch_type;
// The comparison against zero right before the conditional branch.
// the BB of the cbranch itself and might be empty.
std::list<bb_entry> bb_entries;
- cbranch_trace (rtx insn)
+ cbranch_trace (rtx_insn *insn)
: cbranch_insn (insn),
cbranch_type (unknown_branch_condition),
setcc ()
set_not_found,
other_set_found
};
- record_return_t record_set_of_reg (rtx reg, rtx start_insn, bb_entry& e);
+ record_return_t record_set_of_reg (rtx reg, rtx_insn *start_insn,
+ bb_entry& e);
// Tells whether the cbranch insn of the specified bb_entry can be removed
// safely without triggering any side effects.
// Given a branch insn, try to optimize its branch condition.
// If any insns are modified or added they are added to 'm_touched_insns'.
- void try_optimize_cbranch (rtx i);
+ void try_optimize_cbranch (rtx_insn *i);
};
}
sh_treg_combine::record_return_t
-sh_treg_combine::record_set_of_reg (rtx reg, rtx start_insn,
+sh_treg_combine::record_set_of_reg (rtx reg, rtx_insn *start_insn,
bb_entry& new_entry)
{
log_msg ("\n[bb %d]\n", new_entry.bb->index);
new_entry.cstore_type = cstore_unknown;
- for (rtx i = start_insn; i != NULL_RTX; )
+ for (rtx_insn *i = start_insn; i != NULL; )
{
new_entry.cstore = find_set_of_reg_bb (reg, i);
// must not be a usage of the copied regs between the reg-reg copies.
// Otherwise we assume that the result of the cstore is used in some
// other way.
- rtx prev_insn = e.cstore.insn;
+ rtx_insn *prev_insn = e.cstore.insn;
for (std::vector<set_of_reg>::const_reverse_iterator i =
e.cstore_reg_reg_copies.rbegin ();
i != e.cstore_reg_reg_copies.rend (); ++i)
}
void
-sh_treg_combine::try_optimize_cbranch (rtx insn)
+sh_treg_combine::try_optimize_cbranch (rtx_insn *insn)
{
cbranch_trace trace (insn);
basic_block bb;
FOR_EACH_BB_FN (bb, fun)
{
- rtx i = BB_END (bb);
+ rtx_insn *i = BB_END (bb);
if (any_condjump_p (i) && onlyjump_p (i))
try_optimize_cbranch (i);
}
extern int registers_ok_for_ldd_peep (rtx, rtx);
extern int mems_ok_for_ldd_peep (rtx, rtx, rtx);
extern rtx widen_mem_for_ldd_peep (rtx, rtx, enum machine_mode);
-extern int empty_delay_slot (rtx);
+extern int empty_delay_slot (rtx_insn *);
extern int emit_cbcond_nop (rtx);
extern int eligible_for_call_delay (rtx);
extern int eligible_for_return_delay (rtx);
extern int v9_regcmp_p (enum rtx_code);
/* Function used for V8+ code generation. Returns 1 if the high
32 bits of REG are 0 before INSN. */
-extern int sparc_check_64 (rtx, rtx);
+extern int sparc_check_64 (rtx, rtx_insn *);
extern rtx gen_df_reg (rtx, int);
extern void sparc_expand_compare_and_swap (rtx op[]);
extern void sparc_expand_vector_init (rtx, rtx);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
-static int set_extends (rtx);
+static int set_extends (rtx_insn *);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef TARGET_SOLARIS
static unsigned int
sparc_do_work_around_errata (void)
{
- rtx insn, next;
+ rtx_insn *insn, *next;
/* Force all instructions to be split into their final form. */
split_all_insns_noflow ();
rtx set;
/* Look into the instruction in a delay slot. */
- if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = XVECEXP (PATTERN (insn), 0, 1);
+ if (NONJUMP_INSN_P (insn))
+ if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
+ insn = seq->insn (1);
/* Look for a single-word load into an odd-numbered FP register. */
if (sparc_fix_at697f
nop into its delay slot. */
int
-empty_delay_slot (rtx insn)
+empty_delay_slot (rtx_insn *insn)
{
rtx seq;
}
static int
-set_extends (rtx insn)
+set_extends (rtx_insn *insn)
{
register rtx pat = PATTERN (insn);
unknown. Return 1 if the high bits are zero, -1 if the register is
sign extended. */
int
-sparc_check_64 (rtx x, rtx insn)
+sparc_check_64 (rtx x, rtx_insn *insn)
{
/* If a register is set only once it is safe to ignore insns this
code does not know how to handle. The loop will either recognize
{
rtx op0 = XEXP (comparison, 0);
rtx op1 = XEXP (comparison, 1);
- rtx seq, last_insn;
+ rtx_insn *seq, *last_insn;
rtx compare;
start_sequence ();
patterns. */
static void
-combine_bnp (rtx insn)
+combine_bnp (rtx_insn *insn)
{
int insn_code, regno, need_extend;
unsigned int mask;
static void
xstormy16_reorg (void)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
taking care to save and preserve the ep. */
static void
-substitute_ep_register (rtx first_insn,
- rtx last_insn,
+substitute_ep_register (rtx_insn *first_insn,
+ rtx_insn *last_insn,
int uses,
int regno,
rtx * p_r1,
rtx * p_ep)
{
rtx reg = gen_rtx_REG (Pmode, regno);
- rtx insn;
+ rtx_insn *insn;
if (!*p_r1)
{
struct
{
int uses;
- rtx first_insn;
- rtx last_insn;
+ rtx_insn *first_insn;
+ rtx_insn *last_insn;
}
regs[FIRST_PSEUDO_REGISTER];
int use_ep = FALSE;
rtx r1 = NULL_RTX;
rtx ep = NULL_RTX;
- rtx insn;
+ rtx_insn *insn;
rtx pattern;
/* If not ep mode, just return now. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
- regs[i].first_insn = NULL_RTX;
- regs[i].last_insn = NULL_RTX;
+ regs[i].first_insn = NULL;
+ regs[i].last_insn = NULL;
}
for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
- regs[i].first_insn = NULL_RTX;
- regs[i].last_insn = NULL_RTX;
+ regs[i].first_insn = NULL;
+ regs[i].last_insn = NULL;
}
break;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
regs[i].uses = 0;
- regs[i].first_insn = NULL_RTX;
- regs[i].last_insn = NULL_RTX;
+ regs[i].first_insn = NULL;
+ regs[i].last_insn = NULL;
}
}
}
for (i = regno; i < endregno; i++)
{
regs[i].uses = 0;
- regs[i].first_insn = NULL_RTX;
- regs[i].last_insn = NULL_RTX;
+ regs[i].first_insn = NULL;
+ regs[i].last_insn = NULL;
}
}
}
}
static rtx_note *
-emit_note_eh_region_end (rtx insn)
+emit_note_eh_region_end (rtx_insn *insn)
{
rtx_insn *next = NEXT_INSN (insn);
/* If we are outputting an insn sequence, this contains the sequence rtx.
Zero otherwise. */
-rtx final_sequence;
+rtx_sequence *final_sequence;
#ifdef ASSEMBLER_DIALECT
{
rtx body = PATTERN (insn);
int old_length = insn_lengths[uid];
- rtx rel_lab = XEXP (XEXP (body, 0), 0);
+ rtx_insn *rel_lab =
+ safe_as_a <rtx_insn *> (XEXP (XEXP (body, 0), 0));
rtx min_lab = XEXP (XEXP (body, 2), 0);
rtx max_lab = XEXP (XEXP (body, 3), 0);
int rel_addr = INSN_ADDRESSES (INSN_UID (rel_lab));
int min_addr = INSN_ADDRESSES (INSN_UID (min_lab));
int max_addr = INSN_ADDRESSES (INSN_UID (max_lab));
- rtx prev;
+ rtx_insn *prev;
int rel_align = 0;
addr_diff_vec_flags flags;
enum machine_mode vec_mode;
/* A delayed-branch sequence */
int i;
- final_sequence = body;
+ final_sequence = seq;
/* The first insn in this SEQUENCE might be a JUMP_INSN that will
force the restoration of a comparison that was previously
&& insn_operand_matches (icode, 1, op1))
{
enum rtx_code code = unsignedp ? ZERO_EXTEND : SIGN_EXTEND;
- rtx insn, insns, t = op1;
+ rtx_insn *insn, *insns;
+ rtx t = op1;
HARD_REG_SET hardregs;
start_sequence ();
}
else
t = op1;
- insn = gen_extend_insn (op0, t, promoted_nominal_mode,
- data->passed_mode, unsignedp);
+ insn = as_a <rtx_insn *> (
+ gen_extend_insn (op0, t, promoted_nominal_mode,
+ data->passed_mode, unsignedp));
emit_insn (insn);
insns = get_insns ();
if (! have_delay)
{
printf ("extern int num_delay_slots (rtx);\n");
- printf ("extern int eligible_for_delay (rtx, int, rtx, int);\n\n");
+ printf ("extern int eligible_for_delay (rtx_insn *, int, rtx_insn *, int);\n\n");
printf ("extern int const_num_delay_slots (rtx);\n\n");
have_delay = 1;
}
if (XVECEXP (desc, 1, i + 1) && ! have_annul_true)
{
printf ("#define ANNUL_IFTRUE_SLOTS\n");
- printf ("extern int eligible_for_annul_true (rtx, int, rtx, int);\n");
+ printf ("extern int eligible_for_annul_true (rtx_insn *, int, rtx_insn *, int);\n");
have_annul_true = 1;
}
if (XVECEXP (desc, 1, i + 2) && ! have_annul_false)
{
printf ("#define ANNUL_IFFALSE_SLOTS\n");
- printf ("extern int eligible_for_annul_false (rtx, int, rtx, int);\n");
+ printf ("extern int eligible_for_annul_false (rtx_insn *, int, rtx_insn *, int);\n");
have_annul_false = 1;
}
}
/* Write function prelude. */
fprintf (outf, "int\n");
- fprintf (outf, "eligible_for_%s (rtx delay_insn ATTRIBUTE_UNUSED, int slot, \n"
- " rtx candidate_insn, int flags ATTRIBUTE_UNUSED)\n",
+ fprintf (outf, "eligible_for_%s (rtx_insn *delay_insn ATTRIBUTE_UNUSED, int slot, \n"
+ " rtx_insn *candidate_insn, int flags ATTRIBUTE_UNUSED)\n",
kind);
fprintf (outf, "{\n");
- fprintf (outf, " rtx insn;\n");
+ fprintf (outf, " rtx_insn *insn;\n");
fprintf (outf, "\n");
fprintf (outf, " gcc_assert (slot < %d);\n", max_slots);
fprintf (outf, "\n");
FOR_EACH_BB_FN (bb, cfun)
{
rtx_insn *tail = BB_END (bb);
- rtx insn, reg;
+ rtx_insn *insn;
+ rtx reg;
while (tail && NOTE_P (tail) && tail != BB_HEAD (bb))
tail = PREV_INSN (tail);
/* There's a degenerate case we can handle - an empty loop consisting
of only a back branch. Handle that by deleting the branch. */
- insn = JUMP_LABEL (tail);
+ insn = JUMP_LABEL_AS_INSN (tail);
while (insn && !NONDEBUG_INSN_P (insn))
insn = NEXT_INSN (insn);
if (insn == tail)
rtx iter_reg;
/* The new label placed at the beginning of the loop. */
- rtx start_label;
+ rtx_insn *start_label;
/* The new label placed at the end of the loop. */
rtx end_label;
{
basic_block bb;
rtx_insn *insn;
- rtx set, src, dest, dest_death, p, q, note;
+ rtx set, src, dest, dest_death, q, note;
+ rtx_insn *p;
int sregno, dregno;
if (! flag_expensive_optimizations)
to update equiv info for register shuffles on the region borders
and for caller save/restore insns. */
void
-ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx insns)
+ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno,
+ rtx_insn *insns)
{
- rtx insn, x, note;
+ rtx_insn *insn;
+ rtx x, note;
if (! ira_reg_equiv[from_regno].defined_p
&& (! ira_reg_equiv[to_regno].defined_p
Return 1 if MEMREF remains valid. */
static int
-validate_equiv_mem (rtx start, rtx reg, rtx memref)
+validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref)
{
- rtx insn;
+ rtx_insn *insn;
rtx note;
equiv_mem = memref;
/* TRUE if some insn in the range (START, END] references a memory location
that would be affected by a store to MEMREF. */
static int
-memref_used_between_p (rtx memref, rtx start, rtx end)
+memref_used_between_p (rtx memref, rtx_insn *start, rtx_insn *end)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
insn = NEXT_INSN (insn))
extern void ira_set_pseudo_classes (bool, FILE *);
extern void ira_implicitly_set_insn_hard_regs (HARD_REG_SET *);
extern void ira_expand_reg_equiv (void);
-extern void ira_update_equiv_info_by_shuffle_insn (int, int, rtx);
+extern void ira_update_equiv_info_by_shuffle_insn (int, int, rtx_insn *);
extern void ira_sort_regnos_for_alter_reg (int *, int, unsigned int *);
extern void ira_mark_allocation_change (int);
doloop_pat = doloop_seq;
if (INSN_P (doloop_pat))
{
- while (NEXT_INSN (doloop_pat) != NULL_RTX)
- doloop_pat = NEXT_INSN (doloop_pat);
- if (!JUMP_P (doloop_pat))
- doloop_pat = NULL_RTX;
+ rtx_insn *doloop_insn = as_a <rtx_insn *> (doloop_pat);
+ while (NEXT_INSN (doloop_insn) != NULL_RTX)
+ doloop_insn = NEXT_INSN (doloop_insn);
+ if (!JUMP_P (doloop_insn))
+ doloop_insn = NULL;
+ doloop_pat = doloop_insn;
}
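
doloop_pat itself must remain rtx, since it can also hold a bare pattern, so the walk goes through a new rtx_insn * local initialized with a checked cast. Per is-a.h, as_a asserts the conversion in checking builds and adds nothing in release builds. A small sketch of that contract (last_jump_of is a made-up name):

/* Sketch: as_a <rtx_insn *> rejects a non-insn PAT in checking builds;
   afterwards NEXT_INSN and JUMP_P apply to the typed local directly.  */
static rtx_insn *
last_jump_of (rtx pat)
{
  rtx_insn *insn = as_a <rtx_insn *> (pat);
  while (NEXT_INSN (insn) != NULL)
    insn = NEXT_INSN (insn);
  return JUMP_P (insn) ? insn : NULL;
}
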
if (! doloop_pat
insn output code.
This variable is defined in final.c. */
-extern rtx final_sequence;
+extern rtx_sequence *final_sequence;
/* The line number of the beginning of the current function. Various
md code needs this so that it can output relative linenumbers. */
if the replacement is rejected. */
static rtx
-peep2_attempt (basic_block bb, rtx insn, int match_len, rtx attempt)
+peep2_attempt (basic_block bb, rtx uncast_insn, int match_len,
+ rtx_insn *attempt)
{
+ rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);
int i;
rtx_insn *last, *before_try, *x;
rtx eh_note, as_note;
- rtx old_insn, new_insn;
+ rtx old_insn;
+ rtx_insn *new_insn;
bool was_call = false;
/* If we are splitting an RTX_FRAME_RELATED_P insn, do not allow it to
static void
peephole2_optimize (void)
{
- rtx insn;
+ rtx_insn *insn;
bitmap live;
int i;
basic_block bb;
insn = BB_HEAD (bb);
for (;;)
{
- rtx attempt, head;
+ rtx_insn *attempt;
+ rtx head;
int match_len;
if (!past_end && !NONDEBUG_INSN_P (insn))
/* Match the peephole. */
head = peep2_insn_data[peep2_current].insn;
- attempt = peephole2_insns (PATTERN (head), head, &match_len);
+ attempt = safe_as_a <rtx_insn *> (
+ peephole2_insns (PATTERN (head), head, &match_len));
if (attempt != NULL)
{
rtx last = peep2_attempt (bb, head, match_len, attempt);
REGNO (SET_DEST (pat)));
emit_move_insn (new_dst, new_src);
- rtx insn = get_insns();
+ rtx_insn *insn = get_insns();
end_sequence ();
if (NEXT_INSN (insn))
return false;
#if defined (AUTO_INC_DEC)
int i;
#endif
- rtx x;
rtx_note *marker;
memset (spill_reg_rtx, 0, sizeof spill_reg_rtx);
if (n_reloads > 0)
{
rtx_insn *next = NEXT_INSN (insn);
- rtx p;
/* ??? PREV can get deleted by reload inheritance.
Work around this by emitting a marker note. */
fixup_eh_region_note (insn, prev, next);
/* Adjust the location of REG_ARGS_SIZE. */
- p = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
+ rtx p = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
if (p)
{
remove_note (insn, p);
we have generated are valid. If not, give an error
and delete them. */
if (asm_noperands (PATTERN (insn)) >= 0)
- for (p = NEXT_INSN (prev); p != next; p = NEXT_INSN (p))
+ for (rtx_insn *p = NEXT_INSN (prev);
+ p != next;
+ p = NEXT_INSN (p))
if (p != insn && INSN_P (p)
&& GET_CODE (PATTERN (p)) != USE
&& (recog_memoized (p) < 0
/* There may have been CLOBBER insns placed after INSN. So scan
between INSN and NEXT and use them to forget old reloads. */
- for (x = NEXT_INSN (insn); x != old_next; x = NEXT_INSN (x))
+ for (rtx_insn *x = NEXT_INSN (insn); x != old_next; x = NEXT_INSN (x))
if (NONJUMP_INSN_P (x) && GET_CODE (PATTERN (x)) == CLOBBER)
note_stores (PATTERN (x), forget_old_reloads_1, NULL);
rtx reload_reg = rld[i].reg_rtx;
enum machine_mode mode = GET_MODE (reload_reg);
int n = 0;
- rtx p;
+ rtx_insn *p;
for (p = PREV_INSN (old_next); p != prev; p = PREV_INSN (p))
{
if (TEST_HARD_REG_BIT (reg_reloaded_valid,
in_hard_regno))
{
- for (x = old_prev ? NEXT_INSN (old_prev) : insn;
+ for (rtx_insn *x = (old_prev
+ ? NEXT_INSN (old_prev) : insn);
x != old_next;
x = NEXT_INSN (x))
if (x == reg_reloaded_insn[in_hard_regno])
/* If a pseudo that got a hard register is auto-incremented,
we must purge records of copying it into pseudos without
hard registers. */
- for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ for (rtx x = REG_NOTES (insn); x; x = XEXP (x, 1))
if (REG_NOTE_KIND (x) == REG_INC)
{
/* See if this pseudo reg was reloaded in this insn.
int k;
int n_occurrences;
int n_inherited = 0;
- rtx i1;
rtx substed;
unsigned regno;
int nregs;
n_occurrences += count_occurrences (PATTERN (insn),
eliminate_regs (substed, VOIDmode,
NULL_RTX), 0);
- for (i1 = reg_equiv_alt_mem_list (REGNO (reg)); i1; i1 = XEXP (i1, 1))
+ for (rtx i1 = reg_equiv_alt_mem_list (REGNO (reg)); i1; i1 = XEXP (i1, 1))
{
gcc_assert (!rtx_equal_p (XEXP (i1, 0), substed));
n_occurrences += count_occurrences (PATTERN (insn), XEXP (i1, 0), 0);
and we're within the same basic block, then the value can only
pass through the reload reg and end up here.
Otherwise, give up--return. */
- for (i1 = NEXT_INSN (output_reload_insn);
+ for (rtx_insn *i1 = NEXT_INSN (output_reload_insn);
i1 != insn; i1 = NEXT_INSN (i1))
{
if (NOTE_INSN_BASIC_BLOCK_P (i1))
static int mostly_true_jump (rtx);
static rtx get_branch_condition (rtx, rtx);
static int condition_dominates_p (rtx, rtx);
-static int redirect_with_delay_slots_safe_p (rtx, rtx, rtx);
-static int redirect_with_delay_list_safe_p (rtx, rtx, rtx);
+static int redirect_with_delay_slots_safe_p (rtx_insn *, rtx, rtx);
+static int redirect_with_delay_list_safe_p (rtx_insn *, rtx, rtx_insn_list *);
static int check_annul_list_true_false (int, rtx);
-static rtx_insn_list *steal_delay_list_from_target (rtx, rtx,
+static rtx_insn_list *steal_delay_list_from_target (rtx_insn *, rtx,
rtx_sequence *,
rtx_insn_list *,
struct resources *,
struct resources *,
int, int *, int *,
rtx_insn **);
-static rtx_insn_list *steal_delay_list_from_fallthrough (rtx, rtx,
+static rtx_insn_list *steal_delay_list_from_fallthrough (rtx_insn *, rtx,
rtx_sequence *,
rtx_insn_list *,
struct resources *,
struct resources *,
struct resources *,
int, int *, int *);
-static void try_merge_delay_insns (rtx, rtx);
-static rtx redundant_insn (rtx, rtx, rtx);
-static int own_thread_p (rtx, rtx, int);
+static void try_merge_delay_insns (rtx, rtx_insn *);
+static rtx redundant_insn (rtx, rtx_insn *, rtx);
+static int own_thread_p (rtx_insn *, rtx, int);
static void update_block (rtx, rtx);
static int reorg_redirect_jump (rtx, rtx);
static void update_reg_dead_notes (rtx, rtx);
int *, rtx_insn_list *);
static void fill_eager_delay_slots (void);
static void relax_delay_slots (rtx_insn *);
-static void make_return_insns (rtx);
+static void make_return_insns (rtx_insn *);
\f
/* A wrapper around next_active_insn which takes care to return ret_rtx
unchanged. */
any insns already in the delay slot of JUMP. */
static int
-redirect_with_delay_slots_safe_p (rtx jump, rtx newlabel, rtx seq)
+redirect_with_delay_slots_safe_p (rtx_insn *jump, rtx newlabel, rtx seq)
{
int flags, i;
- rtx pat = PATTERN (seq);
+ rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (seq));
/* Make sure all the delay slots of this jump would still
be valid after threading the jump. If they are still
valid, then return nonzero. */
flags = get_jump_flags (jump, newlabel);
- for (i = 1; i < XVECLEN (pat, 0); i++)
+ for (i = 1; i < pat->len (); i++)
if (! (
#ifdef ANNUL_IFFALSE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
- ? eligible_for_annul_false (jump, i - 1,
- XVECEXP (pat, 0, i), flags) :
+ && INSN_FROM_TARGET_P (pat->insn (i)))
+ ? eligible_for_annul_false (jump, i - 1, pat->insn (i), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ && ! INSN_FROM_TARGET_P (pat->insn (i)))
- ? eligible_for_annul_true (jump, i - 1,
- XVECEXP (pat, 0, i), flags) :
+ ? eligible_for_annul_true (jump, i - 1, pat->insn (i), flags) :
#endif
- eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
+ eligible_for_delay (jump, i - 1, pat->insn (i), flags)))
break;
- return (i == XVECLEN (pat, 0));
+ return (i == pat->len ());
}
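
Viewing PATTERN (seq) as rtx_sequence * lets the XVECLEN/XVECEXP pairs collapse into the subclass accessors len () and insn (). A sketch of the equivalence, assuming the caller already knows the insn carries a SEQUENCE (delay_slot_count is an illustrative name):

/* Sketch: element 0 of a filled SEQUENCE is the branch itself; the
   remaining elements are its delay-slot insns.  */
static int
delay_slot_count (rtx_insn *insn)
{
  rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (insn));
  return pat->len () - 1;  /* was XVECLEN (PATTERN (insn), 0) - 1 */
}
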
/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate
any insns we wish to place in the delay slot of JUMP. */
static int
-redirect_with_delay_list_safe_p (rtx jump, rtx newlabel, rtx delay_list)
+redirect_with_delay_list_safe_p (rtx_insn *jump, rtx newlabel,
+ rtx_insn_list *delay_list)
{
int flags, i;
- rtx li;
+ rtx_insn_list *li;
/* Make sure all the insns in DELAY_LIST would still be
valid after threading the jump. If they are still
valid, then return nonzero. */
flags = get_jump_flags (jump, newlabel);
- for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
+ for (li = delay_list, i = 0; li; li = li->next (), i++)
if (! (
#ifdef ANNUL_IFFALSE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && INSN_FROM_TARGET_P (XEXP (li, 0)))
- ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
+ && INSN_FROM_TARGET_P (li->insn ()))
+ ? eligible_for_annul_false (jump, i, li->insn (), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump)
- && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
+ && ! INSN_FROM_TARGET_P (li->insn ()))
- ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
+ ? eligible_for_annul_true (jump, i, li->insn (), flags) :
#endif
- eligible_for_delay (jump, i, XEXP (li, 0), flags)))
+ eligible_for_delay (jump, i, li->insn (), flags)))
break;
return (li == NULL);
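
rtx_insn_list gets the same treatment as rtx_sequence: the XEXP (li, 0) and XEXP (li, 1) accesses become insn () and next (), which carry the right types without casts at the use sites. A sketch of a typed walk (count_list_insns is a hypothetical helper):

/* Sketch: traverse an INSN_LIST through its typed accessors.  */
static int
count_list_insns (rtx_insn_list *list)
{
  int n = 0;
  for (rtx_insn_list *li = list; li != NULL; li = li->next ())
    if (li->insn () != NULL)
      n++;
  return n;
}
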
execution should continue. */
static rtx_insn_list *
-steal_delay_list_from_target (rtx insn, rtx condition, rtx_sequence *seq,
+steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq,
rtx_insn_list *delay_list, struct resources *sets,
struct resources *needed,
struct resources *other_needed,
for INSN since unconditional branches are much easier to fill. */
static rtx_insn_list *
-steal_delay_list_from_fallthrough (rtx insn, rtx condition, rtx_sequence *seq,
+steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition,
+ rtx_sequence *seq,
rtx_insn_list *delay_list,
struct resources *sets,
struct resources *needed,
we delete the merged insn. */
static void
-try_merge_delay_insns (rtx insn, rtx thread)
+try_merge_delay_insns (rtx insn, rtx_insn *thread)
{
- rtx trial, next_trial;
- rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
+ rtx_insn *trial, *next_trial;
+ rtx_insn *delay_insn = as_a <rtx_insn *> (XVECEXP (PATTERN (insn), 0, 0));
int annul_p = JUMP_P (delay_insn) && INSN_ANNULLED_BRANCH_P (delay_insn);
int slot_number = 1;
int num_slots = XVECLEN (PATTERN (insn), 0);
gain in rare cases. */
static rtx
-redundant_insn (rtx insn, rtx target, rtx delay_list)
+redundant_insn (rtx insn, rtx_insn *target, rtx delay_list)
{
rtx target_main = target;
rtx ipat = PATTERN (insn);
- rtx trial, pat;
+ rtx_insn *trial;
+ rtx pat;
struct resources needed, set;
int i;
unsigned insns_to_search;
finding an active insn, we do not own this thread. */
static int
-own_thread_p (rtx thread, rtx label, int allow_fallthrough)
+own_thread_p (rtx_insn *thread, rtx label, int allow_fallthrough)
{
- rtx active_insn;
- rtx insn;
+ rtx_insn *active_insn;
+ rtx_insn *insn;
/* We don't own the function end. */
if (thread == 0 || ANY_RETURN_P (thread))
the new label. */
static rtx_insn *
-get_label_before (rtx insn, rtx sibling)
+get_label_before (rtx_insn *insn, rtx sibling)
{
rtx_insn *label;
{
/* See comment in relax_delay_slots about necessity of using
next_real_insn here. */
- rtx new_label = next_real_insn (next_trial);
+ rtx_insn *new_label = next_real_insn (next_trial);
if (new_label != 0)
new_label = get_label_before (new_label, JUMP_LABEL (trial));
= fill_slots_from_thread (insn, const_true_rtx,
next_active_insn (JUMP_LABEL (insn)),
NULL, 1, 1,
- own_thread_p (JUMP_LABEL (insn),
- JUMP_LABEL (insn), 0),
+ own_thread_p (JUMP_LABEL_AS_INSN (insn),
+ JUMP_LABEL_AS_INSN (insn), 0),
slots_to_fill, &slots_filled,
delay_list);
delete_computation (insn);
}
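
own_thread_p now takes its thread argument as rtx_insn *, so the call routes JUMP_LABEL through JUMP_LABEL_AS_INSN: one checked cast at the accessor, after which the result can be stepped with NEXT_INSN directly. A minimal sketch of the idiom, assuming the jump actually has a label (count_insns_after_target is a hypothetical helper, not part of this patch):

/* Sketch: walk forward from a jump's target label.  JUMP_LABEL_AS_INSN
   yields rtx_insn *, which the strengthened NEXT_INSN now requires.  */
static int
count_insns_after_target (rtx_insn *jump)
{
  int n = 0;
  for (rtx_insn *scan = JUMP_LABEL_AS_INSN (jump);
       (scan = NEXT_INSN (scan)) != NULL; )
    n++;
  return n;
}
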
-static rtx
+static rtx_insn *
label_before_next_insn (rtx x, rtx scan_limit)
{
- rtx insn = next_active_insn (x);
+ rtx_insn *insn = next_active_insn (x);
while (insn)
{
insn = PREV_INSN (insn);
if (insn == scan_limit || insn == NULL_RTX)
- return NULL_RTX;
+ return NULL;
if (LABEL_P (insn))
break;
}
/* Look at every JUMP_INSN and see if we can improve it. */
for (insn = first; insn; insn = next)
{
- rtx other;
+ rtx_insn *other;
bool crossing;
next = next_active_insn (insn);
{
/* Figure out where to emit the special USE insn so we don't
later incorrectly compute register live/death info. */
- rtx tmp = next_active_insn (trial);
+ rtx_insn *tmp = next_active_insn (trial);
if (tmp == 0)
tmp = find_end_label (simple_return_rtx);
RETURN as well. */
static void
-make_return_insns (rtx first)
+make_return_insns (rtx_insn *first)
{
- rtx insn, jump_insn, pat;
+ rtx_insn *insn;
+ rtx_insn *jump_insn;
rtx real_return_label = function_return_label;
rtx real_simple_return_label = function_simple_return_label;
int slots, i;
else
continue;
- pat = PATTERN (insn);
- jump_insn = XVECEXP (pat, 0, 0);
+ rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (insn));
+ jump_insn = pat->insn (0);
/* If we can't make the jump into a RETURN, try to redirect it to the best
RETURN and go on to the next insn. */
if (! (
#ifdef ANNUL_IFFALSE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump_insn)
- && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ && INSN_FROM_TARGET_P (pat->insn (i)))
? eligible_for_annul_false (jump_insn, i - 1,
- XVECEXP (pat, 0, i), flags) :
+ pat->insn (i), flags) :
#endif
#ifdef ANNUL_IFTRUE_SLOTS
(INSN_ANNULLED_BRANCH_P (jump_insn)
- && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
+ && ! INSN_FROM_TARGET_P (pat->insn (i)))
? eligible_for_annul_true (jump_insn, i - 1,
- XVECEXP (pat, 0, i), flags) :
+ pat->insn (i), flags) :
#endif
eligible_for_delay (jump_insn, i - 1,
- XVECEXP (pat, 0, i), flags)))
+ pat->insn (i), flags)))
break;
}
else
insns for its delay slots, if it needs some. */
if (ANY_RETURN_P (PATTERN (jump_insn)))
{
- rtx prev = PREV_INSN (insn);
+ rtx_insn *prev = PREV_INSN (insn);
delete_related_insns (insn);
for (i = 1; i < XVECLEN (pat, 0); i++)
|| JUMP_TABLE_DATA_P (X) \
|| BARRIER_P (X) \
|| LABEL_P (X)) \
- && PREV_INSN (X) != NULL \
- && NEXT_INSN (PREV_INSN (X)) == X \
- ? PREV_INSN (X) : NULL)
+ && PREV_INSN (as_a <rtx_insn *> (X)) != NULL \
+ && NEXT_INSN (PREV_INSN (as_a <rtx_insn *> (X))) == X \
+ ? PREV_INSN (as_a <rtx_insn *> (X)) : NULL)
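
Because RTX_PREV is a macro over an arbitrary rtx, every PREV_INSN use inside it needs its own as_a. Rewritten as a function, the shape is easier to see; this is only an illustration of what the macro computes, not a proposed replacement:

/* Sketch: RTX_PREV as a function -- the previous insn when X is
   insn-like and properly linked into the chain, otherwise NULL.  */
static rtx_insn *
rtx_prev_sketch (rtx x)
{
  if (!(INSN_P (x) || NOTE_P (x) || JUMP_TABLE_DATA_P (x)
	|| BARRIER_P (x) || LABEL_P (x)))
    return NULL;
  rtx_insn *insn = as_a <rtx_insn *> (x);
  if (PREV_INSN (insn) != NULL
      && NEXT_INSN (PREV_INSN (insn)) == insn)
    return PREV_INSN (insn);
  return NULL;
}
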
/* Define macros to access the `code' field of the rtx. */
and an lvalue form:
SET_NEXT_INSN/SET_PREV_INSN. */
-inline rtx_insn *PREV_INSN (const_rtx insn)
+inline rtx_insn *PREV_INSN (const rtx_insn *insn)
{
rtx prev = XEXP (insn, 0);
return safe_as_a <rtx_insn *> (prev);
return XEXP (insn, 0);
}
-inline rtx_insn *NEXT_INSN (const_rtx insn)
+inline rtx_insn *NEXT_INSN (const rtx_insn *insn)
{
rtx next = XEXP (insn, 1);
return safe_as_a <rtx_insn *> (next);
be decremented and possibly the label can be deleted. */
#define JUMP_LABEL(INSN) XCEXP (INSN, 7, JUMP_INSN)
-inline rtx_insn *JUMP_LABEL_AS_INSN (rtx_insn *insn)
+inline rtx_insn *JUMP_LABEL_AS_INSN (const rtx_insn *insn)
{
return safe_as_a <rtx_insn *> (JUMP_LABEL (insn));
}
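
rtl.h thus carries both flavors of checked conversion used throughout the series: as_a, which insists the operand has the target type, and safe_as_a, which additionally lets NULL through. JUMP_LABEL_AS_INSN uses the safe variant because JUMP_LABEL may legitimately be null. A sketch contrasting the two (both helper names are invented for illustration):

/* Sketch: safe_as_a tolerates a null label, as_a does not.  */
static rtx_insn *
target_or_null (const rtx_insn *jump)
{
  return JUMP_LABEL_AS_INSN (jump);  /* NULL if no label is recorded */
}

static rtx_insn *
target_must_exist (const rtx_insn *jump)
{
  /* Checking builds reject a missing or non-insn label here.  */
  return as_a <rtx_insn *> (JUMP_LABEL (jump));
}
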
extern int reg_mentioned_p (const_rtx, const_rtx);
extern int count_occurrences (const_rtx, const_rtx, int);
extern int reg_referenced_p (const_rtx, const_rtx);
-extern int reg_used_between_p (const_rtx, const_rtx, const_rtx);
+extern int reg_used_between_p (const_rtx, const rtx_insn *, const rtx_insn *);
extern int reg_set_between_p (const_rtx, const_rtx, const_rtx);
extern int commutative_operand_precedence (rtx);
extern bool swap_commutative_operands_p (rtx, rtx);
extern int modified_between_p (const_rtx, const_rtx, const_rtx);
-extern int no_labels_between_p (const_rtx, const_rtx);
+extern int no_labels_between_p (const rtx_insn *, const rtx_insn *);
extern int modified_in_p (const_rtx, const_rtx);
extern int reg_set_p (const_rtx, const_rtx);
extern rtx single_set_2 (const_rtx, const_rtx);
no CODE_LABEL insn. */
int
-no_labels_between_p (const_rtx beg, const_rtx end)
+no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
- rtx p;
+ rtx_insn *p;
if (beg == end)
return 0;
for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
FROM_INSN and TO_INSN (exclusive of those two). */
int
-reg_used_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
+reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
+ const rtx_insn *to_insn)
{
rtx_insn *insn;
FROM_INSN and TO_INSN (exclusive of those two). */
int
-reg_set_between_p (const_rtx reg, const_rtx from_insn, const_rtx to_insn)
+reg_set_between_p (const_rtx reg, const_rtx uncast_from_insn, const_rtx to_insn)
{
+ const rtx_insn *from_insn =
+ safe_as_a <const rtx_insn *> (uncast_from_insn);
const rtx_insn *insn;
if (from_insn == to_insn)
X contains a MEM; this routine does use memory aliasing. */
int
-modified_between_p (const_rtx x, const_rtx start, const_rtx end)
+modified_between_p (const_rtx x, const_rtx uncast_start, const_rtx end)
{
+ const rtx_insn *start =
+ safe_as_a <const rtx_insn *> (uncast_start);
const enum rtx_code code = GET_CODE (x);
const char *fmt;
int i, j;
label = JUMP_LABEL (insn);
if (label != NULL_RTX && !ANY_RETURN_P (label)
- && (table = NEXT_INSN (label)) != NULL_RTX
+ && (table = NEXT_INSN (as_a <rtx_insn *> (label))) != NULL_RTX
&& JUMP_TABLE_DATA_P (table))
{
if (labelp)
/* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors
with positive seqno exist. */
int
-get_seqno_by_preds (rtx insn)
+get_seqno_by_preds (rtx_insn *insn)
{
basic_block bb = BLOCK_FOR_INSN (insn);
- rtx tmp = insn, head = BB_HEAD (bb);
+ rtx_insn *tmp = insn, *head = BB_HEAD (bb);
insn_t *preds;
int n, i, seqno;
void
clear_outdated_rtx_info (basic_block bb)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (INSN_P (insn))
extern void free_succs_info (struct succs_info *);
extern bool sel_insn_has_single_succ_p (insn_t, int);
extern bool sel_num_cfg_preds_gt_1 (insn_t);
-extern int get_seqno_by_preds (rtx);
+extern int get_seqno_by_preds (rtx_insn *);
extern bool bb_ends_ebb_p (basic_block);
extern bool in_same_ebb_p (insn_t, insn_t);
{
basic_block bb;
int *regs_set_in_block;
- rtx insn, st;
+ rtx_insn *insn;
+ rtx_insn_list *st;
struct st_expr * ptr;
unsigned int max_gcse_regno = max_reg_num ();
for (ptr = first_st_expr (); ptr != NULL; ptr = next_st_expr (ptr))
{
- for (st = ptr->avail_stores; st != NULL; st = XEXP (st, 1))
+ for (st = ptr->avail_stores; st != NULL; st = st->next ())
{
- insn = XEXP (st, 0);
+ insn = st->insn ();
bb = BLOCK_FOR_INSN (insn);
/* If we've already seen an available expression in this block,
bitmap_set_bit (st_avloc[bb->index], ptr->index);
}
- for (st = ptr->antic_stores; st != NULL; st = XEXP (st, 1))
+ for (st = ptr->antic_stores; st != NULL; st = st->next ())
{
- insn = XEXP (st, 0);
+ insn = st->insn ();
bb = BLOCK_FOR_INSN (insn);
bitmap_set_bit (st_antloc[bb->index], ptr->index);
}
/* Returns estimate on cost of computing SEQ. */
static unsigned
-seq_cost (rtx seq, bool speed)
+seq_cost (rtx_insn *seq, bool speed)
{
unsigned cost = 0;
rtx set;
static unsigned
computation_cost (tree expr, bool speed)
{
- rtx seq, rslt;
+ rtx_insn *seq;
+ rtx rslt;
tree type = TREE_TYPE (expr);
unsigned cost;
/* Avoid using hard regs in ways which may be unsupported. */
HOST_WIDE_INT rat, off = 0;
int old_cse_not_expected, width;
unsigned sym_p, var_p, off_p, rat_p, add_c;
- rtx seq, addr, base;
+ rtx_insn *seq;
+ rtx addr, base;
rtx reg0, reg1;
data = (address_cost_data) xcalloc (1, sizeof (*data));
Phase 4: removal of "scaffolding": DONE
Phase 5: additional rtx_def subclasses: DONE
Phase 6: use extra rtx_def subclasses: IN PROGRESS
-
-TODO: "Scaffolding" to be removed
-=================================
-* SET_NEXT_INSN, SET_PREV_INSN