+2014-08-25 David Malcolm <dmalcolm@redhat.com>
+
+ * target.def (reorder): Strengthen param "ready" of this DEFHOOK
+ from rtx * to rtx_insn **.
+ (reorder2): Likewise.
+ (dependencies_evaluation_hook): Strengthen params "head", "tail"
+ from rtx to rtx_insn *.
+
+ * doc/tm.texi: Update mechanically for above change to target.def.
+
+ * sched-int.h (note_list): Strengthen this variable from rtx to
+ rtx_insn *.
+ (remove_notes): Likewise for both params.
+ (restore_other_notes): Likewise for return type and first param.
+ (struct ready_list): Strengthen field "vec" from rtx * to
+ rtx_insn **.
+ (struct dep_replacement): Strengthen field "insn" from rtx to
+ rtx_insn *.
+ (struct deps_desc): Likewise for fields "last_debug_insn",
+ "last_args_size".
+ (struct haifa_sched_info): Likewise for callback field
+ "can_schedule_ready_p"'s param, for first param of "new_ready"
+ callback field, for both params of "rank" callback field, for
+ first param of "print_insn" callback field (with a const), for
+ both params of "contributes_to_priority" callback, for param
+ of "insn_finishes_block_p" callback, for fields "prev_head",
+ "next_tail", "head", "tail", for first param of "add_remove_insn"
+ callback, for first param of "begin_schedule_ready" callback, for
+ both params of "begin_move_insn" callback, and for second param
+ of "advance_target_bb" callback.
+ (add_dependence): Likewise for params 1 and 2.
+ (sched_analyze): Likewise for params 2 and 3.
+ (deps_analyze_insn): Likewise for param 2.
+ (ready_element): Likewise for return type.
+ (ready_lastpos): Strengthen return type from rtx * to rtx_insn **.
+ (try_ready): Strengthen param from rtx to rtx_insn *.
+ (sched_emit_insn): Likewise for return type.
+ (record_delay_slot_pair): Likewise for params 1 and 2.
+ (add_delay_dependencies): Likewise for param.
+ (contributes_to_priority): Likewise for both params.
+ (find_modifiable_mems): Likewise.
+
+ * config/arm/arm.c (cortexa7_sched_reorder): Strengthen param
+ "ready" from rtx * to rtx_insn **. Strengthen locals "insn",
+ "first_older_only_insn" from rtx to rtx_insn *.
+ (arm_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+
+ * config/c6x/c6x.c (struct c6x_sched_context): Strengthen field
+ "last_scheduled_iter0" from rtx to rtx_insn *.
+ (init_sched_state): Replace use of NULL_RTX with NULL for insn.
+ (c6x_sched_reorder_1): Strengthen param "ready" and locals
+ "e_ready", "insnp" from rtx * to rtx_insn **. Strengthen local
+ "insn" from rtx to rtx_insn *.
+ (c6x_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+ (c6x_sched_reorder2): Strengthen param "ready" and locals
+ "e_ready", "insnp" from rtx * to rtx_insn **. Strengthen local
+ "insn" from rtx to rtx_insn *.
+ (c6x_variable_issue): Add a checked cast when assigning from insn
+ to ss.last_scheduled_iter0.
+ (split_delayed_branch): Strengthen param "insn" and local "i1"
+ from rtx to rtx_insn *.
+ (split_delayed_nonbranch): Likewise.
+ (undo_split_delayed_nonbranch): Likewise for local "insn".
+ (hwloop_optimize): Likewise for locals "seq", "insn", "prev",
+ "entry_after", "end_packet", "head_insn", "tail_insn",
+ "new_insns", "last_insn", "this_iter", "prev_stage_insn".
+ Strengthen locals "orig_vec", "copies", "insn_copies" from rtx *
+ to rtx_insn **. Remove now-redundant checked cast on last_insn,
+ but add a checked cast on loop->start_label. Consolidate calls to
+ avoid assigning result of gen_spkernel to "insn", now an
+ rtx_insn *.
+
+ * config/i386/i386.c (do_reorder_for_imul): Strengthen param
+ "ready" from rtx * to rtx_insn **. Strengthen local "insn" from
+ rtx to rtx_insn *.
+ (swap_top_of_ready_list): Strengthen param "ready" from rtx * to
+ rtx_insn **. Strengthen locals "top", "next" from rtx to
+ rtx_insn *.
+ (ix86_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **. Strengthen local "insn" from rtx to rtx_insn *.
+ (add_parameter_dependencies): Strengthen params "call", "head" and
+ locals "insn", "last", "first_arg" from rtx to rtx_insn *.
+ (avoid_func_arg_motion): Likewise for params "first_arg", "insn".
+ (add_dependee_for_func_arg): Likewise for param "arg" and local
+ "insn".
+ (ix86_dependencies_evaluation_hook): Likewise for params "head",
+ "tail" and locals "insn", "first_arg".
+
+ * config/ia64/ia64.c (ia64_dependencies_evaluation_hook): Likewise
+ for params "head", "tail" and locals "insn", "next", "next_tail".
+ (ia64_dfa_sched_reorder): Strengthen param "ready" and locals
+ "e_ready", "insnp" from rtx * to rtx_insn **. Strengthen locals
+ "insn", "lowest", "highest" from rtx to rtx_insn *.
+ (ia64_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+ (ia64_sched_reorder2): Likewise.
+
+ * config/mep/mep.c (mep_find_ready_insn): Strengthen return type
+ and local "insn" from rtx to rtx_insn *. Strengthen param "ready"
+ from rtx * to rtx_insn **.
+ (mep_move_ready_insn): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+ (mep_print_sched_insn): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (mep_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **. Strengthen locals "core_insn", "cop_insn" from rtx
+ to rtx_insn *.
+
+ * config/mips/mips.c (mips_promote_ready): Strengthen param "ready"
+ from rtx * to rtx_insn **. Strengthen local "new_head" from rtx
+ to rtx_insn *.
+ (mips_maybe_swap_ready): Strengthen param "ready" from rtx * to
+ rtx_insn **. Strengthen local "temp" from rtx to rtx_insn *.
+ (mips_macc_chains_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+ (vr4130_reorder): Likewise.
+ (mips_74k_agen_reorder): Likewise. Strengthen local "insn" from
+ rtx to rtx_insn *.
+ (mips_sched_reorder_1): Strengthen param "ready" from rtx * to
+ rtx_insn **.
+ (mips_sched_reorder): Likewise.
+ (mips_sched_reorder2): Likewise.
+
+ * config/picochip/picochip.c (picochip_sched_reorder): Likewise.
+
+ * config/rs6000/rs6000.c (rs6000_sched_reorder): Likewise.
+ Strengthen local "tmp" from rtx to rtx_insn *.
+ (rs6000_sched_reorder2): Likewise.
+
+ * config/s390/s390.c (s390_z10_prevent_earlyload_conflicts):
+ Likewise. Update sizeof(rtx) to sizeof(rtx_insn *) in memmove.
+ (s390_sched_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **. Strengthen local "tmp" from rtx to rtx_insn *.
+
+ * config/sh/sh.c (rank_for_reorder): Strengthen locals "tmp",
+ "tmp2" from rtx to rtx_insn *.
+ (swap_reorder): Strengthen param "a" from rtx * to rtx_insn **.
+ Strengthen local "insn" from rtx to rtx_insn *.
+ (ready_reorder): Strengthen param "ready" from rtx * to
+ rtx_insn **. Update sizeof(rtx) to sizeof(rtx_insn *) in qsort.
+ (sh_reorder): Strengthen param "ready" from rtx * to rtx_insn **.
+ (sh_reorder2): Likewise.
+
+ * config/spu/spu.c (spu_sched_reorder): Likewise. Strengthen
+ local "insn" from rtx to rtx_insn *.
+
+ * haifa-sched.c (note_list): Strengthen this variable from rtx to
+ rtx_insn *.
+ (scheduled_insns): Strengthen this variable from vec<rtx> to
+ vec<rtx_insn *>.
+ (set_modulo_params): Likewise for locals "i1", "i2".
+ (record_delay_slot_pair): Likewise for params "i1", "i2".
+ (add_delay_dependencies): Likewise for param "insn".
+ (cond_clobbered_p): Likewise.
+ (recompute_todo_spec): Likewise for local "prev".
+ (last_scheduled_insn): Likewise for this variable.
+ (nonscheduled_insns_begin): Likewise.
+ (model_set_excess_costs): Strengthen param "insns" from rtx * to
+ rtx_insn **.
+ (rank_for_schedule): Strengthen locals "tmp", "tmp2" from rtx to
+ rtx_insn *.
+ (swap_sort): Strengthen param "a" from rtx * to rtx_insn **.
+ Strengthen local "insn" from rtx to rtx_insn *.
+ (queue_insn): Strengthen param "insn" from rtx to rtx_insn *.
+ (ready_lastpos): Strengthen return type from rtx * to rtx_insn **.
+ (ready_add): Strengthen param "insn" from rtx to rtx_insn *.
+ (ready_remove_first): Likewise for return type and local "t".
+ (ready_element): Likewise for return type.
+ (ready_remove): Likewise for return type and local "t".
+ (ready_sort): Strengthen local "first" from rtx * to rtx_insn **.
+ (check_clobbered_conditions): Strengthen local "x" from rtx to
+ rtx_insn *, adding a checked cast.
+ (schedule_insn): Likewise for param "insn".
+ (remove_notes): Likewise for params "head", "tail" and locals
+ "next_tail", "insn", "next".
+ (struct haifa_saved_data): Likewise for fields
+ "last_scheduled_insn", "nonscheduled_insns_begin".
+ (save_backtrack_point): Update for change to field "vec" of
+ struct ready_list.
+ (toggle_cancelled_flags): Strengthen local "first" from rtx * to
+ rtx_insn **.
+ (restore_last_backtrack_point): Likewise. Strengthen local "insn"
+ from rtx to rtx_insn *.
+ (resolve_dependencies): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (restore_other_notes): Likewise for return type, for param "head"
+ and local "note_head".
+ (undo_all_replacements): Likewise for local "insn".
+ (first_nonscheduled_insn): Likewise for return type and local "insn".
+ (queue_to_ready): Likewise for local "insn", adding checked casts.
+ (early_queue_to_ready): Likewise for local "insn".
+ (debug_ready_list_1): Strengthen local "p" from rtx * to
+ rtx_insn **.
+ (move_insn): Strengthen param "insn" and local "note" from rtx to
+ rtx_insn *.
+ (insn_finishes_cycle_p): Likewise for param "insn".
+ (max_issue): Likewise for local "insn".
+ (choose_ready): Likewise. Strengthen param "insn_ptr" from rtx *
+ to rtx_insn **.
+ (commit_schedule): Strengthen param "prev_head" and local "insn"
+ from rtx to rtx_insn *.
+ (prune_ready_list): Likewise for local "insn".
+ (schedule_block): Likewise for locals "prev_head", "head", "tail",
+ "skip_insn", "insn", "failed_insn", "x", adding a checked cast.
+ (set_priorities): Likewise for local "prev_head".
+ (try_ready): Likewise for param "next".
+ (fix_tick_ready): Likewise.
+ (change_queue_index): Likewise.
+ (sched_extend_ready_list): Update for change to field "vec" of
+ struct ready_list.
+ (generate_recovery_code): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (begin_speculative_block): Likewise.
+ (create_check_block_twin): Likewise for param "insn" and locals
+ "label", "check", "twin". Introduce local "check_pat" to avoid
+ "check" being used as a plain rtx before being used as an insn.
+ (fix_recovery_deps): Add a checked cast to rtx_insn * when
+ extracting elements from ready_list.
+ (sched_remove_insn): Strengthen param "insn" from rtx to
+ rtx_insn *.
+ (sched_emit_insn): Likewise for return type.
+ (ready_remove_first_dispatch): Likewise for return type and local
+ "insn".
+
+ * hw-doloop.c (discover_loop): Add a checked cast to rtx_insn *.
+
+ * modulo-sched.c (sms_print_insn): Strengthen param "insn" from
+ const_rtx to const rtx_insn *.
+
+ * sched-deps.c (add_dependence): Strengthen params "con", "pro"
+ from rtx to rtx_insn *.
+ (add_dependence_list): Likewise for param "insn". Add a checked
+ cast.
+ (add_dependence_list_and_free): Strengthen param "insn" from rtx
+ to rtx_insn *. Strengthen param "list_p" from rtx * to
+ rtx_insn **.
+ (chain_to_prev_insn): Strengthen param "insn" and locals
+ "prec_nonnote", "i" from rtx to rtx_insn *.
+ (flush_pending_lists): Likewise for param "insn".
+ (cur_insn): Likewise for this variable.
+ (haifa_start_insn): Add a checked cast.
+ (note_dep): Strengthen param "e" from rtx to rtx_insn *.
+ (sched_analyze_reg): Likewise for param "insn".
+ (sched_analyze_1): Likewise.
+ (sched_analyze_2): Likewise. Add checked casts.
+ (sched_analyze_insn): Likewise. Also for local "prev".
+ (deps_analyze_insn): Likewise for param "insn".
+ (sched_analyze): Likewise for params "head", "tail" and local "insn".
+ (add_dependence_1): Likewise for params "insn", "elem".
+ (struct mem_inc_info): Likewise for fields "inc_insn", "mem_insn".
+ (parse_add_or_inc): Likewise for param "insn".
+ (find_inc): Likewise for local "inc_cand".
+ (find_modifiable_mems): Likewise for params "head", "tail" and
+ locals "insn", "next_tail".
+
+ * sched-ebb.c (init_ready_list): Likewise for local "insn".
+ (begin_schedule_ready): Likewise for param "insn".
+ (begin_move_insn): Likewise for params "insn" and "last".
+ (ebb_print_insn): Strengthen param "insn" from const_rtx to
+ const rtx_insn *.
+ (rank): Strengthen params "insn1", "insn2" from rtx to rtx_insn *.
+ (ebb_contributes_to_priority): Likewise for params "next", "insn".
+ (ebb_add_remove_insn): Likewise for param "insn".
+ (advance_target_bb): Likewise.
+
+ * sched-rgn.c (rgn_estimate_number_of_insns): Likewise for local
+ "insn".
+ (check_live): Likewise for param "insn".
+ (init_ready_list): Likewise for local "insn".
+ (can_schedule_ready_p): Likewise for param "insn".
+ (begin_schedule_ready): Likewise.
+ (new_ready): Likewise for param "next".
+ (rgn_print_insn): Likewise for param "insn".
+ (rgn_rank): Likewise for params "insn1", "insn2".
+ (contributes_to_priority): Likewise for params "next", "insn".
+ (rgn_insn_finishes_block_p): Likewise for param "insn".
+ (add_branch_dependences): Likewise for params "head", "tail" and
+ locals "insn", "last".
+ (rgn_add_remove_insn): Likewise for param "insn".
+ (advance_target_bb): Likewise.
+
+ * sel-sched-dump.c (sel_print_insn): Strengthen param "insn" from
+ const_rtx to const rtx_insn *.
+
+ * sel-sched-dump.h (sel_print_insn): Likewise.
+
+ * sel-sched-ir.c (advance_deps_context): Add a checked cast.
+ (deps_init_id): Likewise.
+
+ * sel-sched.c (convert_vec_av_set_to_ready): Likewise.
+ (invoke_reorder_hooks): Strengthen local "arr" from rtx * to
+ rtx_insn **.
+
2014-08-25 David Malcolm <dmalcolm@redhat.com>
* output.h (final_start_function): Strengthen param 1 from rtx to
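
For reference, "strengthen" in the entry above means replacing an rtx
declaration with the insn-specific subclass rtx_insn *, and a "checked
cast" is an as_a <rtx_insn *> (...) conversion used where an insn comes
back out of a generic rtx container.  A rough sketch of the pattern, not
part of this patch and using made-up names ("example_use", "insn_list"):

static void
example_use (rtx_insn *insn, rtx insn_list)
{
  /* "Strengthening": the local is declared as rtx_insn *, not rtx.  */
  rtx_insn *prev = PREV_INSN (insn);

  /* "Checked cast": an INSN_LIST node only hands back a plain rtx.  */
  rtx_insn *listed = as_a <rtx_insn *> (XEXP (insn_list, 0));

  if (prev && listed)
    add_dependence (insn, listed, REG_DEP_TRUE);
}
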
static int arm_comp_type_attributes (const_tree, const_tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
-static int arm_sched_reorder (FILE *, int, rtx *, int *, int);
+static int arm_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static int optimal_immediate_sequence (enum rtx_code code,
unsigned HOST_WIDE_INT val,
struct four_ints *return_sequence);
instructions. This heuristic may affect dual issue opportunities
in the current cycle. */
static void
-cortexa7_sched_reorder (FILE *file, int verbose, rtx *ready, int *n_readyp,
- int clock)
+cortexa7_sched_reorder (FILE *file, int verbose, rtx_insn **ready,
+ int *n_readyp, int clock)
{
int i;
int first_older_only = -1, first_younger = -1;
older. */
for (i = *n_readyp - 1; i >= 0; i--)
{
- rtx insn = ready[i];
+ rtx_insn *insn = ready[i];
if (cortexa7_older_only (insn))
{
first_older_only = i;
fprintf (file, ";; cortexa7_sched_reorder insn %d before %d\n",
INSN_UID(ready [first_older_only]),
INSN_UID(ready [first_younger]));
- rtx first_older_only_insn = ready [first_older_only];
+ rtx_insn *first_older_only_insn = ready [first_older_only];
for (i = first_older_only; i < first_younger; i++)
{
ready[i] = ready[i+1];
/* Implement TARGET_SCHED_REORDER. */
static int
-arm_sched_reorder (FILE *file, int verbose, rtx *ready, int *n_readyp,
+arm_sched_reorder (FILE *file, int verbose, rtx_insn **ready, int *n_readyp,
int clock)
{
switch (arm_tune)
/* The following variable value is the last issued insn. */
rtx last_scheduled_insn;
/* The last issued insn that isn't a shadow of another. */
- rtx last_scheduled_iter0;
+ rtx_insn *last_scheduled_iter0;
/* The following variable value is DFA state before issuing the
first insn in the current clock cycle. We do not use this member
init_sched_state (c6x_sched_context_t sc)
{
sc->last_scheduled_insn = NULL_RTX;
- sc->last_scheduled_iter0 = NULL_RTX;
+ sc->last_scheduled_iter0 = NULL;
sc->issued_this_cycle = 0;
memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
memset (sc->jump_cond, 0, sizeof sc->jump_cond);
number of non-unsafe insns. */
static int
-c6x_sched_reorder_1 (rtx *ready, int *pn_ready, int clock_var)
+c6x_sched_reorder_1 (rtx_insn **ready, int *pn_ready, int clock_var)
{
int n_ready = *pn_ready;
- rtx *e_ready = ready + n_ready;
- rtx *insnp;
+ rtx_insn **e_ready = ready + n_ready;
+ rtx_insn **insnp;
int first_jump;
/* Keep track of conflicts due to a limit number of register accesses,
for (insnp = ready; insnp < e_ready; insnp++)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
int icode = recog_memoized (insn);
bool is_asm = (icode < 0
&& (GET_CODE (PATTERN (insn)) == ASM_INPUT
for (insnp = ready; insnp < e_ready; insnp++)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
int icode = recog_memoized (insn);
bool is_asm = (icode < 0
&& (GET_CODE (PATTERN (insn)) == ASM_INPUT
static int
c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx *ready ATTRIBUTE_UNUSED,
+ rtx_insn **ready ATTRIBUTE_UNUSED,
int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
ss.curr_sched_clock = clock_var;
static int
c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx *ready ATTRIBUTE_UNUSED,
+ rtx_insn **ready ATTRIBUTE_UNUSED,
int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
{
/* FIXME: the assembler rejects labels inside an execute packet.
&& get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
{
int n_ready = *pn_ready;
- rtx *e_ready = ready + n_ready;
- rtx *insnp;
+ rtx_insn **e_ready = ready + n_ready;
+ rtx_insn **insnp;
for (insnp = ready; insnp < e_ready; insnp++)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
if (!shadow_p (insn))
{
memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
{
ss.last_scheduled_insn = insn;
if (INSN_UID (insn) < sploop_max_uid_iter0 && !JUMP_P (insn))
- ss.last_scheduled_iter0 = insn;
+ ss.last_scheduled_iter0 = as_a <rtx_insn *> (insn);
if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
ss.issued_this_cycle++;
if (insn_info.exists ())
/* If possible, split INSN, which we know is either a jump or a call, into a real
insn and its shadow. */
static void
-split_delayed_branch (rtx insn)
+split_delayed_branch (rtx_insn *insn)
{
int code = recog_memoized (insn);
- rtx i1, newpat;
+ rtx_insn *i1;
+ rtx newpat;
rtx pat = PATTERN (insn);
if (GET_CODE (pat) == COND_EXEC)
with the possibility. Currently we handle loads and most mpy2 and
mpy4 insns. */
static bool
-split_delayed_nonbranch (rtx insn)
+split_delayed_nonbranch (rtx_insn *insn)
{
int code = recog_memoized (insn);
enum attr_type type;
- rtx i1, newpat, src, dest;
+ rtx_insn *i1;
+ rtx newpat, src, dest;
rtx pat = PATTERN (insn);
rtvec rtv;
int delay;
static void
split_delayed_insns (void)
{
- rtx insn;
+ rtx_insn *insn;
for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
{
if (JUMP_P (insn) || CALL_P (insn))
hwloop_optimize (hwloop_info loop)
{
basic_block entry_bb, bb;
- rtx seq, insn, prev, entry_after, end_packet;
- rtx head_insn, tail_insn, new_insns, last_insn;
+ rtx_insn *seq, *insn, *prev, *entry_after, *end_packet;
+ rtx_insn *head_insn, *tail_insn, *new_insns, *last_insn;
int loop_earliest;
int n_execute_packets;
edge entry_edge;
unsigned ix;
int max_uid_before, delayed_splits;
int i, sp_ii, min_ii, max_ii, max_parallel, n_insns, n_real_insns, stages;
- rtx *orig_vec;
- rtx *copies;
- rtx **insn_copies;
+ rtx_insn **orig_vec;
+ rtx_insn **copies;
+ rtx_insn ***insn_copies;
if (!c6x_flag_modulo_sched || !c6x_flag_schedule_insns2
|| !TARGET_INSNS_64PLUS)
if (NONDEBUG_INSN_P (insn) && insn != loop->loop_end)
n_real_insns++;
}
- orig_vec = XNEWVEC (rtx, n_insns);
+ orig_vec = XNEWVEC (rtx_insn *, n_insns);
n_insns = 0;
FOR_BB_INSNS (bb, insn)
orig_vec[n_insns++] = insn;
to handle. */
max_parallel = loop_earliest / min_ii + 1;
- copies = XCNEWVEC (rtx, (max_parallel + 1) * n_real_insns);
- insn_copies = XNEWVEC (rtx *, max_parallel + 1);
+ copies = XCNEWVEC (rtx_insn *, (max_parallel + 1) * n_real_insns);
+ insn_copies = XNEWVEC (rtx_insn **, max_parallel + 1);
for (i = 0; i < max_parallel + 1; i++)
insn_copies[i] = copies + i * n_real_insns;
for (i = 0; i < max_parallel; i++)
{
int j;
- rtx this_iter;
+ rtx_insn *this_iter;
this_iter = duplicate_insn_chain (head_insn, tail_insn);
j = 0;
while (this_iter)
{
- rtx prev_stage_insn = insn_copies[i][j];
+ rtx_insn *prev_stage_insn = insn_copies[i][j];
gcc_assert (INSN_CODE (this_iter) == INSN_CODE (prev_stage_insn));
if (INSN_CODE (this_iter) >= 0
&& (get_attr_type (this_iter) == TYPE_LOAD_SHADOW
|| get_attr_type (this_iter) == TYPE_MULT_SHADOW))
{
- rtx prev = PREV_INSN (this_iter);
+ rtx_insn *prev = PREV_INSN (this_iter);
record_delay_slot_pair (prev, this_iter,
get_attr_cycles (prev) - 1, 0);
}
schedule_ebbs_init ();
set_modulo_params (sp_ii, max_parallel, n_real_insns,
sploop_max_uid_iter0);
- tmp_bb = schedule_ebb (BB_HEAD (bb),
- safe_as_a <rtx_insn *> (last_insn),
- true);
+ tmp_bb = schedule_ebb (BB_HEAD (bb), last_insn, true);
schedule_ebbs_finish ();
if (tmp_bb)
/* Compute the number of execute packets the pipelined form of the loop will
require. */
- prev = NULL_RTX;
+ prev = NULL;
n_execute_packets = 0;
- for (insn = loop->start_label; insn != loop->loop_end; insn = NEXT_INSN (insn))
+ for (insn = as_a <rtx_insn *> (loop->start_label);
+ insn != loop->loop_end;
+ insn = NEXT_INSN (insn))
{
if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode
&& !shadow_p (insn))
spot. */
PUT_MODE (end_packet, VOIDmode);
- insn = gen_spkernel (GEN_INT (stages - 1),
- const0_rtx, JUMP_LABEL (loop->loop_end));
- insn = emit_jump_insn_before (insn, end_packet);
+ insn = emit_jump_insn_before (
+ gen_spkernel (GEN_INT (stages - 1),
+ const0_rtx, JUMP_LABEL (loop->loop_end)),
+ end_packet);
JUMP_LABEL (insn) = JUMP_LABEL (loop->loop_end);
insn_set_clock (insn, loop_earliest);
PUT_MODE (insn, TImode);
ready list.
Return index of IMUL producer if it was found and -1 otherwise. */
static int
-do_reorder_for_imul (rtx *ready, int n_ready)
+do_reorder_for_imul (rtx_insn **ready, int n_ready)
{
- rtx insn, set, insn1, insn2;
+ rtx_insn *insn;
+ rtx set, insn1, insn2;
sd_iterator_def sd_it;
dep_t dep;
int index = -1;
scheduled earlier. Applied for Silvermont only.
Return true if top 2 insns must be interchanged. */
static bool
-swap_top_of_ready_list (rtx *ready, int n_ready)
+swap_top_of_ready_list (rtx_insn **ready, int n_ready)
{
- rtx top = ready[n_ready - 1];
- rtx next = ready[n_ready - 2];
+ rtx_insn *top = ready[n_ready - 1];
+ rtx_insn *next = ready[n_ready - 2];
rtx set;
sd_iterator_def sd_it;
dep_t dep;
/* Perform possible reodering of ready list for Atom/Silvermont only.
Return issue rate. */
static int
-ix86_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
- int clock_var)
+ix86_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
+ int *pn_ready, int clock_var)
{
int issue_rate = -1;
int n_ready = *pn_ready;
int i;
- rtx insn;
+ rtx_insn *insn;
int index = -1;
/* Set up issue rate. */
/* Add output dependencies for chain of function adjacent arguments if only
there is a move to likely spilled HW register. Return first argument
if at least one dependence was added or NULL otherwise. */
-static rtx
-add_parameter_dependencies (rtx call, rtx head)
+static rtx_insn *
+add_parameter_dependencies (rtx_insn *call, rtx_insn *head)
{
- rtx insn;
- rtx last = call;
- rtx first_arg = NULL;
+ rtx_insn *insn;
+ rtx_insn *last = call;
+ rtx_insn *first_arg = NULL;
bool is_spilled = false;
head = PREV_INSN (head);
/* Add output or anti dependency from insn to first_arg to restrict its code
motion. */
static void
-avoid_func_arg_motion (rtx first_arg, rtx insn)
+avoid_func_arg_motion (rtx_insn *first_arg, rtx_insn *insn)
{
rtx set;
rtx tmp;
/* Avoid cross block motion of function argument through adding dependency
from the first non-jump instruction in bb. */
static void
-add_dependee_for_func_arg (rtx arg, basic_block bb)
+add_dependee_for_func_arg (rtx_insn *arg, basic_block bb)
{
- rtx insn = BB_END (bb);
+ rtx_insn *insn = BB_END (bb);
while (insn)
{
/* Hook for pre-reload schedule - avoid motion of function arguments
passed in likely spilled HW registers. */
static void
-ix86_dependencies_evaluation_hook (rtx head, rtx tail)
+ix86_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
{
- rtx insn;
- rtx first_arg = NULL;
+ rtx_insn *insn;
+ rtx_insn *first_arg = NULL;
if (reload_completed)
return;
while (head != tail && DEBUG_INSN_P (head))
static int emitted_frame_related_regs[number_of_ia64_frame_regs];
\f
static int ia64_first_cycle_multipass_dfa_lookahead (void);
-static void ia64_dependencies_evaluation_hook (rtx, rtx);
+static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx, int);
static void ia64_sched_init_global (FILE *, int, int);
static void ia64_sched_finish_global (FILE *, int);
static void ia64_sched_finish (FILE *, int);
-static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
-static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
-static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
+static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
+static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
+static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);
static void ia64_asm_unwind_emit (FILE *, rtx);
`ia64_produce_address_p' and the DFA descriptions). */
static void
-ia64_dependencies_evaluation_hook (rtx head, rtx tail)
+ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
{
- rtx insn, next, next_tail;
+ rtx_insn *insn, *next, *next_tail;
/* Before reload, which_alternative is not set, which means that
ia64_safe_itanium_class will produce wrong results for (at least)
Override the default sort algorithm to better slot instructions. */
static int
-ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
+ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
int *pn_ready, int clock_var,
int reorder_type)
{
int n_asms;
int n_ready = *pn_ready;
- rtx *e_ready = ready + n_ready;
- rtx *insnp;
+ rtx_insn **e_ready = ready + n_ready;
+ rtx_insn **insnp;
if (sched_verbose)
fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
for (insnp = ready; insnp < e_ready; insnp++)
if (insnp < e_ready)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
enum attr_type t = ia64_safe_type (insn);
if (t == TYPE_UNKNOWN)
{
if (GET_CODE (PATTERN (insn)) == ASM_INPUT
|| asm_noperands (PATTERN (insn)) >= 0)
{
- rtx lowest = ready[n_asms];
+ rtx_insn *lowest = ready[n_asms];
ready[n_asms] = insn;
*insnp = lowest;
n_asms++;
}
else
{
- rtx highest = ready[n_ready - 1];
+ rtx_insn *highest = ready[n_ready - 1];
ready[n_ready - 1] = insn;
*insnp = highest;
return 1;
while (insnp-- > ready + deleted)
while (insnp >= ready + deleted)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
if (! safe_group_barrier_needed (insn))
break;
memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
while (insnp-- > ready + moved)
while (insnp >= ready + moved)
{
- rtx insn = *insnp;
+ rtx_insn *insn = *insnp;
if (! is_load_p (insn))
break;
memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
the default sort algorithm to better slot instructions. */
static int
-ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
- int clock_var)
+ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
+ int *pn_ready, int clock_var)
{
return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
pn_ready, clock_var, 0);
static int
ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
- int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
+ int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
int *pn_ready, int clock_var)
{
return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
static rtx mep_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static int mep_adjust_cost (rtx, rtx, rtx, int);
static int mep_issue_rate (void);
-static rtx mep_find_ready_insn (rtx *, int, enum attr_slot, int);
-static void mep_move_ready_insn (rtx *, int, rtx);
-static int mep_sched_reorder (FILE *, int, rtx *, int *, int);
+static rtx_insn *mep_find_ready_insn (rtx_insn **, int, enum attr_slot, int);
+static void mep_move_ready_insn (rtx_insn **, int, rtx_insn *);
+static int mep_sched_reorder (FILE *, int, rtx_insn **, int *, int);
static rtx_insn *mep_make_bundle (rtx, rtx_insn *);
static void mep_bundle_insns (rtx_insn *);
static bool mep_rtx_cost (rtx, int, int, int, int *, bool);
return lookup_attribute ("vliw", TYPE_ATTRIBUTES (TREE_TYPE (decl))) != 0;
}
-static rtx
-mep_find_ready_insn (rtx *ready, int nready, enum attr_slot slot, int length)
+static rtx_insn *
+mep_find_ready_insn (rtx_insn **ready, int nready, enum attr_slot slot,
+ int length)
{
int i;
for (i = nready - 1; i >= 0; --i)
{
- rtx insn = ready[i];
+ rtx_insn *insn = ready[i];
if (recog_memoized (insn) >= 0
&& get_attr_slot (insn) == slot
&& get_attr_length (insn) == length)
return insn;
}
- return NULL_RTX;
+ return NULL;
}
static void
-mep_move_ready_insn (rtx *ready, int nready, rtx insn)
+mep_move_ready_insn (rtx_insn **ready, int nready, rtx_insn *insn)
{
int i;
}
static void
-mep_print_sched_insn (FILE *dump, rtx insn)
+mep_print_sched_insn (FILE *dump, rtx_insn *insn)
{
const char *slots = "none";
const char *name = NULL;
static int
mep_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
- int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
+ int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
int *pnready, int clock ATTRIBUTE_UNUSED)
{
int nready = *pnready;
- rtx core_insn, cop_insn;
+ rtx_insn *core_insn, *cop_insn;
int i;
if (dump && sched_verbose > 1)
be <= HIGHER. */
static void
-mips_promote_ready (rtx *ready, int lower, int higher)
+mips_promote_ready (rtx_insn **ready, int lower, int higher)
{
- rtx new_head;
+ rtx_insn *new_head;
int i;
new_head = ready[lower];
instructions if POS2 is not already less than POS1. */
static void
-mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
+mips_maybe_swap_ready (rtx_insn **ready, int pos1, int pos2, int limit)
{
if (pos1 < pos2
&& INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
{
- rtx temp;
+ rtx_insn *temp;
temp = ready[pos1];
ready[pos1] = ready[pos2];
clobber hi or lo. */
static void
-mips_macc_chains_reorder (rtx *ready, int nready)
+mips_macc_chains_reorder (rtx_insn **ready, int nready)
{
int i, j;
vr4130_swap_insns_p says that it could be worthwhile. */
static void
-vr4130_reorder (rtx *ready, int nready)
+vr4130_reorder (rtx_insn **ready, int nready)
{
if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
mips_promote_ready (ready, nready - 2, nready - 1);
together. Swap things around in the ready queue to make this happen. */
static void
-mips_74k_agen_reorder (rtx *ready, int nready)
+mips_74k_agen_reorder (rtx_insn **ready, int nready)
{
int i;
int store_pos, load_pos;
for (i = nready - 1; i >= 0; i--)
{
- rtx insn = ready[i];
+ rtx_insn *insn = ready[i];
if (USEFUL_INSN_P (insn))
switch (get_attr_type (insn))
{
static void
mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
+ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
if (!reload_completed
&& TUNE_MACC_CHAINS
static int
mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
+ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
return mips_issue_rate ();
static int
mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
+ rtx_insn **ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
return cached_can_issue_more;
int picochip_sched_issue_rate (void);
int picochip_sched_adjust_cost (rtx insn, rtx link,
rtx dep_insn, int cost);
-int picochip_sched_reorder (FILE * file, int verbose, rtx * ready,
+int picochip_sched_reorder (FILE * file, int verbose, rtx_insn ** ready,
int *n_readyp, int clock);
void picochip_init_builtins (void);
int
picochip_sched_reorder (FILE * file, int verbose,
- rtx * ready ATTRIBUTE_UNUSED,
+ rtx_insn ** ready ATTRIBUTE_UNUSED,
int *n_readyp ATTRIBUTE_UNUSED, int clock)
{
static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
- rtx *ready ATTRIBUTE_UNUSED,
+ rtx_insn **ready ATTRIBUTE_UNUSED,
int *pn_ready ATTRIBUTE_UNUSED,
int clock_var ATTRIBUTE_UNUSED)
{
&& (recog_memoized (ready[n_ready - 2]) > 0))
/* Simply swap first two insns. */
{
- rtx tmp = ready[n_ready - 1];
+ rtx_insn *tmp = ready[n_ready - 1];
ready[n_ready - 1] = ready[n_ready - 2];
ready[n_ready - 2] = tmp;
}
/* Like rs6000_sched_reorder, but called after issuing each insn. */
static int
-rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
+rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
if (sched_verbose)
{
int pos;
int i;
- rtx tmp, load_mem, str_mem;
+ rtx_insn *tmp;
+ rtx load_mem, str_mem;
if (is_store_insn (last_scheduled_insn, &str_mem))
/* Issuing a store, swing the load_store_pendulum to the left */
for Z10_EARLYLOAD_DISTANCE. A problematic load instruction is
moved to the very end of the ready list. */
static void
-s390_z10_prevent_earlyload_conflicts (rtx *ready, int *nready_p)
+s390_z10_prevent_earlyload_conflicts (rtx_insn **ready, int *nready_p)
{
unsigned int regno;
int nready = *nready_p;
- rtx tmp;
+ rtx_insn *tmp;
int i;
rtx_insn *insn;
rtx set;
return;
tmp = ready[i];
- memmove (&ready[1], &ready[0], sizeof (rtx) * i);
+ memmove (&ready[1], &ready[0], sizeof (rtx_insn *) * i);
ready[0] = tmp;
}
conflicts in the floating point pipeline */
static int
s390_sched_reorder (FILE *file, int verbose,
- rtx *ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
+ rtx_insn **ready, int *nreadyp, int clock ATTRIBUTE_UNUSED)
{
if (s390_tune == PROCESSOR_2097_Z10)
if (reload_completed && *nreadyp > 1)
int last_index = *nreadyp - 1;
int max_index = -1;
int max_score = -1;
- rtx tmp;
+ rtx_insn *tmp;
/* Just move the insn with the highest score to the top (the
end) of the list. A full sort is not needed since a conflict
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
-static void swap_reorder (rtx *, int);
-static void ready_reorder (rtx *, int);
+static void swap_reorder (rtx_insn **, int);
+static void ready_reorder (rtx_insn **, int);
static bool high_pressure (enum machine_mode);
-static int sh_reorder (FILE *, int, rtx *, int *, int);
-static int sh_reorder2 (FILE *, int, rtx *, int *, int);
+static int sh_reorder (FILE *, int, rtx_insn **, int *, int);
+static int sh_reorder2 (FILE *, int, rtx_insn **, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);
static int
rank_for_reorder (const void *x, const void *y)
{
- rtx tmp = *(const rtx *) y;
- rtx tmp2 = *(const rtx *) x;
+ rtx_insn *tmp = *(rtx_insn * const *) y;
+ rtx_insn *tmp2 = *(rtx_insn * const *) x;
/* The insn in a schedule group should be issued the first. */
if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
/* Resort the array A in which only element at index N may be out of order. */
static void
-swap_reorder (rtx *a, int n)
+swap_reorder (rtx_insn **a, int n)
{
- rtx insn = a[n - 1];
+ rtx_insn *insn = a[n - 1];
int i = n - 2;
while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
/* Sort the ready list by ascending priority. */
static void
-ready_reorder (rtx *ready, int nready)
+ready_reorder (rtx_insn **ready, int nready)
{
if (nready == 2)
swap_reorder (ready, nready);
else if (nready > 2)
- qsort (ready, nready, sizeof (rtx), rank_for_reorder);
+ qsort (ready, nready, sizeof (rtx_insn *), rank_for_reorder);
}
/* Count life regions of r0 for a block. */
static int
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx *ready,
+ rtx_insn **ready,
int *n_readyp,
int clock_var ATTRIBUTE_UNUSED)
{
static int
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
int sched_verbose ATTRIBUTE_UNUSED,
- rtx *ready ATTRIBUTE_UNUSED,
+ rtx_insn **ready ATTRIBUTE_UNUSED,
int *n_readyp ATTRIBUTE_UNUSED,
int clock_var ATTRIBUTE_UNUSED)
{
TARGET_SCHED_REORDER2. */
static int
spu_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
- rtx *ready, int *nreadyp, int clock)
+ rtx_insn **ready, int *nreadyp, int clock)
{
int i, nready = *nreadyp;
int pipe_0, pipe_1, pipe_hbrp, pipe_ls, schedule_i;
- rtx insn;
+ rtx_insn *insn;
clock_var = clock;
scheduling priorities of insns.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_REORDER (FILE *@var{file}, int @var{verbose}, rtx *@var{ready}, int *@var{n_readyp}, int @var{clock})
+@deftypefn {Target Hook} int TARGET_SCHED_REORDER (FILE *@var{file}, int @var{verbose}, rtx_insn **@var{ready}, int *@var{n_readyp}, int @var{clock})
This hook is executed by the scheduler after it has scheduled the ready
list, to allow the machine description to reorder it (for example to
combine two small instructions together on @samp{VLIW} machines).
@samp{TARGET_SCHED_REORDER2}.
@end deftypefn
-@deftypefn {Target Hook} int TARGET_SCHED_REORDER2 (FILE *@var{file}, int @var{verbose}, rtx *@var{ready}, int *@var{n_readyp}, int @var{clock})
+@deftypefn {Target Hook} int TARGET_SCHED_REORDER2 (FILE *@var{file}, int @var{verbose}, rtx_insn **@var{ready}, int *@var{n_readyp}, int @var{clock})
Like @samp{TARGET_SCHED_REORDER}, but called at a different time. That
function is called whenever the scheduler starts a new cycle. This one
is called once per iteration over a cycle, immediately after
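
As a rough illustration, not part of this patch, of the strengthened
signature shared by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2, a
target hook might now look like the sketch below; the hook name and the
trivial swap are placeholders:

static int
example_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
                       int verbose ATTRIBUTE_UNUSED,
                       rtx_insn **ready, int *n_readyp,
                       int clock ATTRIBUTE_UNUSED)
{
  /* The ready list now holds rtx_insn * elements; the scheduler reads
     it in reverse order, so ready[*n_readyp - 1] is considered first.
     Swap the two top entries as a trivial reordering.  */
  if (*n_readyp >= 2)
    {
      rtx_insn *tmp = ready[*n_readyp - 1];
      ready[*n_readyp - 1] = ready[*n_readyp - 2];
      ready[*n_readyp - 2] = tmp;
    }

  /* Return the number of insns that can issue this cycle; a real
     target would normally return its issue rate here.  */
  return 1;
}
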
group, and they will not be scheduled apart.
@end deftypefn
-@deftypefn {Target Hook} void TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK (rtx @var{head}, rtx @var{tail})
+@deftypefn {Target Hook} void TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK (rtx_insn *@var{head}, rtx_insn *@var{tail})
This hook is called after evaluation forward dependencies of insns in
chain given by two parameter values (@var{head} and @var{tail}
correspondingly) but before insns scheduling of the insn chain. For
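
Similarly, a dependencies-evaluation hook written against the
strengthened parameters might look like the following sketch, not part
of this patch, with a placeholder name and an empty body:

static void
example_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *next_tail = NEXT_INSN (tail);
  rtx_insn *insn;

  /* Walk the region from HEAD to TAIL inclusive, keeping the iterator
     typed as rtx_insn * throughout.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      {
        /* Inspect or adjust the computed dependencies here, e.g. via
           add_dependence, whose "con"/"pro" params are now rtx_insn *.  */
      }
}
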
/* List of important notes we must keep around. This is a pointer to the
last element in the list. */
-rtx note_list;
+rtx_insn *note_list;
static struct spec_info_def spec_info_var;
/* Description of the speculative part of the scheduling.
/* This records the actual schedule. It is built up during the main phase
of schedule_block, and afterwards used to reorder the insns in the RTL. */
-static vec<rtx> scheduled_insns;
+static vec<rtx_insn *> scheduled_insns;
static int may_trap_exp (const_rtx, int);
struct delay_pair
{
struct delay_pair *next_same_i1;
- rtx i1, i2;
+ rtx_insn *i1, *i2;
int cycles;
/* When doing modulo scheduling, we a delay_pair can also be used to
show that I1 and I2 are the same insn in a different stage. If that
scheduling. */
void
-record_delay_slot_pair (rtx i1, rtx i2, int cycles, int stages)
+record_delay_slot_pair (rtx_insn *i1, rtx_insn *i2, int cycles, int stages)
{
struct delay_pair *p = XNEW (struct delay_pair);
struct delay_pair **slot;
and add dependencies to the real insns to limit the amount of backtracking
needed. */
void
-add_delay_dependencies (rtx insn)
+add_delay_dependencies (rtx_insn *insn)
{
struct delay_pair *pair;
sd_iterator_def sd_it;
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
-static void swap_sort (rtx *, int);
-static void queue_insn (rtx, int, const char *);
-static int schedule_insn (rtx);
+static void swap_sort (rtx_insn **, int);
+static void queue_insn (rtx_insn *, int, const char *);
+static int schedule_insn (rtx_insn *);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
static void extend_h_i_d (void);
unlink_other_notes ()). After scheduling the block, these notes are
inserted at the beginning of the block (in schedule_block()). */
-static void ready_add (struct ready_list *, rtx, bool);
-static rtx ready_remove_first (struct ready_list *);
-static rtx ready_remove_first_dispatch (struct ready_list *ready);
+static void ready_add (struct ready_list *, rtx_insn *, bool);
+static rtx_insn *ready_remove_first (struct ready_list *);
+static rtx_insn *ready_remove_first_dispatch (struct ready_list *ready);
static void queue_to_ready (struct ready_list *);
static int early_queue_to_ready (state_t, struct ready_list *);
/* The following functions are used to implement multi-pass scheduling
on the first cycle. */
-static rtx ready_remove (struct ready_list *, int);
+static rtx_insn *ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
static void fix_inter_tick (rtx, rtx);
-static int fix_tick_ready (rtx);
-static void change_queue_index (rtx, int);
+static int fix_tick_ready (rtx_insn *);
+static void change_queue_index (rtx_insn *, int);
/* The following functions are used to implement scheduling of data/control
speculative instructions. */
static void extend_h_i_d (void);
static void init_h_i_d (rtx);
static int haifa_speculate_insn (rtx, ds_t, rtx *);
-static void generate_recovery_code (rtx);
+static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
-static void begin_speculative_block (rtx);
+static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx);
static void init_before_recovery (basic_block *);
-static void create_check_block_twin (rtx, bool);
+static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static bool haifa_change_pattern (rtx, rtx);
static void dump_new_block_header (int, basic_block, rtx, rtx);
static void fix_jump_move (rtx);
static void move_block_after_check (rtx);
static void move_succs (vec<edge, va_gc> **, basic_block);
-static void sched_remove_insn (rtx);
+static void sched_remove_insn (rtx_insn *);
static void clear_priorities (rtx, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
/* Determine if INSN has a condition that is clobbered if a register
in SET_REGS is modified. */
static bool
-cond_clobbered_p (rtx insn, HARD_REG_SET set_regs)
+cond_clobbered_p (rtx_insn *insn, HARD_REG_SET set_regs)
{
rtx pat = PATTERN (insn);
gcc_assert (GET_CODE (pat) == COND_EXEC);
rtx pro, other, new_pat;
rtx cond = NULL_RTX;
bool success;
- rtx prev = NULL_RTX;
+ rtx_insn *prev = NULL;
int i;
unsigned regno;
}
\f
/* Pointer to the last instruction scheduled. */
-static rtx last_scheduled_insn;
+static rtx_insn *last_scheduled_insn;
/* Pointer to the last nondebug instruction scheduled within the
block, or the prev_head of the scheduling block. Used by
/* Pointer that iterates through the list of unscheduled insns if we
have a dbg_cnt enabled. It always points at an insn prior to the
first unscheduled one. */
-static rtx nonscheduled_insns_begin;
+static rtx_insn *nonscheduled_insns_begin;
/* Compute cost of executing INSN.
This is the number of cycles between instruction issue and
/* Set INSN_REG_PRESSURE_EXCESS_COST_CHANGE for INSNS[0...COUNT-1]. */
static void
-model_set_excess_costs (rtx *insns, int count)
+model_set_excess_costs (rtx_insn **insns, int count)
{
int i, cost, priority_base, priority;
bool print_p;
static int
rank_for_schedule (const void *x, const void *y)
{
- rtx tmp = *(const rtx *) y;
- rtx tmp2 = *(const rtx *) x;
+ rtx_insn *tmp = *(rtx_insn * const *) y;
+ rtx_insn *tmp2 = *(rtx_insn * const *) x;
int tmp_class, tmp2_class;
int val, priority_val, info_val, diff;
/* Resort the array A in which only element at index N may be out of order. */
HAIFA_INLINE static void
-swap_sort (rtx *a, int n)
+swap_sort (rtx_insn **a, int n)
{
- rtx insn = a[n - 1];
+ rtx_insn *insn = a[n - 1];
int i = n - 2;
while (i >= 0 && rank_for_schedule (a + i, &insn) >= 0)
output. */
HAIFA_INLINE static void
-queue_insn (rtx insn, int n_cycles, const char *reason)
+queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
{
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
/* Return a pointer to the bottom of the ready list, i.e. the insn
with the lowest priority. */
-rtx *
+rtx_insn **
ready_lastpos (struct ready_list *ready)
{
gcc_assert (ready->n_ready >= 1);
lowest/highest priority depending on FIRST_P. */
HAIFA_INLINE static void
-ready_add (struct ready_list *ready, rtx insn, bool first_p)
+ready_add (struct ready_list *ready, rtx_insn *insn, bool first_p)
{
if (!first_p)
{
/* Remove the element with the highest priority from the ready list and
return it. */
-HAIFA_INLINE static rtx
+HAIFA_INLINE static rtx_insn *
ready_remove_first (struct ready_list *ready)
{
- rtx t;
+ rtx_insn *t;
gcc_assert (ready->n_ready);
t = ready->vec[ready->first--];
insn with the highest priority is 0, and the lowest priority has
N_READY - 1. */
-rtx
+rtx_insn *
ready_element (struct ready_list *ready, int index)
{
gcc_assert (ready->n_ready && index < ready->n_ready);
for insn with the highest priority is 0, and the lowest priority
has N_READY - 1. */
-HAIFA_INLINE static rtx
+HAIFA_INLINE static rtx_insn *
ready_remove (struct ready_list *ready, int index)
{
- rtx t;
+ rtx_insn *t;
int i;
if (index == 0)
ready_sort (struct ready_list *ready)
{
int i;
- rtx *first = ready_lastpos (ready);
+ rtx_insn **first = ready_lastpos (ready);
if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
{
restart:
for (i = 0; i < ready.n_ready; i++)
{
- rtx x = ready_element (&ready, i);
+ rtx_insn *x = ready_element (&ready, i);
if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
{
ready_remove_insn (x);
restart_queue:
for (link = insn_queue[q]; link; link = XEXP (link, 1))
{
- rtx x = XEXP (link, 0);
+ rtx_insn *x = as_a <rtx_insn *> (XEXP (link, 0));
if (TODO_SPEC (x) == DEP_CONTROL && cond_clobbered_p (x, t))
{
queue_remove (x);
zero for insns in a schedule group). */
static int
-schedule_insn (rtx insn)
+schedule_insn (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
/* Delete notes between HEAD and TAIL and put them in the chain
of notes ended by NOTE_LIST. */
void
-remove_notes (rtx head, rtx tail)
+remove_notes (rtx_insn *head, rtx_insn *tail)
{
- rtx next_tail, insn, next;
+ rtx_insn *next_tail, *insn, *next;
note_list = 0;
if (head == tail && !INSN_P (head))
struct ready_list ready;
state_t curr_state;
- rtx last_scheduled_insn;
+ rtx_insn *last_scheduled_insn;
rtx last_nondebug_scheduled_insn;
- rtx nonscheduled_insns_begin;
+ rtx_insn *nonscheduled_insns_begin;
int cycle_issued_insns;
/* Copies of state used in the inner loop of schedule_block. */
save->ready.n_ready = ready.n_ready;
save->ready.n_debug = ready.n_debug;
save->ready.veclen = ready.veclen;
- save->ready.vec = XNEWVEC (rtx, ready.veclen);
+ save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
if (ready.n_ready > 0)
{
- rtx *first = ready_lastpos (&ready);
+ rtx_insn **first = ready_lastpos (&ready);
for (i = 0; i < ready.n_ready; i++)
FOR_EACH_DEP (first[i], SD_LIST_BACK, sd_it, dep)
if (!DEBUG_INSN_P (DEP_PRO (dep)))
of the queues. */
if (ready.n_ready > 0)
{
- rtx *first = ready_lastpos (&ready);
+ rtx_insn **first = ready_lastpos (&ready);
for (i = 0; i < ready.n_ready; i++)
{
- rtx insn = first[i];
+ rtx_insn *insn = first[i];
QUEUE_INDEX (insn) = QUEUE_NOWHERE;
INSN_TICK (insn) = INVALID_TICK;
}
if (ready.n_ready > 0)
{
- rtx *first = ready_lastpos (&ready);
+ rtx_insn **first = ready_lastpos (&ready);
for (i = 0; i < ready.n_ready; i++)
{
- rtx insn = first[i];
+ rtx_insn *insn = first[i];
QUEUE_INDEX (insn) = QUEUE_READY;
TODO_SPEC (insn) = recompute_todo_spec (insn, true);
INSN_TICK (insn) = save->clock_var;
/* If INSN has no unresolved backwards dependencies, add it to the schedule and
recursively resolve all its forward dependencies. */
static void
-resolve_dependencies (rtx insn)
+resolve_dependencies (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
/* Restore-other-notes: NOTE_LIST is the end of a chain of notes
previously found among the insns. Insert them just before HEAD. */
-rtx
-restore_other_notes (rtx head, basic_block head_bb)
+rtx_insn *
+restore_other_notes (rtx_insn *head, basic_block head_bb)
{
if (note_list != 0)
{
- rtx note_head = note_list;
+ rtx_insn *note_head = note_list;
if (head)
head_bb = BLOCK_FOR_INSN (head);
static void
undo_all_replacements (void)
{
- rtx insn;
+ rtx_insn *insn;
int i;
FOR_EACH_VEC_ELT (scheduled_insns, i, insn)
/* Return first non-scheduled insn in the current scheduling block.
This is mostly used for debug-counter purposes. */
-static rtx
+static rtx_insn *
first_nonscheduled_insn (void)
{
- rtx insn = (nonscheduled_insns_begin != NULL_RTX
- ? nonscheduled_insns_begin
- : current_sched_info->prev_head);
+ rtx_insn *insn = (nonscheduled_insns_begin != NULL_RTX
+ ? nonscheduled_insns_begin
+ : current_sched_info->prev_head);
do
{
static void
queue_to_ready (struct ready_list *ready)
{
- rtx insn;
+ rtx_insn *insn;
rtx link;
rtx skip_insn;
ready list. */
for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
{
- insn = XEXP (link, 0);
+ insn = as_a <rtx_insn *> (XEXP (link, 0));
q_size -= 1;
if (sched_verbose >= 2)
{
for (; link; link = XEXP (link, 1))
{
- insn = XEXP (link, 0);
+ insn = as_a <rtx_insn *> (XEXP (link, 0));
q_size -= 1;
if (sched_verbose >= 2)
static int
early_queue_to_ready (state_t state, struct ready_list *ready)
{
- rtx insn;
+ rtx_insn *insn;
rtx link;
rtx next_link;
rtx prev_link;
while (link)
{
next_link = XEXP (link, 1);
- insn = XEXP (link, 0);
+ insn = as_a <rtx_insn *> (XEXP (link, 0));
if (insn && sched_verbose > 6)
print_rtl_single (sched_dump, insn);
static void
debug_ready_list_1 (struct ready_list *ready, signed char *ready_try)
{
- rtx *p;
+ rtx_insn **p;
int i;
if (ready->n_ready == 0)
/* Move INSN. Reemit notes if needed. Update CFG, if needed. */
static void
-move_insn (rtx insn, rtx last, rtx nt)
+move_insn (rtx_insn *insn, rtx last, rtx nt)
{
if (PREV_INSN (insn) != last)
{
basic_block bb;
- rtx note;
+ rtx_insn *note;
int jump_p = 0;
bb = BLOCK_FOR_INSN (insn);
/* Return true if scheduling INSN will finish current clock cycle. */
static bool
-insn_finishes_cycle_p (rtx insn)
+insn_finishes_cycle_p (rtx_insn *insn)
{
if (SCHED_GROUP_P (insn))
/* After issuing INSN, rest of the sched_group will be forced to issue
int n, i, all, n_ready, best, delay, tries_num;
int more_issue;
struct choice_entry *top;
- rtx insn;
+ rtx_insn *insn;
n_ready = ready->n_ready;
gcc_assert (dfa_lookahead >= 1 && privileged_n >= 0
1 if choose_ready () should be restarted without advancing the cycle. */
static int
choose_ready (struct ready_list *ready, bool first_cycle_insn_p,
- rtx *insn_ptr)
+ rtx_insn **insn_ptr)
{
int lookahead;
if (nonscheduled_insns_begin == NULL_RTX)
nonscheduled_insns_begin = current_sched_info->prev_head;
- rtx insn = first_nonscheduled_insn ();
+ rtx_insn *insn = first_nonscheduled_insn ();
if (QUEUE_INDEX (insn) == QUEUE_READY)
/* INSN is in the ready_list. */
{
/* Try to choose the best insn. */
int index = 0, i;
- rtx insn;
+ rtx_insn *insn;
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
block. TARGET_BB is the argument passed to schedule_block. */
static void
-commit_schedule (rtx prev_head, rtx tail, basic_block *target_bb)
+commit_schedule (rtx_insn *prev_head, rtx tail, basic_block *target_bb)
{
unsigned int i;
- rtx insn;
+ rtx_insn *insn;
last_scheduled_insn = prev_head;
for (i = 0;
for (i = 0; i < ready.n_ready; i++)
{
- rtx insn = ready_element (&ready, i);
+ rtx_insn *insn = ready_element (&ready, i);
if (SCHED_GROUP_P (insn))
{
sched_group_found = true;
int n = ready.n_ready;
for (i = 0; i < n; i++)
{
- rtx insn = ready_element (&ready, i);
+ rtx_insn *insn = ready_element (&ready, i);
int cost = 0;
const char *reason = "resource conflict";
int sort_p, advance, start_clock_var;
/* Head/tail info for this block. */
- rtx prev_head = current_sched_info->prev_head;
+ rtx_insn *prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
- rtx head = NEXT_INSN (prev_head);
- rtx tail = PREV_INSN (next_tail);
+ rtx_insn *head = NEXT_INSN (prev_head);
+ rtx_insn *tail = PREV_INSN (next_tail);
if ((current_sched_info->flags & DONT_BREAK_DEPENDENCIES) == 0
&& sched_pressure != SCHED_PRESSURE_MODEL)
/* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head;
last_nondebug_scheduled_insn = NULL_RTX;
- nonscheduled_insns_begin = NULL_RTX;
+ nonscheduled_insns_begin = NULL;
gcc_assert ((NOTE_P (last_scheduled_insn)
|| DEBUG_INSN_P (last_scheduled_insn))
activated make an exception for the insn right after
nonscheduled_insns_begin. */
{
- rtx skip_insn;
+ rtx_insn *skip_insn;
if (dbg_cnt (sched_insn) == false)
skip_insn = first_nonscheduled_insn ();
else
- skip_insn = NULL_RTX;
+ skip_insn = NULL;
while (i < ready.n_ready)
{
- rtx insn;
+ rtx_insn *insn;
insn = ready_remove (&ready, i);
ls.can_issue_more = issue_rate;
for (;;)
{
- rtx insn;
+ rtx_insn *insn;
int cost;
bool asm_p;
{
while (ready.n_ready && DEBUG_INSN_P (ready_element (&ready, 0)))
{
- rtx insn = ready_remove_first (&ready);
+ rtx_insn *insn = ready_remove_first (&ready);
gcc_assert (DEBUG_INSN_P (insn));
(*current_sched_info->begin_schedule_ready) (insn);
scheduled_insns.safe_push (insn);
{
int res;
- insn = NULL_RTX;
+ insn = NULL;
res = choose_ready (&ready, ls.first_cycle_insn_p, &insn);
if (res < 0)
while (must_backtrack)
{
struct haifa_saved_data *failed;
- rtx failed_insn;
+ rtx_insn *failed_insn;
must_backtrack = false;
failed = verify_shadows ();
}
for (i = ready.n_ready - 1; i >= 0; i--)
{
- rtx x;
+ rtx_insn *x;
x = ready_element (&ready, i);
resolve_dependencies (x);
rtx link;
while ((link = insn_queue[i]) != NULL)
{
- rtx x = XEXP (link, 0);
+ rtx_insn *x = as_a <rtx_insn *> (XEXP (link, 0));
insn_queue[i] = XEXP (link, 1);
QUEUE_INDEX (x) = QUEUE_NOWHERE;
free_INSN_LIST_node (link);
int n_insn;
int sched_max_insns_priority =
current_sched_info->sched_max_insns_priority;
- rtx prev_head;
+ rtx_insn *prev_head;
if (head == tail && ! INSN_P (head))
gcc_unreachable ();
0 - added to the ready list,
0 < N - queued for N cycles. */
int
-try_ready (rtx next)
+try_ready (rtx_insn *next)
{
ds_t old_ts, new_ts;
/* Calculate INSN_TICK of NEXT and add it to either ready or queue list. */
static int
-fix_tick_ready (rtx next)
+fix_tick_ready (rtx_insn *next)
{
int tick, delay;
or add it to the ready list (DELAY == QUEUE_READY),
or remove it from ready and queue lists at all (DELAY == QUEUE_NOWHERE). */
static void
-change_queue_index (rtx next, int delay)
+change_queue_index (rtx_insn *next, int delay)
{
int i = QUEUE_INDEX (next);
i = sched_ready_n_insns + 1;
ready.veclen = new_sched_ready_n_insns + issue_rate;
- ready.vec = XRESIZEVEC (rtx, ready.vec, ready.veclen);
+ ready.vec = XRESIZEVEC (rtx_insn *, ready.vec, ready.veclen);
gcc_assert (new_sched_ready_n_insns >= sched_ready_n_insns);
/* Generates recovery code for INSN. */
static void
-generate_recovery_code (rtx insn)
+generate_recovery_code (rtx_insn *insn)
{
if (TODO_SPEC (insn) & BEGIN_SPEC)
begin_speculative_block (insn);
/* Generates recovery code for BEGIN speculative INSN. */
static void
-begin_speculative_block (rtx insn)
+begin_speculative_block (rtx_insn *insn)
{
if (TODO_SPEC (insn) & BEGIN_DATA)
nr_begin_data++;
/* This function creates recovery code for INSN. If MUTATE_P is nonzero,
INSN is a simple check, that should be converted to branchy one. */
static void
-create_check_block_twin (rtx insn, bool mutate_p)
+create_check_block_twin (rtx_insn *insn, bool mutate_p)
{
basic_block rec;
- rtx label, check, twin;
+ rtx_insn *label, *check, *twin;
+ rtx check_pat;
ds_t fs;
sd_iterator_def sd_it;
dep_t dep;
else
{
rec = EXIT_BLOCK_PTR_FOR_FN (cfun);
- label = NULL_RTX;
+ label = NULL;
}
/* Emit CHECK. */
- check = targetm.sched.gen_spec_check (insn, label, todo_spec);
+ check_pat = targetm.sched.gen_spec_check (insn, label, todo_spec);
if (rec != EXIT_BLOCK_PTR_FOR_FN (cfun))
{
we emit check BEFORE insn, so insn after splitting
insn will be at the beginning of second_bb, which will
provide us with the correct life information. */
- check = emit_jump_insn_before (check, insn);
+ check = emit_jump_insn_before (check_pat, insn);
JUMP_LABEL (check) = label;
LABEL_NUSES (label)++;
}
else
- check = emit_insn_before (check, insn);
+ check = emit_insn_before (check_pat, insn);
/* Extend data structures. */
haifa_init_insn (check);
/* Try to add instructions to the ready or queue list. */
for (link = ready_list; link; link = XEXP (link, 1))
- try_ready (XEXP (link, 0));
+ try_ready (as_a <rtx_insn *> (XEXP (link, 0)));
free_INSN_LIST_list (&ready_list);
/* Fixing jump's dependences. */
/* Remove INSN from the instruction stream.
INSN should not have any dependencies. */
static void
-sched_remove_insn (rtx insn)
+sched_remove_insn (rtx_insn *insn)
{
sd_finish_insn (insn);
/* Insert PAT as an INSN into the schedule and update the necessary data
structures to account for it. */
-rtx
+rtx_insn *
sched_emit_insn (rtx pat)
{
- rtx insn = emit_insn_before (pat, first_nonscheduled_insn ());
+ rtx_insn *insn = emit_insn_before (pat, first_nonscheduled_insn ());
haifa_init_insn (insn);
if (current_sched_info->add_remove_insn)
/* This function returns a candidate satisfying dispatch constraints from
the ready list. */
-static rtx
+static rtx_insn *
ready_remove_first_dispatch (struct ready_list *ready)
{
int i;
- rtx insn = ready_element (ready, 0);
+ rtx_insn *insn = ready_element (ready, 0);
if (ready->n_ready == 1
|| !INSN_P (insn)
loop->loop_end = tail_insn;
loop->iter_reg = reg;
vec_alloc (loop->incoming, 2);
- loop->start_label = JUMP_LABEL (tail_insn);
+ loop->start_label = as_a <rtx_insn *> (JUMP_LABEL (tail_insn));
if (EDGE_COUNT (tail_bb->succs) != 2)
{
code in order to use sched_analyze() for computing the dependencies.
They are used when initializing the sched_info structure. */
static const char *
-sms_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
+sms_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
static char tmp[80];
static bool mark_as_hard;
static int deps_may_trap_p (const_rtx);
-static void add_dependence_1 (rtx, rtx, enum reg_note);
-static void add_dependence_list (rtx, rtx, int, enum reg_note, bool);
-static void add_dependence_list_and_free (struct deps_desc *, rtx,
+static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
+static void add_dependence_list (rtx_insn *, rtx, int, enum reg_note, bool);
+static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
rtx *, int, enum reg_note, bool);
static void delete_all_dependences (rtx);
-static void chain_to_prev_insn (rtx);
+static void chain_to_prev_insn (rtx_insn *);
-static void flush_pending_lists (struct deps_desc *, rtx, int, int);
-static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
-static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
-static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
+static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
+static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
static bool sched_has_condition_p (const_rtx);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
impossible; otherwise we add additional true dependencies on the
INSN_COND_DEPS list of the jump (which PRO must be). */
void
-add_dependence (rtx con, rtx pro, enum reg_note dep_type)
+add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
if (dep_type == REG_DEP_CONTROL
&& !(current_sched_info->flags & DO_PREDICATION))
true if DEP_NONREG should be set on newly created dependencies. */
static void
-add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type,
+add_dependence_list (rtx_insn *insn, rtx list, int uncond, enum reg_note dep_type,
bool hard)
{
mark_as_hard = hard;
for (; list; list = XEXP (list, 1))
{
if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
- add_dependence (insn, XEXP (list, 0), dep_type);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (list, 0)), dep_type);
}
mark_as_hard = false;
}
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
+add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn, rtx *listp,
int uncond, enum reg_note dep_type, bool hard)
{
add_dependence_list (insn, *listp, uncond, dep_type, hard);
the previous nonnote insn. */
static void
-chain_to_prev_insn (rtx insn)
+chain_to_prev_insn (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
- rtx prev_nonnote;
+ rtx_insn *prev_nonnote;
FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
{
- rtx i = insn;
+ rtx_insn *i = insn;
rtx_insn *pro = DEP_PRO (dep);
do
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
+flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
int for_write)
{
if (for_write)
}
\f
/* Instruction which dependencies we are analyzing. */
-static rtx cur_insn = NULL_RTX;
+static rtx_insn *cur_insn = NULL;
/* Implement hooks for haifa scheduler. */
{
gcc_assert (insn && !cur_insn);
- cur_insn = insn;
+ cur_insn = as_a <rtx_insn *> (insn);
}
static void
}
static void
-note_dep (rtx e, ds_t ds)
+note_dep (rtx_insn *e, ds_t ds)
{
if (sched_deps_info->note_dep)
sched_deps_info->note_dep (e, ds);
static void
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
- enum rtx_code ref, rtx insn)
+ enum rtx_code ref, rtx_insn *insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
if (!reload_completed && sel_sched_p ()
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
int i;
int j;
}
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
+ REG_DEP_ANTI);
for (u = deps->pending_jump_insns; u; u = XEXP (u, 1))
if (deps_may_trap_p (x))
ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
MAX_DEP_WEAK);
- note_dep (XEXP (u, 0), ds);
+ note_dep (as_a <rtx_insn *> (XEXP (u, 0)), ds);
}
else
- add_dependence (insn, XEXP (u, 0), REG_DEP_CONTROL);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)),
+ REG_DEP_CONTROL);
}
}
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
+sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
while (pending)
{
if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (pending, 0)),
+ REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
{
if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (pending, 0)),
+ REG_DEP_OUTPUT);
pending = XEXP (pending, 1);
pending_mem = XEXP (pending_mem, 1);
}
/* Add register dependencies for insn. */
if (DEBUG_INSN_P (insn))
{
- rtx prev = deps->last_debug_insn;
+ rtx_insn *prev = deps->last_debug_insn;
rtx u;
if (!deps->readonly)
if (!sel_sched_p ())
for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ add_dependence (insn, as_a <rtx_insn *> (XEXP (u, 0)), REG_DEP_ANTI);
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps_desc *deps, rtx insn)
+deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
+sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
- rtx insn;
+ rtx_insn *insn;
if (sched_deps_info->use_cselib)
cselib_init (CSELIB_RECORD_MEMORY);
This function can handle same INSN and ELEM (INSN == ELEM).
It is a convenience wrapper. */
static void
-add_dependence_1 (rtx insn, rtx elem, enum reg_note dep_type)
+add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
ds_t ds;
bool internal;
insns which depend on each other, but could possibly be interchanged. */
struct mem_inc_info
{
- rtx inc_insn;
- rtx mem_insn;
+ rtx_insn *inc_insn;
+ rtx_insn *mem_insn;
rtx *mem_loc;
/* A register occurring in the memory address for which we wish to break
a corresponding memory reference. */
static bool
-parse_add_or_inc (struct mem_inc_info *mii, rtx insn, bool before_mem)
+parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
rtx pat = single_set (insn);
rtx src, cst;
dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
rtx_insn *pro = DEP_PRO (dep);
rtx_insn *con = DEP_CON (dep);
- rtx inc_cand = backwards ? pro : con;
+ rtx_insn *inc_cand = backwards ? pro : con;
if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
goto next;
if (parse_add_or_inc (mii, inc_cand, backwards))
dependencies that can be broken by modifying one of the patterns. */
void
-find_modifiable_mems (rtx head, rtx tail)
+find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
- rtx insn, next_tail = NEXT_INSN (tail);
+ rtx_insn *insn, *next_tail = NEXT_INSN (tail);
int success_in_block = 0;
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
-static void begin_schedule_ready (rtx);
+static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
-static const char *ebb_print_insn (const_rtx, int);
-static int rank (rtx, rtx);
-static int ebb_contributes_to_priority (rtx, rtx);
+static const char *ebb_print_insn (const rtx_insn *, int);
+static int rank (rtx_insn *, rtx_insn *);
+static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx, rtx);
-static void ebb_add_remove_insn (rtx, int);
+static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
-static basic_block advance_target_bb (basic_block, rtx);
+static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);
/* Allocate memory and store the state of the frontend. Return the allocated
int n = 0;
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
- rtx insn;
+ rtx_insn *insn;
sched_rgn_n_insns = 0;
/* INSN is being scheduled after LAST. Update counters. */
static void
-begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
+begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
sched_rgn_n_insns++;
}
/* INSN is being moved to its place in the schedule, after LAST. */
static void
-begin_move_insn (rtx insn, rtx last)
+begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
if (BLOCK_FOR_INSN (insn) == last_bb
/* INSN is a jump in the last block, ... */
to be formatted so that multiple output lines will line up nicely. */
static const char *
-ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
+ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
static char tmp[80];
is to be preferred. Zero if they are equally good. */
static int
-rank (rtx insn1, rtx insn2)
+rank (rtx_insn *insn1, rtx_insn *insn2)
{
basic_block bb1 = BLOCK_FOR_INSN (insn1);
basic_block bb2 = BLOCK_FOR_INSN (insn2);
calculations. */
static int
-ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
- rtx insn ATTRIBUTE_UNUSED)
+ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
+ rtx_insn *insn ATTRIBUTE_UNUSED)
{
return 1;
}
/* INSN has been added to/removed from current ebb. */
static void
-ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
+ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
if (!remove_p)
rgn_n_insns++;
/* Return next block in ebb chain. For parameter meaning please refer to
sched-int.h: struct sched_info: advance_target_bb. */
static basic_block
-advance_target_bb (basic_block bb, rtx insn)
+advance_target_bb (basic_block bb, rtx_insn *insn)
{
if (insn)
{
/* This list holds ripped off notes from the current block. These notes will
be attached to the beginning of the block when its scheduling is
finished. */
-extern rtx note_list;
+extern rtx_insn *note_list;
-extern void remove_notes (rtx, rtx);
-extern rtx restore_other_notes (rtx, basic_block);
+extern void remove_notes (rtx_insn *, rtx_insn *);
+extern rtx_insn *restore_other_notes (rtx_insn *, basic_block);
extern void sched_insns_init (rtx);
extern void sched_insns_finish (void);
N_DEBUG determines how many debug insns are on the ready list. */
struct ready_list
{
- rtx *vec;
+ rtx_insn **vec;
int veclen;
int first;
int n_ready;
rtx *loc;
rtx orig;
rtx newval;
- rtx insn;
+ rtx_insn *insn;
};
/* Information about the dependency. */
enum post_call_group in_post_call_group_p;
/* The last debug insn we've seen. */
- rtx last_debug_insn;
+ rtx_insn *last_debug_insn;
/* The last insn bearing REG_ARGS_SIZE that we've seen. */
- rtx last_args_size;
+ rtx_insn *last_args_size;
/* The maximum register number for the following arrays. Before reload
this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER. */
void (*init_ready_list) (void);
/* Called after taking an insn from the ready list. Returns nonzero if
this insn can be scheduled, zero if we should silently discard it. */
- int (*can_schedule_ready_p) (rtx);
+ int (*can_schedule_ready_p) (rtx_insn *);
/* Return nonzero if there are more insns that should be scheduled. */
int (*schedule_more_p) (void);
/* Called after an insn has all its hard dependencies resolved.
to indicate if instruction should be moved to the ready list or the
queue, or if it should be silently discarded (until the next resolved
dependence). */
- ds_t (*new_ready) (rtx, ds_t);
+ ds_t (*new_ready) (rtx_insn *, ds_t);
/* Compare priority of two insns. Return a positive number if the second
insn is to be preferred for scheduling, and a negative one if the first
is to be preferred. Zero if they are equally good. */
- int (*rank) (rtx, rtx);
+ int (*rank) (rtx_insn *, rtx_insn *);
/* Return a string that contains the insn uid and optionally anything else
necessary to identify this insn in an output. It's valid to use a
static buffer for this. The ALIGNED parameter should cause the string
to be formatted so that multiple output lines will line up nicely. */
- const char *(*print_insn) (const_rtx, int);
+ const char *(*print_insn) (const rtx_insn *, int);
/* Return nonzero if an insn should be included in priority
calculations. */
- int (*contributes_to_priority) (rtx, rtx);
+ int (*contributes_to_priority) (rtx_insn *, rtx_insn *);
/* Return true if scheduling insn (passed as the parameter) will trigger
finish of scheduling current block. */
- bool (*insn_finishes_block_p) (rtx);
+ bool (*insn_finishes_block_p) (rtx_insn *);
/* The boundaries of the set of insns to be scheduled. */
rtx_insn *prev_head, *next_tail;
/* Filled in after the schedule is finished; the first and last scheduled
insns. */
- rtx head, tail;
+ rtx_insn *head, *tail;
/* If nonzero, enables an additional sanity check in schedule_block. */
unsigned int queue_must_finish_empty:1;
/* Called to notify frontend that instruction is being added (second
parameter == 0) or removed (second parameter == 1). */
- void (*add_remove_insn) (rtx, int);
+ void (*add_remove_insn) (rtx_insn *, int);
/* Called to notify the frontend that instruction INSN is being
scheduled. */
- void (*begin_schedule_ready) (rtx insn);
+ void (*begin_schedule_ready) (rtx_insn *insn);
/* Called to notify the frontend that an instruction INSN is about to be
moved to its correct place in the final schedule. This is done for all
insns in order of the schedule. LAST indicates the last scheduled
instruction. */
- void (*begin_move_insn) (rtx insn, rtx last);
+ void (*begin_move_insn) (rtx_insn *insn, rtx_insn *last);
/* If the second parameter is not NULL, return a nonnull value if the
basic block should be advanced.
If the second parameter is NULL, return the next basic block in EBB.
The first parameter is the current basic block in EBB. */
- basic_block (*advance_target_bb) (basic_block, rtx);
+ basic_block (*advance_target_bb) (basic_block, rtx_insn *);
/* Allocate memory, store the frontend scheduler state in it, and
return it. */
void (*note_mem_dep) (rtx mem1, rtx mem2, rtx insn2, ds_t ds);
/* Note a dependence of type DS from the INSN. */
- void (*note_dep) (rtx insn, ds_t ds);
+ void (*note_dep) (rtx, ds_t ds);
/* Nonzero if we should use cselib for better alias analysis. This
must be 0 if the dependency information is used after sched_analyze
extern rtx sched_get_reverse_condition_uncached (const_rtx);
extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
-extern void add_dependence (rtx, rtx, enum reg_note);
-extern void sched_analyze (struct deps_desc *, rtx, rtx);
+extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
+extern void sched_analyze (struct deps_desc *, rtx_insn *, rtx_insn *);
extern void init_deps (struct deps_desc *, bool);
extern void init_deps_reg_last (struct deps_desc *);
extern void free_deps (struct deps_desc *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
-extern void deps_analyze_insn (struct deps_desc *, rtx);
+extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
extern void remove_from_deps (struct deps_desc *, rtx);
extern void init_insn_reg_pressure_info (rtx);
extern int dfa_lookahead;
extern void ready_sort (struct ready_list *);
-extern rtx ready_element (struct ready_list *, int);
-extern rtx *ready_lastpos (struct ready_list *);
+extern rtx_insn *ready_element (struct ready_list *, int);
+extern rtx_insn **ready_lastpos (struct ready_list *);
-extern int try_ready (rtx);
+extern int try_ready (rtx_insn *);
extern void sched_extend_ready_list (int);
extern void sched_finish_ready_list (void);
extern void sched_change_pattern (rtx, rtx);
extern void add_block (basic_block, basic_block);
extern rtx_note *bb_note (basic_block);
extern void concat_note_lists (rtx, rtx *);
-extern rtx sched_emit_insn (rtx);
+extern rtx_insn *sched_emit_insn (rtx);
extern rtx get_ready_element (int);
extern int number_in_ready (void);
\f
extern bool sched_no_dce;
extern void set_modulo_params (int, int, int, int);
-extern void record_delay_slot_pair (rtx, rtx, int, int);
+extern void record_delay_slot_pair (rtx_insn *, rtx_insn *, int, int);
extern rtx real_insn_for_shadow (rtx);
extern void discard_delay_pairs_above (int);
extern void free_delay_pairs (void);
-extern void add_delay_dependencies (rtx);
+extern void add_delay_dependencies (rtx_insn *);
extern bool sched_is_disabled_for_current_region_p (void);
extern void sched_rgn_init (bool);
extern void sched_rgn_finish (void);
extern void debug_rgn_dependencies (int);
extern void debug_dependencies (rtx, rtx);
extern void free_rgn_deps (void);
-extern int contributes_to_priority (rtx, rtx);
+extern int contributes_to_priority (rtx_insn *, rtx_insn *);
extern void extend_rgns (int *, int *, sbitmap, int *);
extern void deps_join (struct deps_desc *, struct deps_desc *);
extern void haifa_sched_init (void);
extern void haifa_sched_finish (void);
-extern void find_modifiable_mems (rtx, rtx);
+extern void find_modifiable_mems (rtx_insn *, rtx_insn *);
/* sched-deps.c interface to walk, add, search, update, resolve, delete
and debug instruction dependencies. */
static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
-static void add_branch_dependences (rtx, rtx);
+static void add_branch_dependences (rtx_insn *, rtx_insn *);
static void compute_block_dependences (int);
static void schedule_region (int);
if (MAY_HAVE_DEBUG_INSNS)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
if (DEBUG_INSN_P (insn))
ready-list or before the scheduling. */
static int
-check_live (rtx insn, int src)
+check_live (rtx_insn *insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
-static int can_schedule_ready_p (rtx);
-static void begin_schedule_ready (rtx);
-static ds_t new_ready (rtx, ds_t);
+static int can_schedule_ready_p (rtx_insn *);
+static void begin_schedule_ready (rtx_insn *);
+static ds_t new_ready (rtx_insn *, ds_t);
static int schedule_more_p (void);
-static const char *rgn_print_insn (const_rtx, int);
-static int rgn_rank (rtx, rtx);
+static const char *rgn_print_insn (const rtx_insn *, int);
+static int rgn_rank (rtx_insn *, rtx_insn *);
static void compute_jump_reg_dependencies (rtx, regset);
/* Functions for speculative scheduling. */
-static void rgn_add_remove_insn (rtx, int);
+static void rgn_add_remove_insn (rtx_insn *, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
-static basic_block advance_target_bb (basic_block, rtx);
+static basic_block advance_target_bb (basic_block, rtx_insn *);
/* Return nonzero if there are more insns that should be scheduled. */
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
int bb_src;
- rtx insn;
+ rtx_insn *insn;
target_n_insns = 0;
sched_target_n_insns = 0;
insn can be scheduled, zero if we should silently discard it. */
static int
-can_schedule_ready_p (rtx insn)
+can_schedule_ready_p (rtx_insn *insn)
{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb
can_schedule_ready_p () differs from the one passed to
begin_schedule_ready (). */
static void
-begin_schedule_ready (rtx insn)
+begin_schedule_ready (rtx_insn *insn)
{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
Return nonzero if it should be moved to the ready list or the queue, or zero
if we should silently discard it. */
static ds_t
-new_ready (rtx next, ds_t ts)
+new_ready (rtx_insn *next, ds_t ts)
{
if (INSN_BB (next) != target_bb)
{
to be formatted so that multiple output lines will line up nicely. */
static const char *
-rgn_print_insn (const_rtx insn, int aligned)
+rgn_print_insn (const rtx_insn *insn, int aligned)
{
static char tmp[80];
is to be preferred. Zero if they are equally good. */
static int
-rgn_rank (rtx insn1, rtx insn2)
+rgn_rank (rtx_insn *insn1, rtx_insn *insn2)
{
/* Some comparison make sense in interblock scheduling only. */
if (INSN_BB (insn1) != INSN_BB (insn2))
calculations. */
int
-contributes_to_priority (rtx next, rtx insn)
+contributes_to_priority (rtx_insn *next, rtx_insn *insn)
{
/* NEXT and INSN reside in one ebb. */
return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
/* Return true if scheduling INSN will trigger finish of scheduling
current block. */
static bool
-rgn_insn_finishes_block_p (rtx insn)
+rgn_insn_finishes_block_p (rtx_insn *insn)
{
if (INSN_BB (insn) == target_bb
&& sched_target_n_insns + 1 == target_n_insns)
/* Add dependences so that branches are scheduled to run last in their
block. */
static void
-add_branch_dependences (rtx head, rtx tail)
+add_branch_dependences (rtx_insn *head, rtx_insn *tail)
{
- rtx insn, last;
+ rtx_insn *insn, *last;
/* For all branches, calls, uses, clobbers, cc0 setters, and instructions
that can throw exceptions, force them to remain in order at the end of
/* INSN has been added to/removed from current region. */
static void
-rgn_add_remove_insn (rtx insn, int remove_p)
+rgn_add_remove_insn (rtx_insn *insn, int remove_p)
{
if (!remove_p)
rgn_n_insns++;
/* Return next block in ebb chain. For parameter meaning please refer to
sched-int.h: struct sched_info: advance_target_bb. */
static basic_block
-advance_target_bb (basic_block bb, rtx insn)
+advance_target_bb (basic_block bb, rtx_insn *insn)
{
if (insn)
return 0;
/* Pretty print INSN. This is used as a hook. */
const char *
-sel_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
+sel_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
static char buf[80];
/* Functions from sel-sched-dump.c. */
extern void sel_print (const char *fmt, ...) ATTRIBUTE_PRINTF_1;
-extern const char * sel_print_insn (const_rtx, int);
+extern const char * sel_print_insn (const rtx_insn *, int);
extern void free_sel_dump_data (void);
extern void block_start (void);
advance_deps_context (deps_t dc, insn_t insn)
{
sched_deps_info = &advance_deps_context_sched_deps_info;
- deps_analyze_insn (dc, insn);
+ deps_analyze_insn (dc, as_a <rtx_insn *> (insn));
}
\f
sched_deps_info = &deps_init_id_sched_deps_info;
- deps_analyze_insn (dc, insn);
+ deps_analyze_insn (dc, as_a <rtx_insn *> (insn));
free_deps (dc);
insn_t insn = VINSN_INSN_RTX (vi);
ready_try[n] = 0;
- ready.vec[n] = insn;
+ ready.vec[n] = as_a <rtx_insn *> (insn);
}
}
if (issue_more && ran_hook)
{
int i, j, n;
- rtx *arr = ready.vec;
+ rtx_insn **arr = ready.vec;
expr_t *vec = vec_av_set.address ();
for (i = 0, n = ready.n_ready; i < n; i++)
the number of ready insns. The return value is the number of insns that\n\
can issue this cycle; normally this is just @code{issue_rate}. See also\n\
@samp{TARGET_SCHED_REORDER2}.",
- int, (FILE *file, int verbose, rtx *ready, int *n_readyp, int clock), NULL)
+ int, (FILE *file, int verbose, rtx_insn **ready, int *n_readyp, int clock), NULL)
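For a backend, the only change the strengthened hook requires is the type of the READY parameter. A deliberately minimal sketch of a reorder hook written against the new signature; the function name and the fixed two-insn issue width are invented for illustration, and a real port would permute READY rather than merely scan it:

static int
example_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                       rtx_insn **ready, int *n_readyp,
                       int clock ATTRIBUTE_UNUSED)
{
  /* READY[0 .. *n_readyp - 1] now holds rtx_insn *, so instruction
     accessors can be used without casting from rtx.  */
  for (int i = *n_readyp - 1; i >= 0; i--)
    gcc_assert (INSN_P (ready[i]));
  return 2;   /* Invented issue width; normally the port's issue rate.  */
}

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER example_sched_reorder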
DEFHOOK
(reorder2,
this hook can be useful if there are frequent situations where\n\
scheduling one insn causes other insns to become ready in the same\n\
cycle. These other insns can then be taken into account properly.",
- int, (FILE *file, int verbose, rtx *ready, int *n_readyp, int clock), NULL)
+ int, (FILE *file, int verbose, rtx_insn **ready, int *n_readyp, int clock), NULL)
DEFHOOK
(macro_fusion_p,
analysis of dependencies. This hook can use backward and forward\n\
dependencies of the insn scheduler because they are already\n\
calculated.",
- void, (rtx head, rtx tail), NULL)
+ void, (rtx_insn *head, rtx_insn *tail), NULL)
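Likewise for this hook, HEAD and TAIL now arrive as rtx_insn *, so walking the analyzed range needs no casts. A hypothetical implementation (the function name is invented; FOR_EACH_DEP, DEP_PRO and DEP_CON are the sched-int.h walkers already used above) that merely dumps each backward dependence in the range:

static void
example_dependencies_evaluation (rtx_insn *head, rtx_insn *tail)
{
  for (rtx_insn *insn = head; insn != NEXT_INSN (tail); insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        sd_iterator_def sd_it;
        dep_t dep;
        FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
          if (dump_file)
            fprintf (dump_file, ";; dep %d -> %d\n",
                     INSN_UID (DEP_PRO (dep)), INSN_UID (DEP_CON (dep)));
      }
}

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK example_dependencies_evaluation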
/* The values of the following four members are pointers to functions
used to simplify the automaton descriptions. dfa_pre_cycle_insn and