+2014-08-26 David Malcolm <dmalcolm@redhat.com>
+
+ * rtx-classes-status.txt (TODO): Remove SET_BND_TO.
+
2014-08-25 David Malcolm <dmalcolm@redhat.com>
* rtx-classes-status.txt (TODO): Remove SET_BB_NOTE_LIST.
+2014-08-26 David Malcolm <dmalcolm@redhat.com>
+
+ * sel-sched-ir.h (insn_t): Strengthen from rtx to rtx_insn *.
+ (BND_TO): Delete this function and...
+ (SET_BND_TO): ...this function in favor of...
+ (BND_TO): ...reinstating this macro.
+ (struct _fence): Strengthen field "executing_insns" from
+ vec<rtx, va_gc> * to vec<rtx_insn *, va_gc> *. Strengthen fields
+ "last_scheduled_insn" and "sched_next" from rtx to rtx_insn *.
+ (_succ_iter_cond): Update param "succp" from rtx * to insn_t *
+ and param "insn" from rtx to insn_t.
+ (create_vinsn_from_insn_rtx): Strengthen first param from rtx to
+ rtx_insn *.
+
+ * sched-int.h (insn_vec_t): Strengthen from vec<rtx> to
+ vec<rtx_insn *>.
+ (rtx_vec_t): Likewise.
+ (struct sched_deps_info_def): Strengthen param of "start_insn"
+ callback from rtx to rtx_insn *. Likewise for param "insn2" of
+ "note_mem_dep" callback and first param of "note_dep" callback.
+
+ * haifa-sched.c (add_to_speculative_block): Strengthen param
+ "insn" from rtx to rtx_insn *.
+ (clear_priorities): Likewise.
+ (calc_priorities): Likewise for local "insn".
+
+ * sched-deps.c (haifa_start_insn): Likewise for param "insn".
+ Remove redundant checked cast.
+ (haifa_note_mem_dep): Likewise for param "pending_insn".
+ (haifa_note_dep): Likewise for param "elem".
+ (note_mem_dep): Likewise for param "e".
+ (sched_analyze_1): Add checked casts.
+ (sched_analyze_2): Likewise.
+
+ * sel-sched-dump.c (dump_insn_vector): Strengthen local "succ"
+ from rtx to rtx_insn *.
+ (debug): Update param from vec<rtx> & to vec<rtx_insn *> &, and
+ from vec<rtx> * to vec<rtx_insn *> *.
+
+ * sel-sched-ir.c (blist_add): Remove use of SET_BND_TO
+ scaffolding.
+ (flist_add): Strengthen param "executing_insns" from
+ vec<rtx, va_gc> * to vec<rtx_insn *, va_gc> *.
+ (advance_deps_context): Remove now-redundant checked cast.
+ (init_fences): Replace uses of NULL_RTX with NULL.
+ (merge_fences): Strengthen params "last_scheduled_insn" and
+ "sched_next" from rtx to rtx_insn * and "executing_insns" from
+ vec<rtx, va_gc> * to vec<rtx_insn *, va_gc> *.
+ (add_clean_fence_to_fences): Replace uses of NULL_RTX with NULL.
+ (get_nop_from_pool): Add local "nop_pat" so that "nop" can be
+ an instruction, rather than doing double-duty as a pattern.
+ (return_nop_to_pool): Update for change of insn_t.
+ (deps_init_id): Remove now-redundant checked cast.
+ (struct sched_scan_info_def): Strengthen param of "init_insn"
+ callback from rtx to insn_t.
+ (sched_scan): Strengthen local "insn" from rtx to rtx_insn *.
+ (init_global_and_expr_for_insn): Replace uses of NULL_RTX with
+ NULL.
+ (get_seqno_by_succs): Strengthen param "insn" and locals "tmp",
+ "end" from rtx to rtx_insn *.
+ (create_vinsn_from_insn_rtx): Likewise for param "insn_rtx".
+ (rtx insn_rtx, bool force_unique_p)
+ (BND_TO): Delete function.
+ (SET_BND_TO): Delete function.
+
+ * sel-sched.c (advance_one_cycle): Strengthen local "insn" from
+ rtx to rtx_insn *.
+ (extract_new_fences_from): Replace uses of NULL_RTX with NULL.
+ (replace_dest_with_reg_in_expr): Strengthen local "insn_rtx" from
+ rtx to rtx_insn *.
+ (undo_transformations): Likewise for param "insn".
+ (update_liveness_on_insn): Likewise.
+ (compute_live_below_insn): Likewise for param "insn" and local
+ "succ".
+ (update_data_sets): Likewise for param "insn".
+ (fill_vec_av_set): Replace uses of NULL_RTX with NULL.
+ (convert_vec_av_set_to_ready): Drop now-redundant checked cast.
+ (invoke_aftermath_hooks): Strengthen param "best_insn" from rtx to
+ rtx_insn *.
+ (move_cond_jump): Likewise for param "insn".
+ (move_cond_jump): Drop use of SET_BND_TO.
+ (compute_av_set_on_boundaries): Likewise.
+ (update_fence_and_insn): Replace uses of NULL_RTX with NULL.
+ (update_and_record_unavailable_insns): Strengthen local "bb_end"
+ from rtx to rtx_insn *.
+ (maybe_emit_renaming_copy): Likewise for param "insn".
+ (maybe_emit_speculative_check): Likewise.
+ (handle_emitting_transformations): Likewise.
+ (remove_insn_from_stream): Likewise.
+ (code_motion_process_successors): Strengthen local "succ" from rtx
+ to insn_t.
+
2014-08-26 David Malcolm <dmalcolm@redhat.com>
* sel-sched-ir.h (ilist_t): Redefine this typedef in terms of
static void generate_recovery_code (rtx_insn *);
static void process_insn_forw_deps_be_in_spec (rtx, rtx, ds_t);
static void begin_speculative_block (rtx_insn *);
-static void add_to_speculative_block (rtx);
+static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
static void create_check_block_twin (rtx_insn *, bool);
static void fix_recovery_deps (basic_block);
static void move_block_after_check (rtx);
static void move_succs (vec<edge, va_gc> **, basic_block);
static void sched_remove_insn (rtx_insn *);
-static void clear_priorities (rtx, rtx_vec_t *);
+static void clear_priorities (rtx_insn *, rtx_vec_t *);
static void calc_priorities (rtx_vec_t);
static void add_jump_dependencies (rtx, rtx);
/* Generates recovery code for BE_IN speculative INSN. */
static void
-add_to_speculative_block (rtx insn)
+add_to_speculative_block (rtx_insn *insn)
{
ds_t ts;
sd_iterator_def sd_it;
Store in vector pointed to by ROOTS_PTR insns on which priority () should
be invoked to initialize all cleared priorities. */
static void
-clear_priorities (rtx insn, rtx_vec_t *roots_ptr)
+clear_priorities (rtx_insn *insn, rtx_vec_t *roots_ptr)
{
sd_iterator_def sd_it;
dep_t dep;
calc_priorities (rtx_vec_t roots)
{
int i;
- rtx insn;
+ rtx_insn *insn;
FOR_EACH_VEC_ELT (roots, i, insn)
priority (insn);
/* Implement hooks for haifa scheduler. */
static void
-haifa_start_insn (rtx insn)
+haifa_start_insn (rtx_insn *insn)
{
gcc_assert (insn && !cur_insn);
- cur_insn = as_a <rtx_insn *> (insn);
+ cur_insn = insn;
}
static void
}
static void
-haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
+haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
if (!(ds & SPECULATIVE))
{
}
static void
-haifa_note_dep (rtx elem, ds_t ds)
+haifa_note_dep (rtx_insn *elem, ds_t ds)
{
dep_def _dep;
dep_t dep = &_dep;
}
static void
-note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
+note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
{
if (sched_deps_info->note_mem_dep)
sched_deps_info->note_mem_dep (m1, m2, e, ds);
{
if (anti_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ note_mem_dep (t, XEXP (pending_mem, 0), as_a <rtx_insn *> (XEXP (pending, 0)),
DEP_ANTI);
pending = XEXP (pending, 1);
{
if (output_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ note_mem_dep (t, XEXP (pending_mem, 0),
+ as_a <rtx_insn *> (XEXP (pending, 0)),
DEP_OUTPUT);
pending = XEXP (pending, 1);
if (read_dependence (XEXP (pending_mem, 0), t)
&& ! sched_insns_conditions_mutex_p (insn,
XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ note_mem_dep (t, XEXP (pending_mem, 0),
+ as_a <rtx_insn *> (XEXP (pending, 0)),
DEP_ANTI);
pending = XEXP (pending, 1);
if (true_dependence (XEXP (pending_mem, 0), VOIDmode, t)
&& ! sched_insns_conditions_mutex_p (insn,
XEXP (pending, 0)))
- note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
+ note_mem_dep (t, XEXP (pending_mem, 0),
+ as_a <rtx_insn *> (XEXP (pending, 0)),
sched_deps_info->generate_spec_deps
? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
};
typedef vec<basic_block> bb_vec_t;
-typedef vec<rtx> insn_vec_t;
-typedef vec<rtx> rtx_vec_t;
+typedef vec<rtx_insn *> insn_vec_t;
+typedef vec<rtx_insn *> rtx_vec_t;
extern void sched_init_bbs (void);
void (*compute_jump_reg_dependencies) (rtx, regset);
/* Start analyzing insn. */
- void (*start_insn) (rtx);
+ void (*start_insn) (rtx_insn *);
/* Finish analyzing insn. */
void (*finish_insn) (void);
/* Note memory dependence of type DS between MEM1 and MEM2 (which is
in the INSN2). */
- void (*note_mem_dep) (rtx mem1, rtx mem2, rtx insn2, ds_t ds);
+ void (*note_mem_dep) (rtx mem1, rtx mem2, rtx_insn *insn2, ds_t ds);
/* Note a dependence of type DS from the INSN. */
- void (*note_dep) (rtx, ds_t ds);
+ void (*note_dep) (rtx_insn *, ds_t ds);
/* Nonzero if we should use cselib for better alias analysis. This
must be 0 if the dependency information is used after sched_analyze
dump_insn_vector (rtx_vec_t succs)
{
int i;
- rtx succ;
+ rtx_insn *succ;
FOR_EACH_VEC_ELT (succs, i, succ)
if (succ)
/* Dump a rtx vector REF. */
DEBUG_FUNCTION void
-debug (vec<rtx> &ref)
+debug (vec<rtx_insn *> &ref)
{
switch_dump (stderr);
dump_insn_vector (ref);
}
DEBUG_FUNCTION void
-debug (vec<rtx> *ptr)
+debug (vec<rtx_insn *> *ptr)
{
if (ptr)
debug (*ptr);
_list_add (lp);
bnd = BLIST_BND (*lp);
- SET_BND_TO (bnd) = to;
+ BND_TO (bnd) = to;
BND_PTR (bnd) = ptr;
BND_AV (bnd) = NULL;
BND_AV1 (bnd) = NULL;
/* Add new fence consisting of INSN and STATE to the list pointed to by LP. */
static void
flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc,
- insn_t last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
+ insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns,
int *ready_ticks, int ready_ticks_size, insn_t sched_next,
int cycle, int cycle_issued_insns, int issue_more,
bool starts_cycle_p, bool after_stall_p)
advance_deps_context (deps_t dc, insn_t insn)
{
sched_deps_info = &advance_deps_context_sched_deps_info;
- deps_analyze_insn (dc, as_a <rtx_insn *> (insn));
+ deps_analyze_insn (dc, insn);
}
\f
state_create (),
create_deps_context () /* dc */,
create_target_context (true) /* tc */,
- NULL_RTX /* last_scheduled_insn */,
+ NULL /* last_scheduled_insn */,
NULL, /* executing_insns */
XCNEWVEC (int, ready_ticks_size), /* ready_ticks */
ready_ticks_size,
- NULL_RTX /* sched_next */,
+ NULL /* sched_next */,
1 /* cycle */, 0 /* cycle_issued_insns */,
issue_rate, /* issue_more */
1 /* starts_cycle_p */, 0 /* after_stall_p */);
static void
merge_fences (fence_t f, insn_t insn,
state_t state, deps_t dc, void *tc,
- rtx last_scheduled_insn, vec<rtx, va_gc> *executing_insns,
+ rtx_insn *last_scheduled_insn,
+ vec<rtx_insn *, va_gc> *executing_insns,
int *ready_ticks, int ready_ticks_size,
rtx sched_next, int cycle, int issue_more, bool after_stall_p)
{
other parameters. */
static void
add_to_fences (flist_tail_t new_fences, insn_t insn,
- state_t state, deps_t dc, void *tc, rtx last_scheduled_insn,
- vec<rtx, va_gc> *executing_insns, int *ready_ticks,
- int ready_ticks_size, rtx sched_next, int cycle,
+ state_t state, deps_t dc, void *tc,
+ rtx_insn *last_scheduled_insn,
+ vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks,
+ int ready_ticks_size, rtx_insn *sched_next, int cycle,
int cycle_issued_insns, int issue_rate,
bool starts_cycle_p, bool after_stall_p)
{
add_to_fences (new_fences,
succ, state_create (), create_deps_context (),
create_target_context (true),
- NULL_RTX, NULL,
+ NULL, NULL,
XCNEWVEC (int, ready_ticks_size), ready_ticks_size,
- NULL_RTX, FENCE_CYCLE (fence) + 1,
+ NULL, FENCE_CYCLE (fence) + 1,
0, issue_rate, 1, FENCE_AFTER_STALL_P (fence));
}
insn_t
get_nop_from_pool (insn_t insn)
{
+ rtx nop_pat;
insn_t nop;
bool old_p = nop_pool.n != 0;
int flags;
if (old_p)
- nop = nop_pool.v[--nop_pool.n];
+ nop_pat = nop_pool.v[--nop_pool.n];
else
- nop = nop_pattern;
+ nop_pat = nop_pattern;
- nop = emit_insn_before (nop, insn);
+ nop = emit_insn_before (nop_pat, insn);
if (old_p)
flags = INSN_INIT_TODO_SSID;
INSN_DELETED_P (nop) = 0;
if (nop_pool.n == nop_pool.s)
- nop_pool.v = XRESIZEVEC (rtx, nop_pool.v,
+ nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v,
(nop_pool.s = 2 * nop_pool.s + 1));
nop_pool.v[nop_pool.n++] = nop;
}
sched_deps_info = &deps_init_id_sched_deps_info;
- deps_analyze_insn (dc, as_a <rtx_insn *> (insn));
+ deps_analyze_insn (dc, insn);
free_deps (dc);
/* This hook makes scheduler frontend to initialize its internal data
structures for the passed insn. */
- void (*init_insn) (rtx);
+ void (*init_insn) (insn_t);
};
/* A driver function to add a set of basic blocks (BBS) to the
if (ssi->init_insn)
FOR_EACH_VEC_ELT (bbs, i, bb)
{
- rtx insn;
+ rtx_insn *insn;
FOR_BB_INSNS (bb, insn)
ssi->init_insn (insn);
if (NOTE_INSN_BASIC_BLOCK_P (insn))
{
- init_global_data.prev_insn = NULL_RTX;
+ init_global_data.prev_insn = NULL;
return;
}
init_global_data.prev_insn = insn;
}
else
- init_global_data.prev_insn = NULL_RTX;
+ init_global_data.prev_insn = NULL;
if (GET_CODE (PATTERN (insn)) == ASM_INPUT
|| asm_noperands (PATTERN (insn)) >= 0)
/* Find the proper seqno for inserting at INSN by successors.
Return -1 if no successors with positive seqno exist. */
static int
-get_seqno_by_succs (rtx insn)
+get_seqno_by_succs (rtx_insn *insn)
{
basic_block bb = BLOCK_FOR_INSN (insn);
- rtx tmp = insn, end = BB_END (bb);
+ rtx_insn *tmp = insn, *end = BB_END (bb);
int seqno;
insn_t succ = NULL;
succ_iterator si;
/* Create a new vinsn for INSN_RTX. FORCE_UNIQUE_P is true when the vinsn
must not be clonable. */
vinsn_t
-create_vinsn_from_insn_rtx (rtx insn_rtx, bool force_unique_p)
+create_vinsn_from_insn_rtx (rtx_insn *insn_rtx, bool force_unique_p)
{
gcc_assert (INSN_P (insn_rtx) && !INSN_IN_STREAM_P (insn_rtx));
return vi->insn_rtx;
}
-rtx_insn *BND_TO (bnd_t bnd)
-{
- return safe_as_a <rtx_insn *> (bnd->to);
-}
-
-insn_t& SET_BND_TO (bnd_t bnd)
-{
- return bnd->to;
-}
-
#endif
#define _XLIST_NEXT(L) (_LIST_NEXT (L))
/* Instruction. */
-typedef rtx insn_t;
+typedef rtx_insn *insn_t;
/* List of insns. */
typedef _list_t ilist_t;
deps_t dc;
};
typedef struct _bnd *bnd_t;
-extern rtx_insn *BND_TO (bnd_t bnd);
-extern insn_t& SET_BND_TO (bnd_t bnd);
+#define BND_TO(B) ((B)->to)
/* PTR stands not for pointer as you might think, but as a Path To Root of the
current instruction group from boundary B. */
tc_t tc;
/* A vector of insns that are scheduled but not yet completed. */
- vec<rtx, va_gc> *executing_insns;
+ vec<rtx_insn *, va_gc> *executing_insns;
/* A vector indexed by UIDs that caches the earliest cycle on which
an insn can be scheduled on this fence. */
int ready_ticks_size;
/* Insn, which has been scheduled last on this fence. */
- rtx last_scheduled_insn;
+ rtx_insn *last_scheduled_insn;
/* The last value of can_issue_more variable on this fence. */
int issue_more;
/* If non-NULL force the next scheduled insn to be SCHED_NEXT. */
- rtx sched_next;
+ rtx_insn *sched_next;
/* True if fill_insns processed this fence. */
BOOL_BITFIELD processed_p : 1;
}
static inline bool
-_succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
+_succ_iter_cond (succ_iterator *ip, insn_t *succp, insn_t insn,
bool check (edge, succ_iterator *))
{
if (!ip->bb_end)
/* Expression transformation routines. */
extern rtx_insn *create_insn_rtx_from_pattern (rtx, rtx);
-extern vinsn_t create_vinsn_from_insn_rtx (rtx, bool);
+extern vinsn_t create_vinsn_from_insn_rtx (rtx_insn *, bool);
extern rtx_insn *create_copy_of_insn_rtx (rtx);
extern void change_vinsn_in_expr (expr_t, vinsn_t);
{
unsigned i;
int cycle;
- rtx insn;
+ rtx_insn *insn;
advance_state (FENCE_STATE (fence));
cycle = ++FENCE_CYCLE (fence);
int orig_max_seqno)
{
bool was_here_p = false;
- insn_t insn = NULL_RTX;
+ insn_t insn = NULL;
insn_t succ;
succ_iterator si;
ilist_iterator ii;
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
- rtx insn_rtx;
+ rtx_insn *insn_rtx;
vinsn_t vinsn;
insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
/* Undo all transformations on *AV_PTR that were done when
moving through INSN. */
static void
-undo_transformations (av_set_t *av_ptr, rtx insn)
+undo_transformations (av_set_t *av_ptr, rtx_insn *insn)
{
av_set_iterator av_iter;
expr_t expr;
/* Update liveness sets for INSN. */
static inline void
-update_liveness_on_insn (rtx insn)
+update_liveness_on_insn (rtx_insn *insn)
{
ignore_first = true;
compute_live (insn);
/* Compute liveness below INSN and write it into REGS. */
static inline void
-compute_live_below_insn (rtx insn, regset regs)
+compute_live_below_insn (rtx_insn *insn, regset regs)
{
- rtx succ;
+ rtx_insn *succ;
succ_iterator si;
FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
/* Update the data gathered in av and lv sets starting from INSN. */
static void
-update_data_sets (rtx insn)
+update_data_sets (rtx_insn *insn)
{
update_liveness_on_insn (insn);
if (sel_bb_head_p (insn))
if (FENCE_SCHED_NEXT (fence))
{
gcc_assert (sched_next_worked == 1);
- FENCE_SCHED_NEXT (fence) = NULL_RTX;
+ FENCE_SCHED_NEXT (fence) = NULL;
}
/* No need to stall if this variable was not initialized. */
insn_t insn = VINSN_INSN_RTX (vi);
ready_try[n] = 0;
- ready.vec[n] = as_a <rtx_insn *> (insn);
+ ready.vec[n] = insn;
}
}
number is ISSUE_MORE. FENCE and BEST_INSN are the current fence
and the insn chosen for scheduling, respectively. */
static int
-invoke_aftermath_hooks (fence_t fence, rtx best_insn, int issue_more)
+invoke_aftermath_hooks (fence_t fence, rtx_insn *best_insn, int issue_more)
{
gcc_assert (INSN_P (best_insn));
...
*/
static void
-move_cond_jump (rtx insn, bnd_t bnd)
+move_cond_jump (rtx_insn *insn, bnd_t bnd)
{
edge ft_edge;
basic_block block_from, block_next, block_new, block_bnd, bb;
/* Jump is moved to the boundary. */
next = PREV_INSN (insn);
- SET_BND_TO (bnd) = insn;
+ BND_TO (bnd) = insn;
ft_edge = find_fallthru_edge_from (block_from);
block_next = ft_edge->dest;
{
gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
FENCE_INSN (fence) = bnd_to;
- SET_BND_TO (bnd) = bnd_to;
+ BND_TO (bnd) = bnd_to;
}
av_set_clear (&BND_AV (bnd));
SCHED_GROUP_P (insn) = 0;
}
else
- FENCE_SCHED_NEXT (fence) = NULL_RTX;
+ FENCE_SCHED_NEXT (fence) = NULL;
if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;
av_set_iterator i;
av_set_t old_av_set = NULL;
expr_t cur_expr;
- rtx bb_end = sel_bb_end (book_block);
+ rtx_insn *bb_end = sel_bb_end (book_block);
/* First, get correct liveness in the bookkeeping block. The problem is
the range between the bookeeping insn and the end of block. */
/* Emit a register-register copy for INSN if needed. Return true if
emitted one. PARAMS is the move_op static parameters. */
static bool
-maybe_emit_renaming_copy (rtx insn,
+maybe_emit_renaming_copy (rtx_insn *insn,
moveop_static_params_p params)
{
bool insn_emitted = false;
Return true if we've emitted one. PARAMS is the move_op static
parameters. */
static bool
-maybe_emit_speculative_check (rtx insn, expr_t expr,
+maybe_emit_speculative_check (rtx_insn *insn, expr_t expr,
moveop_static_params_p params)
{
bool insn_emitted = false;
insn such as renaming/speculation. Return true if one of such
transformations actually happened, and we have emitted this insn. */
static bool
-handle_emitting_transformations (rtx insn, expr_t expr,
+handle_emitting_transformations (rtx_insn *insn, expr_t expr,
moveop_static_params_p params)
{
bool insn_emitted = false;
/* Remove INSN from stream. When ONLY_DISCONNECT is true, its data
is not removed but reused when INSN is re-emitted. */
static void
-remove_insn_from_stream (rtx insn, bool only_disconnect)
+remove_insn_from_stream (rtx_insn *insn, bool only_disconnect)
{
/* If there's only one insn in the BB, make sure that a nop is
inserted into it, so the basic block won't disappear when we'll
{
int res = 0;
succ_iterator succ_i;
- rtx succ;
+ insn_t succ;
basic_block bb;
int old_index;
unsigned old_succs;
=================================
* DF_REF_INSN
* SET_BB_HEAD, SET_BB_END, SET_BB_HEADER
-* SET_BND_TO
* SET_DEP_PRO, SET_DEP_CON
* SET_NEXT_INSN, SET_PREV_INSN
* SET_VINSN_INSN_RTX