+2015-05-02 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * haifa-sched.c: Change the type of some variables to rtx_insn *.
+ * sched-deps.c: Likewise.
+ * sched-int.h: Likewise.
+ * sched-rgn.c: Likewise.
+ * sel-sched.c: Likewise.
+
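The hunks below tighten a number of scheduler-internal signatures and
variables from the generic rtx to rtx_insn *.  In GCC's RTL class
hierarchy, rtx_insn is a C++ subclass of rtx_def that covers only
instruction-like objects, so the stricter type turns accidental non-insn
arguments into compile-time errors instead of latent runtime bugs.  What
follows is a minimal standalone sketch of that idiom, not GCC's actual
definitions: the names rtx_def and rtx_insn and the checked-cast style
mirror rtl.h and is-a.h, but insn_p and as_insn here are simplified
stand-ins.

#include <cassert>

/* Base type for all RTL objects (stand-in for GCC's rtx_def).  */
struct rtx_def
{
  int code;	/* e.g. INSN, JUMP_INSN, CALL_INSN, REG, MEM, ...  */
};
typedef rtx_def *rtx;

/* Subclass covering only instructions (stand-in for GCC's rtx_insn).  */
struct rtx_insn : rtx_def
{
};

/* Hypothetical predicate standing in for GCC's INSN_P-style checks.  */
static bool
insn_p (const rtx_def *x)
{
  return x->code >= 1 && x->code <= 3;	/* placeholder code range */
}

/* Checked downcast in the style of GCC's as_a <rtx_insn *>.  */
static rtx_insn *
as_insn (rtx x)
{
  assert (insn_p (x));
  return static_cast<rtx_insn *> (x);
}

/* With the tighter parameter type, passing a non-insn rtx no longer
   compiles.  */
static void
ready_remove_insn (rtx_insn *insn)
{
  (void) insn;	/* ... remove INSN from the ready list ...  */
}

int
main ()
{
  rtx_insn i {};
  i.code = 1;
  rtx x = &i;
  /* ready_remove_insn (x);	   // would no longer compile  */
  ready_remove_insn (as_insn (x)); /* explicit, checked conversion  */
  return 0;
}

With signatures like ready_remove_insn (rtx_insn *) in the hunks below,
any caller still holding a plain rtx has to go through such a checked
cast, which is exactly where the old code relied on unchecked
invariants.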
/* The following functions are used to implement multi-pass scheduling
on the first cycle. */
static rtx_insn *ready_remove (struct ready_list *, int);
-static void ready_remove_insn (rtx);
+static void ready_remove_insn (rtx_insn *);
static void fix_inter_tick (rtx_insn *, rtx_insn *);
static int fix_tick_ready (rtx_insn *);
static void init_h_i_d (rtx_insn *);
static int haifa_speculate_insn (rtx_insn *, ds_t, rtx *);
static void generate_recovery_code (rtx_insn *);
-static void process_insn_forw_deps_be_in_spec (rtx, rtx_insn *, ds_t);
+static void process_insn_forw_deps_be_in_spec (rtx_insn *, rtx_insn *, ds_t);
static void begin_speculative_block (rtx_insn *);
static void add_to_speculative_block (rtx_insn *);
static void init_before_recovery (basic_block *);
block, or the prev_head of the scheduling block. Used by
rank_for_schedule, so that insns independent of the last scheduled
insn will be preferred over dependent instructions. */
-static rtx last_nondebug_scheduled_insn;
+static rtx_insn *last_nondebug_scheduled_insn;
/* Pointer that iterates through the list of unscheduled insns if we
have a dbg_cnt enabled. It always points at an insn prior to the
/* Compute the number of nondebug deps in list LIST for INSN. */
static int
-dep_list_size (rtx insn, sd_list_types_def list)
+dep_list_size (rtx_insn *insn, sd_list_types_def list)
{
sd_iterator_def sd_it;
dep_t dep;
{
dep_t dep1;
dep_t dep2;
- rtx last = last_nondebug_scheduled_insn;
+ rtx_insn *last = last_nondebug_scheduled_insn;
/* Classify the instructions into three classes:
1) Data dependent on last schedule insn.
/* Remove INSN from the ready list. */
static void
-ready_remove_insn (rtx insn)
+ready_remove_insn (rtx_insn *insn)
{
int i;
only be scheduled once their control dependency is resolved. */
static void
-check_clobbered_conditions (rtx insn)
+check_clobbered_conditions (rtx_insn *insn)
{
HARD_REG_SET t;
int i;
state_t curr_state;
rtx_insn *last_scheduled_insn;
- rtx last_nondebug_scheduled_insn;
+ rtx_insn *last_nondebug_scheduled_insn;
rtx_insn *nonscheduled_insns_begin;
int cycle_issued_insns;
/* For every dependency of INSN, set the FEEDS_BACKTRACK_INSN bit according
to SET_P. */
static void
-mark_backtrack_feeds (rtx insn, int set_p)
+mark_backtrack_feeds (rtx_insn *insn, int set_p)
{
sd_iterator_def sd_it;
dep_t dep;
queued nowhere. */
static void
-unschedule_insns_until (rtx insn)
+unschedule_insns_until (rtx_insn *insn)
{
auto_vec<rtx_insn *> recompute_vec;
{
rtx_insn *insn;
rtx_insn_list *link;
- rtx skip_insn;
+ rtx_insn *skip_insn;
q_ptr = NEXT_Q (q_ptr);
nonscheduled insn. */
skip_insn = first_nonscheduled_insn ();
else
- skip_insn = NULL_RTX;
+ skip_insn = NULL;
/* Add all pending insns that can be scheduled without stalls to the
ready list. */
addition) depending on user flags and target hooks. */
static bool
-ok_for_early_queue_removal (rtx insn)
+ok_for_early_queue_removal (rtx_insn *insn)
{
if (targetm.sched.is_costly_dependence)
{
/* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head;
- last_nondebug_scheduled_insn = NULL_RTX;
+ last_nondebug_scheduled_insn = NULL;
nonscheduled_insns_begin = NULL;
gcc_assert ((NOTE_P (last_scheduled_insn)
Tries to add speculative dependencies of type FS between instructions
in deps_list L and TWIN. */
static void
-process_insn_forw_deps_be_in_spec (rtx insn, rtx_insn *twin, ds_t fs)
+process_insn_forw_deps_be_in_spec (rtx_insn *insn, rtx_insn *twin, ds_t fs)
{
sd_iterator_def sd_it;
dep_t dep;
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
rtx_insn_list **, int, enum reg_note,
bool);
-static void delete_all_dependences (rtx);
+static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);
static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
occurrences removed. */
static int
-remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
+remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
int removed = 0;
/* Same as above, but process two lists at once. */
static int
-remove_from_both_dependence_lists (rtx insn,
+remove_from_both_dependence_lists (rtx_insn *insn,
rtx_insn_list **listp,
rtx_expr_list **exprp)
{
/* Clear all dependencies for an insn. */
static void
-delete_all_dependences (rtx insn)
+delete_all_dependences (rtx_insn *insn)
{
sd_iterator_def sd_it;
dep_t dep;
/* Set up reg pressure info related to INSN. */
void
-init_insn_reg_pressure_info (rtx insn)
+init_insn_reg_pressure_info (rtx_insn *insn)
{
int i, len;
enum reg_class cl;
/* FIXME: Why can't this function just use flags_from_decl_or_type and
test for ECF_NORETURN? */
static bool
-call_may_noreturn_p (rtx insn)
+call_may_noreturn_p (rtx_insn *insn)
{
rtx call;
instruction of that group. */
static bool
-chain_to_prev_insn_p (rtx insn)
+chain_to_prev_insn_p (rtx_insn *insn)
{
rtx prev, x;
/* Helper for sched_free_deps ().
Delete INSN's (RESOLVED_P) backward dependencies. */
static void
-delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
+delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
sd_iterator_def sd_it;
dep_t dep;
extern void finish_deps_global (void);
extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
extern void remove_from_deps (struct deps_desc *, rtx_insn *);
-extern void init_insn_reg_pressure_info (rtx);
+extern void init_insn_reg_pressure_info (rtx_insn *);
extern dw_t get_dep_weak (ds_t, ds_t);
extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
-static int find_conditional_protection (rtx, int);
+static int find_conditional_protection (rtx_insn *, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
-static int is_exception_free (rtx, int, int);
+static int is_exception_free (rtx_insn *, int, int);
static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
block src to trg. */
static void
-update_live (rtx insn, int src)
+update_live (rtx_insn *insn, int src)
{
/* Find the registers set by instruction. */
if (GET_CODE (PATTERN (insn)) == SET
branch depending on insn, that guards the speculative load. */
static int
-find_conditional_protection (rtx insn, int load_insn_bb)
+find_conditional_protection (rtx_insn *insn, int load_insn_bb)
{
sd_iterator_def sd_it;
dep_t dep;
and 0 otherwise. */
static int
-is_exception_free (rtx insn, int bb_src, int bb_trg)
+is_exception_free (rtx_insn *insn, int bb_src, int bb_trg)
{
int insn_class = haifa_classify_insn (insn);
/* Returns true when SUCC in a fallthru bb of INSN, possibly
skipping empty basic blocks. */
static bool
-in_fallthru_bb_p (rtx insn, rtx succ)
+in_fallthru_bb_p (rtx_insn *insn, rtx succ)
{
basic_block bb = BLOCK_FOR_INSN (insn);
edge e;
/* True when INSN is a "regN = regN" copy. */
static bool
-identical_copy_p (rtx insn)
+identical_copy_p (rtx_insn *insn)
{
rtx lhs, rhs, pat;
/* Track bookkeeping copies created, insns scheduled, and blocks for
rescheduling when INSN is found by move_op. */
static void
-track_scheduled_insns_and_blocks (rtx insn)
+track_scheduled_insns_and_blocks (rtx_insn *insn)
{
/* Even if this insn can be a copy that will be removed during current move_op,
we still need to count it as an originator. */
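A side note on the NULL_RTX to NULL hunks in haifa-sched.c above: GCC
defines NULL_RTX as a null pointer of the base rtx type, and C++ never
implicitly converts a base-class pointer to a derived-class pointer, so
once skip_insn and last_nondebug_scheduled_insn become rtx_insn * their
null initializer has to be spelled NULL (or nullptr).  A self-contained
sketch, again with stand-in types rather than GCC's real rtl.h
definitions:

#include <cstddef>

/* Stand-in types; GCC's real rtx_def/rtx_insn live in rtl.h.  */
struct rtx_def { int code; };
typedef rtx_def *rtx;
struct rtx_insn : rtx_def {};

/* Mirrors the spirit of GCC's definition: a null pointer of the
   base type.  */
#define NULL_RTX (rtx) 0

int
main ()
{
  rtx_insn *skip_insn;
  /* skip_insn = NULL_RTX;  // error: no implicit rtx -> rtx_insn * cast  */
  skip_insn = NULL;	    /* OK: NULL converts to any object pointer  */
  return skip_insn == NULL ? 0 : 1;
}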