+2014-08-27 David Malcolm <dmalcolm@redhat.com>
+
+ * rtl.h (free_INSN_LIST_list): Strengthen param from rtx * to
+ rtx_insn_list **.
+ (alloc_INSN_LIST): Strengthen return type from rtx to
+ rtx_insn_list *.
+ (copy_INSN_LIST): Likewise for return type and param.
+ (concat_INSN_LIST): Likewise for both params and return type.
+ (remove_free_INSN_LIST_elem): Strengthen first param from rtx to
+ rtx_insn *. Strengthen second param from rtx * to rtx_insn_list **.
+ (remove_free_INSN_LIST_node): Strengthen return type from rtx to
+ rtx_insn *. Strengthen param from rtx * to rtx_insn_list **.
+
+ * sched-int.h (struct deps_reg): Strengthen fields "uses", "sets",
+ "implicit_sets", "control_uses", "clobbers" from rtx to
+ rtx_insn_list *.
+ (struct deps_desc): Likewise for fields "pending_read_insns",
+ "pending_write_insns", "pending_jump_insns",
+ "last_pending_memory_flush", "last_function_call",
+ "last_function_call_may_noreturn", "sched_before_next_call",
+ "sched_before_next_jump".
+ (struct _haifa_deps_insn_data): Likewise for field "cond_deps".
+ (remove_from_deps): Strengthen second param from rtx to rtx_insn *.
+
+ * gcse.c (struct ls_expr): Strengthen fields "loads" and "stores"
+ from rtx to rtx_insn_list *.
+ (ldst_entry): Replace use of NULL_RTX with NULL when dealing with
+ rtx_insn_list *.
+
+ * haifa-sched.c (insn_queue): Strengthen this variable from rtx *
+ to rtx_insn_list **.
+ (dep_cost_1): Strengthen local "dep_cost_rtx_link" from rtx to
+ rtx_insn_list *.
+ (queue_insn): Likewise for local "link".
+ (struct haifa_saved_data): Strengthen field "insn_queue" from
+ rtx * to rtx_insn_list **.
+ (save_backtrack_point): Update allocation of save->insn_queue to
+ reflect the strengthening of elements from rtx to rtx_insn_list *.
+ (queue_to_ready): Strengthen local "link" from rtx to
+ rtx_insn_list *; use methods "next" and "insn" when traversing the
+ list.
+ (early_queue_to_ready): Likewise for locals "link", "next_link",
+ "prev_link".
+ (schedule_block): Update allocation of insn_queue to reflect the
+ strengthening of elements from rtx to rtx_insn_list *. Strengthen
+ local "link" from rtx to rtx_insn_list *, and use methods when
+ working with it.
+ (add_to_speculative_block): Strengthen locals "twins" and
+ "next_node" from rtx to rtx_insn_list *, and use methods when
+ working with them. Strengthen local "twin" from rtx to
+ rtx_insn *, eliminating a checked cast.
+ (fix_recovery_deps): Strengthen locals "ready_list" and "link"
+ from rtx to rtx_insn_list *, and use methods when working with
+ them.
+
+ * lists.c (alloc_INSN_LIST): Strengthen return type and local "r"
+ from rtx to rtx_insn_list *, adding a checked cast.
+ (free_INSN_LIST_list): Strengthen param "listp" from rtx * to
+ rtx_insn_list **.
+ (copy_INSN_LIST): Strengthen return type and locals "new_queue",
+ "newlink" from rtx to rtx_insn_list *. Strengthen local
+ "pqueue" from rtx * to rtx_insn_list **. Strengthen local "x"
+ from rtx to rtx_insn *.
+ (concat_INSN_LIST): Strengthen return type and local "new_rtx"
+ from rtx to rtx_insn_list *. Use methods of the latter class.
+ (remove_free_INSN_LIST_elem): Strengthen param "elem" from rtx to
+ rtx_insn *, and param "listp" from rtx * to rtx_insn_list **.
+ (remove_free_INSN_LIST_node): Strengthen return type and local
+ "elem" from rtx to rtx_insn *. Strenghten param "listp" from
+ rtx * to rtx_insn_list **. Strengthen local "node" from rtx to
+ rtx_insn_list *, using "insn" method.
+
+ * sched-deps.c (add_dependence_list): Strengthen param "list"
+ from rtx to rtx_insn_list *, and use methods when working with it.
+ (add_dependence_list_and_free): Strengthen param "listp" from
+ rtx * to rtx_insn_list **.
+ (remove_from_dependence_list): Strengthen param "listp" from rtx *
+ to rtx_insn_list **, and use methods when working with *listp.
+ (remove_from_both_dependence_lists): Strengthen param "listp" from
+ rtx * to rtx_insn_list **.
+ (add_insn_mem_dependence): Strengthen local "insn_list" from rtx *
+ to rtx_insn_list **. Eliminate local "link", in favor of two new
+ locals "insn_node" and "mem_node", an rtx_insn_list * and an rtx
+ respectively.
+ (deps_analyze_insn): Split out uses of local "t" as an INSN_LIST
+ by introducing local "cond_deps".
+ (remove_from_deps): Strengthen param "insn" from rtx to
+ rtx_insn *.
+
+ * sched-rgn.c (concat_insn_mem_list): Strengthen param
+ "copy_insns" and local "new_insns" from rtx to rtx_insn_list *.
+ Strengthen param "old_insns_p" from rtx * to rtx_insn_list **.
+ Use methods of rtx_insn_list.
+
+ * store-motion.c (struct st_expr): Strengthen fields
+ "antic_stores" and "avail_stores" from rtx to rtx_insn_list *.
+ (st_expr_entry): Replace NULL_RTX with NULL when dealing with
+ rtx_insn_list *.
+ (find_moveable_store): Split out "tmp" into multiple more-tightly
+ scoped locals. Use methods of rtx_insn_list *.
+ (compute_store_table): Strengthen local "tmp" from rtx to
+ rtx_insn *. Use methods of rtx_insn_list *.
+
2014-08-27 David Malcolm <dmalcolm@redhat.com>
* coretypes.h (class rtx_insn_list): Add forward declaration.
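For reference, the hunks below lean on the typed accessors that the rtx_insn_list class provides. A minimal sketch of that interface as this patch assumes it (the real class lives in rtl.h; the method bodies are omitted and the details are illustrative, not authoritative):

/* Sketch of the rtx_insn_list interface assumed by the hunks below:
   a node of an INSN_LIST, with typed access to the insn it carries
   and to the next node.  as_a <rtx_insn_list *> is the checked cast
   used to get from a plain rtx to this type.  */
class rtx_insn_list : public rtx_def
{
public:
  rtx_insn *insn () const;       /* The insn in this node: XEXP (this, 0).  */
  rtx_insn_list *next () const;  /* The next node: XEXP (this, 1).  */
};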
struct expr * expr; /* Gcse expression reference for LM. */
rtx pattern; /* Pattern of this mem. */
rtx pattern_regs; /* List of registers mentioned by the mem. */
- rtx loads; /* INSN list of loads seen. */
- rtx stores; /* INSN list of stores seen. */
+ rtx_insn_list *loads; /* INSN list of loads seen. */
+ rtx_insn_list *stores; /* INSN list of stores seen. */
struct ls_expr * next; /* Next in the list. */
int invalid; /* Invalid for some reason. */
int index; /* If it maps to a bitmap index. */
ptr->expr = NULL;
ptr->pattern = x;
ptr->pattern_regs = NULL_RTX;
- ptr->loads = NULL_RTX;
- ptr->stores = NULL_RTX;
+ ptr->loads = NULL;
+ ptr->stores = NULL;
ptr->reaching_reg = NULL_RTX;
ptr->invalid = 0;
ptr->index = 0;
the base maximal time of functional unit reservations and getting a
result. This is the longest time an insn may be queued. */
-static rtx *insn_queue;
+static rtx_insn_list **insn_queue;
static int q_ptr = 0;
static int q_size = 0;
#define NEXT_Q(X) (((X)+1) & max_insn_queue_index)
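The queue is a ring of buckets: assuming max_insn_queue_index has the form 2**k - 1, masking implements wrap-around. A worked instance, alongside the companion macro from haifa-sched.c:

/* With max_insn_queue_index == 7 (a ring of 8 buckets):
   NEXT_Q (7) == (7 + 1) & 7 == 0, wrapping back to bucket 0, and
   NEXT_Q_AFTER (5, 4) == (5 + 4) & 7 == 1.  */
#define NEXT_Q_AFTER(X, C) (((X)+C) & max_insn_queue_index)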
{
/* This variable is used for backward compatibility with the
targets. */
- rtx dep_cost_rtx_link = alloc_INSN_LIST (NULL_RTX, NULL_RTX);
+ rtx_insn_list *dep_cost_rtx_link =
+ alloc_INSN_LIST (NULL_RTX, NULL);
/* Make it self-cycled, so that if someone tries to walk over this
incomplete list he/she will be caught in an endless loop. */
queue_insn (rtx_insn *insn, int n_cycles, const char *reason)
{
int next_q = NEXT_Q_AFTER (q_ptr, n_cycles);
- rtx link = alloc_INSN_LIST (insn, insn_queue[next_q]);
+ rtx_insn_list *link = alloc_INSN_LIST (insn, insn_queue[next_q]);
int new_tick;
gcc_assert (n_cycles <= max_insn_queue_index);
/* We don't need to save q_ptr, as its value is arbitrary and we can set it
to 0 when restoring. */
int q_size;
- rtx *insn_queue;
+ rtx_insn_list **insn_queue;
/* Describe pattern replacements that occurred since this backtrack point
was queued. */
save->ready.vec = XNEWVEC (rtx_insn *, ready.veclen);
memcpy (save->ready.vec, ready.vec, ready.veclen * sizeof (rtx));
- save->insn_queue = XNEWVEC (rtx, max_insn_queue_index + 1);
+ save->insn_queue = XNEWVEC (rtx_insn_list *, max_insn_queue_index + 1);
save->q_size = q_size;
for (i = 0; i <= max_insn_queue_index; i++)
{
queue_to_ready (struct ready_list *ready)
{
rtx_insn *insn;
- rtx link;
+ rtx_insn_list *link;
rtx skip_insn;
q_ptr = NEXT_Q (q_ptr);
/* Add all pending insns that can be scheduled without stalls to the
ready list. */
- for (link = insn_queue[q_ptr]; link; link = XEXP (link, 1))
+ for (link = insn_queue[q_ptr]; link; link = link->next ())
{
- insn = as_a <rtx_insn *> (XEXP (link, 0));
+ insn = link->insn ();
q_size -= 1;
if (sched_verbose >= 2)
{
if ((link = insn_queue[NEXT_Q_AFTER (q_ptr, stalls)]))
{
- for (; link; link = XEXP (link, 1))
+ for (; link; link = link->next ())
{
insn = as_a <rtx_insn *> (XEXP (link, 0));
q_size -= 1;
early_queue_to_ready (state_t state, struct ready_list *ready)
{
rtx_insn *insn;
- rtx link;
- rtx next_link;
- rtx prev_link;
+ rtx_insn_list *link;
+ rtx_insn_list *next_link;
+ rtx_insn_list *prev_link;
bool move_to_ready;
int cost;
state_t temp_state = alloca (dfa_state_size);
prev_link = 0;
while (link)
{
- next_link = XEXP (link, 1);
- insn = as_a <rtx_insn *> (XEXP (link, 0));
+ next_link = link->next ();
+ insn = link->insn ();
if (insn && sched_verbose > 6)
print_rtl_single (sched_dump, insn);
q_ptr = 0;
q_size = 0;
- insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
+ insn_queue = XALLOCAVEC (rtx_insn_list *, max_insn_queue_index + 1);
memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
/* Start just before the beginning of time. */
}
for (i = 0; i <= max_insn_queue_index; i++)
{
- rtx link;
+ rtx_insn_list *link;
while ((link = insn_queue[i]) != NULL)
{
- rtx_insn *x = as_a <rtx_insn *> (XEXP (link, 0));
- insn_queue[i] = XEXP (link, 1);
+ rtx_insn *x = link->insn ();
+ insn_queue[i] = link->next ();
QUEUE_INDEX (x) = QUEUE_NOWHERE;
free_INSN_LIST_node (link);
resolve_dependencies (x);
ds_t ts;
sd_iterator_def sd_it;
dep_t dep;
- rtx twins = NULL;
+ rtx_insn_list *twins = NULL;
rtx_vec_t priorities_roots;
ts = TODO_SPEC (insn);
because that would make TWINS appear in the INSN_BACK_DEPS (INSN). */
while (twins)
{
- rtx twin;
+ rtx_insn *twin;
+ rtx_insn_list *next_node;
- twin = XEXP (twins, 0);
+ twin = twins->insn ();
{
dep_def _new_dep, *new_dep = &_new_dep;
- init_dep (new_dep, insn, as_a <rtx_insn *> (twin), REG_DEP_OUTPUT);
+ init_dep (new_dep, insn, twin, REG_DEP_OUTPUT);
sd_add_dep (new_dep, false);
}
- twin = XEXP (twins, 1);
+ next_node = twins->next ();
free_INSN_LIST_node (twins);
- twins = twin;
+ twins = next_node;
}
calc_priorities (priorities_roots);
fix_recovery_deps (basic_block rec)
{
rtx_insn *note, *insn, *jump;
- rtx ready_list = 0;
+ rtx_insn_list *ready_list = 0;
bitmap_head in_ready;
- rtx link;
+ rtx_insn_list *link;
bitmap_initialize (&in_ready, 0);
bitmap_clear (&in_ready);
/* Try to add instructions to the ready or queue list. */
- for (link = ready_list; link; link = XEXP (link, 1))
- try_ready (as_a <rtx_insn *> (XEXP (link, 0)));
+ for (link = ready_list; link; link = link->next ())
+ try_ready (link->insn ());
free_INSN_LIST_list (&ready_list);
/* Fixing jump's dependences. */
/* This call is used in place of a gen_rtx_INSN_LIST. If there is a cached
node available, we'll use it, otherwise a call to gen_rtx_INSN_LIST
is made. */
-rtx
+rtx_insn_list *
alloc_INSN_LIST (rtx val, rtx next)
{
- rtx r;
+ rtx_insn_list *r;
if (unused_insn_list)
{
- r = unused_insn_list;
- unused_insn_list = XEXP (r, 1);
+ r = as_a <rtx_insn_list *> (unused_insn_list);
+ unused_insn_list = r->next ();
XEXP (r, 0) = val;
XEXP (r, 1) = next;
PUT_REG_NOTE_KIND (r, VOIDmode);
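The caching described in the comment above is a plain free list. A hedged standalone sketch of the idiom, with hypothetical names rather than GCC's:

#include <stdlib.h>

/* Hypothetical sketch of the reuse pattern in alloc_INSN_LIST: pop a
   cached node from the unused list if one exists, otherwise allocate
   afresh, then (re)initialize its two fields.  */
struct node { void *val; struct node *next; };
static struct node *unused_nodes;

static struct node *
alloc_node (void *val, struct node *next)
{
  struct node *r;

  if (unused_nodes)
    {
      r = unused_nodes;		/* Reuse a node freed earlier.  */
      unused_nodes = r->next;
    }
  else
    r = (struct node *) malloc (sizeof (struct node));

  r->val = val;
  r->next = next;
  return r;
}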
/* This function will free up an entire list of INSN_LIST nodes. */
void
-free_INSN_LIST_list (rtx *listp)
+free_INSN_LIST_list (rtx_insn_list **listp)
{
if (*listp == 0)
return;
- free_list (listp, &unused_insn_list);
+ free_list ((rtx *)listp, &unused_insn_list);
}
/* Make a copy of the INSN_LIST list LINK and return it. */
-rtx
-copy_INSN_LIST (rtx link)
+rtx_insn_list *
+copy_INSN_LIST (rtx_insn_list *link)
{
- rtx new_queue;
- rtx *pqueue = &new_queue;
+ rtx_insn_list *new_queue;
+ rtx_insn_list **pqueue = &new_queue;
- for (; link; link = XEXP (link, 1))
+ for (; link; link = link->next ())
{
- rtx x = XEXP (link, 0);
- rtx newlink = alloc_INSN_LIST (x, NULL);
+ rtx_insn *x = link->insn ();
+ rtx_insn_list *newlink = alloc_INSN_LIST (x, NULL);
*pqueue = newlink;
- pqueue = &XEXP (newlink, 1);
+ pqueue = (rtx_insn_list **)&XEXP (newlink, 1);
}
- *pqueue = NULL_RTX;
+ *pqueue = NULL;
return new_queue;
}
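copy_INSN_LIST builds the copy in order with a pointer-to-pointer tail cursor instead of appending and reversing. A sketch of the idiom, reusing the hypothetical node type from the sketch above:

/* *ptail always names the link field the next copied node must be
   stored into; after the loop it is the terminating NULL slot.  */
static struct node *
copy_list (struct node *link)
{
  struct node *new_head;
  struct node **ptail = &new_head;

  for (; link; link = link->next)
    {
      *ptail = alloc_node (link->val, NULL);
      ptail = &(*ptail)->next;
    }
  *ptail = NULL;	/* An empty input yields new_head == NULL.  */
  return new_head;
}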
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
-rtx
-concat_INSN_LIST (rtx copy, rtx old)
+rtx_insn_list *
+concat_INSN_LIST (rtx_insn_list *copy, rtx_insn_list *old)
{
- rtx new_rtx = old;
- for (; copy ; copy = XEXP (copy, 1))
+ rtx_insn_list *new_rtx = old;
+ for (; copy ; copy = copy->next ())
{
- new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
+ new_rtx = alloc_INSN_LIST (copy->insn (), new_rtx);
PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
}
return new_rtx;
/* Remove and free corresponding to ELEM node in the INSN_LIST pointed to
by LISTP. */
void
-remove_free_INSN_LIST_elem (rtx elem, rtx *listp)
+remove_free_INSN_LIST_elem (rtx_insn *elem, rtx_insn_list **listp)
{
- free_INSN_LIST_node (remove_list_elem (elem, listp));
+ free_INSN_LIST_node (remove_list_elem (elem, (rtx *)listp));
}
/* Remove and free the first node in the INSN_LIST pointed to by LISTP. */
-rtx
-remove_free_INSN_LIST_node (rtx *listp)
+rtx_insn *
+remove_free_INSN_LIST_node (rtx_insn_list **listp)
{
- rtx node = *listp;
- rtx elem = XEXP (node, 0);
+ rtx_insn_list *node = *listp;
+ rtx_insn *elem = node->insn ();
- remove_list_node (listp);
+ remove_list_node ((rtx *)listp);
free_INSN_LIST_node (node);
return elem;
/* lists.c */
extern void free_EXPR_LIST_list (rtx *);
-extern void free_INSN_LIST_list (rtx *);
+extern void free_INSN_LIST_list (rtx_insn_list **);
extern void free_EXPR_LIST_node (rtx);
extern void free_INSN_LIST_node (rtx);
-extern rtx alloc_INSN_LIST (rtx, rtx);
-extern rtx copy_INSN_LIST (rtx);
-extern rtx concat_INSN_LIST (rtx, rtx);
+extern rtx_insn_list *alloc_INSN_LIST (rtx, rtx);
+extern rtx_insn_list *copy_INSN_LIST (rtx_insn_list *);
+extern rtx_insn_list *concat_INSN_LIST (rtx_insn_list *, rtx_insn_list *);
extern rtx alloc_EXPR_LIST (int, rtx, rtx);
-extern void remove_free_INSN_LIST_elem (rtx, rtx *);
+extern void remove_free_INSN_LIST_elem (rtx_insn *, rtx_insn_list **);
extern rtx remove_list_elem (rtx, rtx *);
-extern rtx remove_free_INSN_LIST_node (rtx *);
+extern rtx_insn *remove_free_INSN_LIST_node (rtx_insn_list **);
extern rtx remove_free_EXPR_LIST_node (rtx *);
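A hedged usage sketch of the strengthened API above (the caller is hypothetical; note that alloc_INSN_LIST still takes a plain rtx value, so an rtx_insn * passes through unchanged):

/* Hypothetical caller: build a two-node INSN_LIST, traverse it with
   the typed accessors, then return the nodes to the free-list cache.  */
static void
example_use (rtx_insn *a, rtx_insn *b)
{
  rtx_insn_list *list = alloc_INSN_LIST (a, NULL);
  list = alloc_INSN_LIST (b, list);
  for (rtx_insn_list *p = list; p; p = p->next ())
    gcc_assert (INSN_P (p->insn ()));
  free_INSN_LIST_list (&list);
}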
static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
-static void add_dependence_list (rtx_insn *, rtx, int, enum reg_note, bool);
+static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
+ enum reg_note, bool);
static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
- rtx *, int, enum reg_note, bool);
+ rtx_insn_list **, int, enum reg_note,
+ bool);
static void delete_all_dependences (rtx);
static void chain_to_prev_insn (rtx_insn *);
true if DEP_NONREG should be set on newly created dependencies. */
static void
-add_dependence_list (rtx_insn *insn, rtx list, int uncond, enum reg_note dep_type,
- bool hard)
+add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
+ enum reg_note dep_type, bool hard)
{
mark_as_hard = hard;
- for (; list; list = XEXP (list, 1))
+ for (; list; list = list->next ())
{
- if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
- add_dependence (insn, as_a <rtx_insn *> (XEXP (list, 0)), dep_type);
+ if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
+ add_dependence (insn, list->insn (), dep_type);
}
mark_as_hard = false;
}
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn, rtx *listp,
+add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
+ rtx_insn_list **listp,
int uncond, enum reg_note dep_type, bool hard)
{
add_dependence_list (insn, *listp, uncond, dep_type, hard);
occurrences removed. */
static int
-remove_from_dependence_list (rtx insn, rtx* listp)
+remove_from_dependence_list (rtx insn, rtx_insn_list **listp)
{
int removed = 0;
while (*listp)
{
- if (XEXP (*listp, 0) == insn)
+ if ((*listp)->insn () == insn)
{
remove_free_INSN_LIST_node (listp);
removed++;
continue;
}
- listp = &XEXP (*listp, 1);
+ listp = (rtx_insn_list **)&XEXP (*listp, 1);
}
return removed;
/* Same as above, but process two lists at once. */
static int
-remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
+remove_from_both_dependence_lists (rtx insn,
+ rtx_insn_list **listp,
+ rtx *exprp)
{
int removed = 0;
continue;
}
- listp = &XEXP (*listp, 1);
+ listp = (rtx_insn_list **)&XEXP (*listp, 1);
exprp = &XEXP (*exprp, 1);
}
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
rtx_insn *insn, rtx mem)
{
- rtx *insn_list;
+ rtx_insn_list **insn_list;
+ rtx_insn_list *insn_node;
rtx *mem_list;
- rtx link;
+ rtx mem_node;
gcc_assert (!deps->readonly);
if (read_p)
deps->pending_write_list_length++;
}
- link = alloc_INSN_LIST (insn, *insn_list);
- *insn_list = link;
+ insn_node = alloc_INSN_LIST (insn, *insn_list);
+ *insn_list = insn_node;
if (sched_deps_info->use_cselib)
{
XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
GET_MODE (mem), insn);
}
- link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
- *mem_list = link;
+ mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
+ *mem_list = mem_node;
}
/* Make a dependency between every memory reference on the pending lists
rtx t;
sched_get_condition_with_rev (insn, NULL);
t = INSN_CACHED_COND (insn);
- INSN_COND_DEPS (insn) = NULL_RTX;
+ INSN_COND_DEPS (insn) = NULL;
if (reload_completed
&& (current_sched_info->flags & DO_PREDICATION)
&& COMPARISON_P (t)
{
unsigned int regno;
int nregs;
+ rtx_insn_list *cond_deps = NULL;
t = XEXP (t, 0);
regno = REGNO (t);
nregs = hard_regno_nregs[regno][GET_MODE (t)];
- t = NULL_RTX;
while (nregs-- > 0)
{
struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
- t = concat_INSN_LIST (reg_last->sets, t);
- t = concat_INSN_LIST (reg_last->clobbers, t);
- t = concat_INSN_LIST (reg_last->implicit_sets, t);
+ cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
+ cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
+ cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
}
- INSN_COND_DEPS (insn) = t;
+ INSN_COND_DEPS (insn) = cond_deps;
}
}
/* Remove INSN from dependence contexts DEPS. */
void
-remove_from_deps (struct deps_desc *deps, rtx insn)
+remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
{
int removed;
unsigned i;
/* Insns which affect pseudo-registers. */
struct deps_reg
{
- rtx uses;
- rtx sets;
- rtx implicit_sets;
- rtx control_uses;
- rtx clobbers;
+ rtx_insn_list *uses;
+ rtx_insn_list *sets;
+ rtx_insn_list *implicit_sets;
+ rtx_insn_list *control_uses;
+ rtx_insn_list *clobbers;
int uses_length;
int clobbers_length;
};
to a list more than once. */
/* An INSN_LIST containing all insns with pending read operations. */
- rtx pending_read_insns;
+ rtx_insn_list *pending_read_insns;
/* An EXPR_LIST containing all MEM rtx's which are pending reads. */
rtx pending_read_mems;
/* An INSN_LIST containing all insns with pending write operations. */
- rtx pending_write_insns;
+ rtx_insn_list *pending_write_insns;
/* An EXPR_LIST containing all MEM rtx's which are pending writes. */
rtx pending_write_mems;
/* An INSN_LIST containing all jump insns. */
- rtx pending_jump_insns;
+ rtx_insn_list *pending_jump_insns;
/* We must prevent the above lists from ever growing too large since
the number of dependencies produced is at least O(N*N),
alias analysis, this restriction can be relaxed.
This may also be an INSN that writes memory if the pending lists grow
too large. */
- rtx last_pending_memory_flush;
+ rtx_insn_list *last_pending_memory_flush;
/* A list of the last function calls we have seen. We use a list to
represent last function calls from multiple predecessor blocks.
Used to prevent register lifetimes from expanding unnecessarily. */
- rtx last_function_call;
+ rtx_insn_list *last_function_call;
/* A list of the last function calls that may not return normally
we have seen. We use a list to represent last function calls from
multiple predecessor blocks. Used to prevent moving trapping insns
across such calls. */
- rtx last_function_call_may_noreturn;
+ rtx_insn_list *last_function_call_may_noreturn;
/* A list of insns which use a pseudo register that does not already
cross a call. We create dependencies between each of those insn
and the next call insn, to ensure that they won't cross a call after
scheduling is done. */
- rtx sched_before_next_call;
+ rtx_insn_list *sched_before_next_call;
/* Similarly, a list of insns which should not cross a branch. */
- rtx sched_before_next_jump;
+ rtx_insn_list *sched_before_next_jump;
/* Used to keep post-call pseudo/hard reg movements together with
the call. */
/* For a conditional insn, a list of insns that could set the condition
register. Used when generating control dependencies. */
- rtx cond_deps;
+ rtx_insn_list *cond_deps;
/* True if the condition in 'cond' should be reversed to get the actual
condition. */
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
-extern void remove_from_deps (struct deps_desc *, rtx);
+extern void remove_from_deps (struct deps_desc *, rtx_insn *);
extern void init_insn_reg_pressure_info (rtx);
extern dw_t get_dep_weak (ds_t, ds_t);
static void compute_block_dependences (int);
static void schedule_region (int);
-static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
+static void concat_insn_mem_list (rtx_insn_list *, rtx,
+ rtx_insn_list **, rtx *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);
static struct deps_desc *bb_deps;
static void
-concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
+concat_insn_mem_list (rtx_insn_list *copy_insns, rtx copy_mems,
+ rtx_insn_list **old_insns_p,
rtx *old_mems_p)
{
- rtx new_insns = *old_insns_p;
+ rtx_insn_list *new_insns = *old_insns_p;
rtx new_mems = *old_mems_p;
while (copy_insns)
{
- new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
+ new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns);
new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
- copy_insns = XEXP (copy_insns, 1);
+ copy_insns = copy_insns->next ();
copy_mems = XEXP (copy_mems, 1);
}
/* List of registers mentioned by the mem. */
rtx pattern_regs;
/* INSN list of stores that are locally anticipatable. */
- rtx antic_stores;
+ rtx_insn_list *antic_stores;
/* INSN list of stores that are locally available. */
- rtx avail_stores;
+ rtx_insn_list *avail_stores;
/* Next in the list. */
struct st_expr * next;
/* Store ID in the dataflow bitmaps. */
ptr->next = store_motion_mems;
ptr->pattern = x;
ptr->pattern_regs = NULL_RTX;
- ptr->antic_stores = NULL_RTX;
- ptr->avail_stores = NULL_RTX;
+ ptr->antic_stores = NULL;
+ ptr->avail_stores = NULL;
ptr->reaching_reg = NULL_RTX;
ptr->index = 0;
ptr->hash_index = hash;
find_moveable_store (rtx_insn *insn, int *regs_set_before, int *regs_set_after)
{
struct st_expr * ptr;
- rtx dest, set, tmp;
+ rtx dest, set;
int check_anticipatable, check_available;
basic_block bb = BLOCK_FOR_INSN (insn);
check_anticipatable = 1;
else
{
- tmp = XEXP (ptr->antic_stores, 0);
+ rtx_insn *tmp = ptr->antic_stores->insn ();
if (tmp != NULL_RTX
&& BLOCK_FOR_INSN (tmp) != bb)
check_anticipatable = 1;
}
if (check_anticipatable)
{
+ rtx_insn *tmp;
if (store_killed_before (dest, ptr->pattern_regs, insn, bb, regs_set_before))
- tmp = NULL_RTX;
+ tmp = NULL;
else
tmp = insn;
ptr->antic_stores = alloc_INSN_LIST (tmp, ptr->antic_stores);
check_available = 1;
else
{
- tmp = XEXP (ptr->avail_stores, 0);
+ rtx_insn *tmp = ptr->avail_stores->insn ();
if (BLOCK_FOR_INSN (tmp) != bb)
check_available = 1;
}
failed last time. */
if (LAST_AVAIL_CHECK_FAILURE (ptr))
{
+ rtx_insn *tmp;
for (tmp = BB_END (bb);
tmp != insn && tmp != LAST_AVAIL_CHECK_FAILURE (ptr);
tmp = PREV_INSN (tmp))
unsigned regno;
#endif
rtx_insn *insn;
- rtx tmp;
+ rtx_insn *tmp;
df_ref def;
int *last_set_in, *already_set;
struct st_expr * ptr, **prev_next_ptr_ptr;
{
LAST_AVAIL_CHECK_FAILURE (ptr) = NULL_RTX;
if (ptr->antic_stores
- && (tmp = XEXP (ptr->antic_stores, 0)) == NULL_RTX)
- ptr->antic_stores = XEXP (ptr->antic_stores, 1);
+ && (tmp = ptr->antic_stores->insn ()) == NULL_RTX)
+ ptr->antic_stores = ptr->antic_stores->next ();
}
}