+2014-08-22 David Malcolm <dmalcolm@redhat.com>
+
+ * haifa-sched.c (bb_header): Strengthen from rtx * to rtx_insn **.
+ (add_delay_dependencies): Strengthen local "pro" from rtx to
+ rtx_insn *.
+ (recompute_todo_spec): Likewise.
+ (dep_cost_1): Likewise for locals "insn", "used".
+ (schedule_insn): Likewise for locals "dbg", "pro", "next".
+ (unschedule_insns_until): Likewise for local "con".
+ (restore_pattern): Likewise for local "next".
+ (estimate_insn_tick): Likewise for local "pro".
+ (resolve_dependencies): Likewise for local "next".
+ (fix_inter_tick): Likewise.
+ (fix_tick_ready): Likewise for local "pro".
+ (add_to_speculative_block): Likewise for locals "check", "twin",
+ "pro".
+ (sched_extend_bb): Likewise for locals "end", "insn".
+ (init_before_recovery): Likewise for local "x".
+ (sched_create_recovery_block): Likewise for local "barrier".
+ (create_check_block_twin): Likewise for local "pro".
+ (fix_recovery_deps): Likewise for locals "note", "insn", "jump",
+ "consumer".
+ (unlink_bb_notes): Update for change to type of bb_header.
+ Strengthen locals "prev", "label", "note", "next" from rtx to
+ rtx_insn *.
+ (clear_priorities): Likewise for local "pro".
+
2014-08-22 David Malcolm <dmalcolm@redhat.com>

* gcse.c (struct occr): Strengthen field "insn" from rtx to
static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
/* Array used in {unlink, restore}_bb_notes. */
-static rtx *bb_header = 0;
+static rtx_insn **bb_header = 0;
/* Basic block after which recovery blocks will be created. */
static basic_block before_recovery;
FOR_EACH_DEP (pair->i2, SD_LIST_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
struct delay_pair *other_pair
= delay_htab_i2->find_with_hash (pro, htab_hash_pointer (pro));
if (!other_pair || other_pair->stages)
FOR_EACH_DEP (next, SD_LIST_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
ds_t ds = DEP_STATUS (dep) & SPECULATIVE;
if (DEBUG_INSN_P (pro) && !DEBUG_INSN_P (next))
int
dep_cost_1 (dep_t link, dw_t dw)
{
- rtx insn = DEP_PRO (link);
- rtx used = DEP_CON (link);
+ rtx_insn *insn = DEP_PRO (link);
+ rtx_insn *used = DEP_CON (link);
int cost;
if (DEP_COST (link) != UNKNOWN_DEP_COST)
for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx dbg = DEP_PRO (dep);
+ rtx_insn *dbg = DEP_PRO (dep);
struct reg_use_data *use, *next;
if (DEP_STATUS (dep) & DEP_CANCELLED)
sd_iterator_cond (&sd_it, &dep); sd_iterator_next (&sd_it))
{
struct dep_replacement *desc = DEP_REPLACE (dep);
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
if (QUEUE_INDEX (pro) != QUEUE_SCHEDULED
&& desc != NULL && desc->insn == pro)
apply_replacement (dep, false);
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx next = DEP_CON (dep);
+ rtx_insn *next = DEP_CON (dep);
bool cancelled = (DEP_STATUS (dep) & DEP_CANCELLED) != 0;
/* Resolve the dependence between INSN and NEXT.
for (sd_it = sd_iterator_start (last, SD_LIST_RES_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx con = DEP_CON (dep);
+ rtx_insn *con = DEP_CON (dep);
sd_unresolve_dep (sd_it);
if (!MUST_RECOMPUTE_SPEC_P (con))
{
static void
restore_pattern (dep_t dep, bool immediately)
{
- rtx next = DEP_CON (dep);
+ rtx_insn *next = DEP_CON (dep);
int tick = INSN_TICK (next);
/* If we already scheduled the insn, the modified version is
FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
int t;
if (DEP_STATUS (dep) & DEP_CANCELLED)
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx next = DEP_CON (dep);
+ rtx_insn *next = DEP_CON (dep);
if (sched_verbose >= 4)
fprintf (sched_dump, ";;\t\tdep %d against %d\n", INSN_UID (insn),
FOR_EACH_DEP (head, SD_LIST_RES_FORW, sd_it, dep)
{
- rtx next;
+ rtx_insn *next;
next = DEP_CON (dep);
tick = INSN_TICK (next);
FOR_EACH_DEP (next, SD_LIST_RES_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
int tick1;
gcc_assert (INSN_TICK (pro) >= MIN_TICK);
for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx check = DEP_PRO (dep);
+ rtx_insn *check = DEP_PRO (dep);
if (IS_SPECULATION_SIMPLE_CHECK_P (check))
{
while (1)
{
- rtx check, twin;
+ rtx_insn *check, *twin;
basic_block rec;
/* Get the first backward dependency of INSN. */
instructions from REC. */
FOR_EACH_DEP (insn, SD_LIST_SPEC_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
gcc_assert (DEP_TYPE (dep) == REG_DEP_TRUE);
for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
if (BLOCK_FOR_INSN (pro) == rec)
sd_delete_dep (sd_it);
sched_extend_bb (void)
{
/* The following is done to keep current_sched_info->next_tail non null. */
- rtx end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
- rtx insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
+ rtx_insn *end = BB_END (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
+ rtx_insn *insn = DEBUG_INSN_P (end) ? prev_nondebug_insn (end) : end;
if (NEXT_INSN (end) == 0
|| (!NOTE_P (insn)
&& !LABEL_P (insn)
Between these two blocks recovery blocks will be emitted. */
basic_block single, empty;
- rtx x, label;
+ rtx_insn *x;
+ rtx label;
/* If the fallthrough edge to exit we've found is from the block we've
created before, don't do anything more. */
sched_create_recovery_block (basic_block *before_recovery_ptr)
{
rtx label;
- rtx barrier;
+ rtx_insn *barrier;
basic_block rec;
haifa_recovery_bb_recently_added_p = true;
/* First, create dependencies between INSN's producers and CHECK & TWIN. */
FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
ds_t ds;
/* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
static void
fix_recovery_deps (basic_block rec)
{
- rtx note, insn, jump, ready_list = 0;
+ rtx_insn *note, *insn, *jump;
+ rtx ready_list = 0;
bitmap_head in_ready;
rtx link;
for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
sd_iterator_cond (&sd_it, &dep);)
{
- rtx consumer = DEP_CON (dep);
+ rtx_insn *consumer = DEP_CON (dep);
if (BLOCK_FOR_INSN (consumer) != rec)
{
if (first == last)
return;
- bb_header = XNEWVEC (rtx, last_basic_block_for_fn (cfun));
+ bb_header = XNEWVEC (rtx_insn *, last_basic_block_for_fn (cfun));
/* Make a sentinel. */
if (last->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
first = first->next_bb;
do
{
- rtx prev, label, note, next;
+ rtx_insn *prev, *label, *note, *next;
label = BB_HEAD (last);
if (LABEL_P (label))
while (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
&& bb_header[first->index])
{
- rtx prev, label, note, next;
+ rtx_insn *prev, *label, *note, *next;
label = bb_header[first->index];
prev = PREV_INSN (label);
FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
{
- rtx pro = DEP_PRO (dep);
+ rtx_insn *pro = DEP_PRO (dep);
if (INSN_PRIORITY_STATUS (pro) >= 0
&& QUEUE_INDEX (insn) != QUEUE_SCHEDULED)