int r = regno + i;
rtx u;
- for (u = deps->reg_last_uses[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
/* Clobbers need not be ordered with respect to one
   another, but sets must be ordered with respect to a
   pending clobber.  */
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[r]);
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ free_INSN_LIST_list (&deps->reg_last[r].uses);
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, r);
}
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
rtx u;
- for (u = deps->reg_last_uses[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
if (code == SET)
{
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[regno]);
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ free_INSN_LIST_list (&deps->reg_last[regno].uses);
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
SET_REGNO_REG_SET (reg_pending_sets, regno);
}
while (--i >= 0)
{
int r = regno + i;
- deps->reg_last_uses[r]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[r]);
+ deps->reg_last[r].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, r);
- for (u = deps->reg_last_sets[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[r]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
if (call_used_regs[r] || global_regs[r])
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
+ /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
+ it does not reload. Ignore these as they have served their
+ purpose already. */
+ else if (regno >= deps->max_reg)
+ {
+ if (GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ abort ();
+ }
else
{
- deps->reg_last_uses[regno]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[regno]);
+ deps->reg_last[regno].uses
+ = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);
- for (u = deps->reg_last_sets[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* ??? This should never happen. */
- for (u = deps->reg_last_clobbers[regno]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
/* Pseudos that are REG_EQUIV to something may be replaced
   by that during reloading.  */
/* An insn should not be moved across a volatile asm even if it
   only uses pseudo-regs, because it might give an incorrectly
   rounded result.  */
if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
+ struct deps_reg *reg_last = &deps->reg_last[i];
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
{
register RTX_CODE code = GET_CODE (x);
rtx link;
- int maxreg = max_reg_num ();
int i;
if (code == COND_EXEC)
next = next_nonnote_insn (insn);
if (next && GET_CODE (next) == BARRIER)
{
- for (i = 0; i < maxreg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
}
}
INIT_REG_SET (&tmp);
(*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
- EXECUTE_IF_SET_IN_REG_SET
- (&tmp, 0, i,
+ EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
{
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- deps->reg_last_uses[i]
- = alloc_INSN_LIST (insn, deps->reg_last_uses[i]);
+ reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
});
CLEAR_REG_SET (&tmp);
if (loop_notes)
{
- int max_reg = max_reg_num ();
int schedule_barrier_found = 0;
rtx link;
/* Add dependencies if a scheduling barrier was found. */
if (schedule_barrier_found)
{
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
+ struct deps_reg *reg_last = &deps->reg_last[i];
rtx u;
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
- add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
+ add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
}
- /* Accumulate clobbers until the next set so that it will be output dependent
- on all of them. At the next set we can clear the clobber list, since
- subsequent sets will be output dependent on it. */
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_sets, 0, i,
+ /* Accumulate clobbers until the next set so that it will be output
+ dependent on all of them. At the next set we can clear the clobber
+ list, since subsequent sets will be output dependent on it. */
+ if (reg_pending_sets_all)
{
- if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ reg_pending_sets_all = 0;
+ for (i = 0; i < deps->max_reg; i++)
{
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = 0;
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ if (GET_CODE (PATTERN (insn)) != COND_EXEC)
+ {
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ }
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
}
- deps->reg_last_sets[i]
- = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
- });
- EXECUTE_IF_SET_IN_REG_SET
- (reg_pending_clobbers, 0, i,
- {
- deps->reg_last_clobbers[i]
- = alloc_INSN_LIST (insn, deps->reg_last_clobbers[i]);
- });
- CLEAR_REG_SET (reg_pending_sets);
- CLEAR_REG_SET (reg_pending_clobbers);
-
- if (reg_pending_sets_all)
+ }
+ else
{
- for (i = 0; i < maxreg; i++)
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
{
+ struct deps_reg *reg_last = &deps->reg_last[i];
if (GET_CODE (PATTERN (insn)) != COND_EXEC)
{
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- deps->reg_last_sets[i] = 0;
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
}
- deps->reg_last_sets[i]
- = alloc_INSN_LIST (insn, deps->reg_last_sets[i]);
- }
-
- reg_pending_sets_all = 0;
+ reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
+ EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
+ SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
+ });
}
+ CLEAR_REG_SET (reg_pending_sets);
+ CLEAR_REG_SET (reg_pending_clobbers);
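
To make the "accumulate clobbers until the next set" rule above concrete,
here is a standalone sketch of the discipline for a single register, using
plain C linked lists with integer ids standing in for insns (and ignoring
the COND_EXEC case, where the lists are not freed):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the INSN_LIST chains kept on one register.  */
struct node { int insn; struct node *next; };

static struct node *sets;
static struct node *clobbers;

static struct node *
push (int insn, struct node *list)
{
  struct node *n = (struct node *) malloc (sizeof *n);
  n->insn = insn;
  n->next = list;
  return n;
}

static void
free_list (struct node **list)
{
  while (*list)
    {
      struct node *t = *list;
      *list = t->next;
      free (t);
    }
}

static void
clobber_reg (int insn)
{
  struct node *u;

  /* A clobber is output dependent on pending sets, but not on the
     other pending clobbers; it simply joins the clobber list.  */
  for (u = sets; u; u = u->next)
    printf ("insn %d -> insn %d (output)\n", insn, u->insn);
  clobbers = push (insn, clobbers);
}

static void
set_reg (int insn)
{
  struct node *u;

  /* A set is output dependent on pending sets and on every clobber
     accumulated since the last set.  Both lists can then be dropped:
     later insns reach them transitively through this set.  */
  for (u = sets; u; u = u->next)
    printf ("insn %d -> insn %d (output)\n", insn, u->insn);
  for (u = clobbers; u; u = u->next)
    printf ("insn %d -> insn %d (output)\n", insn, u->insn);
  free_list (&sets);
  free_list (&clobbers);
  sets = push (insn, sets);
}

int
main (void)
{
  clobber_reg (1);	/* no pending deps */
  clobber_reg (2);	/* no edge to insn 1 */
  set_reg (3);		/* edges to insns 1 and 2 */
  clobber_reg (4);	/* edge to insn 3 only */
  return 0;
}
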
/* If a post-call group is still open, see if it should remain so.
   This insn must be a simple move of a hard reg to a pseudo or
   vice-versa.  */
if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
&& NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
{
- int max_reg = max_reg_num ();
- for (i = 0; i < max_reg; i++)
+ for (i = 0; i < deps->max_reg; i++)
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ struct deps_reg *reg_last = &deps->reg_last[i];
+
+ for (u = reg_last->uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
-
- for (u = deps->reg_last_clobbers[i]; u; u = XEXP (u, 1))
+ for (u = reg_last->clobbers; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), 0);
+
+ free_INSN_LIST_list (&reg_last->uses);
}
reg_pending_sets_all = 1;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
if (call_used_regs[i] || global_regs[i])
{
- for (u = deps->reg_last_uses[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
-
- for (u = deps->reg_last_sets[i]; u; u = XEXP (u, 1))
+ for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
SET_REGNO_REG_SET (reg_pending_clobbers, i);
init_deps (deps)
struct deps *deps;
{
- int maxreg = max_reg_num ();
- deps->reg_last_uses = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_sets = (rtx *) xcalloc (maxreg, sizeof (rtx));
- deps->reg_last_clobbers = (rtx *) xcalloc (maxreg, sizeof (rtx));
+ int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
+
+ deps->max_reg = max_reg;
+ deps->reg_last = (struct deps_reg *)
+ xcalloc (max_reg, sizeof (struct deps_reg));
+ INIT_REG_SET (&deps->reg_last_in_use);
deps->pending_read_insns = 0;
deps->pending_read_mems = 0;
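
For orientation, here is the approximate shape of the per-register record
that replaces the three parallel reg_last_* arrays, inferred from the
accessors in the hunks above.  The real declarations (presumably in
sched-int.h) are not part of this excerpt, so field order and exact types
are assumptions:

/* Sketch only -- inferred, not copied from the patch.  */
struct deps_reg
{
  rtx uses;	/* INSN_LIST of pending uses of the register.  */
  rtx sets;	/* INSN_LIST of pending sets.  */
  rtx clobbers;	/* INSN_LIST of clobbers seen since the last set.  */
};

struct deps
{
  int max_reg;			/* Number of entries in reg_last.  */
  struct deps_reg *reg_last;	/* One record per register.  */
  regset_head reg_last_in_use;	/* Regs with a nonempty record; type
				   inferred from the INIT_REG_SET use.  */
  /* ... the preexisting members (pending_read_insns, etc.) follow.  */
};
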
free_deps (deps)
struct deps *deps;
{
- int max_reg = max_reg_num ();
int i;
- /* Note this loop is executed max_reg * nr_regions times. It's first
- implementation accounted for over 90% of the calls to free_INSN_LIST_list.
- The list was empty for the vast majority of those calls. On the PA, not
- calling free_INSN_LIST_list in those cases improves -O2 compile times by
- 3-5% on average. */
- for (i = 0; i < max_reg; ++i)
+ /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
+ times. For a test case with 42000 regs and 8000 small basic blocks,
+ this loop accounted for nearly 60% (84 sec) of the total -O2 runtime. */
+ EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
{
- if (deps->reg_last_clobbers[i])
- free_INSN_LIST_list (&deps->reg_last_clobbers[i]);
- if (deps->reg_last_sets[i])
- free_INSN_LIST_list (&deps->reg_last_sets[i]);
- if (deps->reg_last_uses[i])
- free_INSN_LIST_list (&deps->reg_last_uses[i]);
- }
- free (deps->reg_last_clobbers);
- free (deps->reg_last_sets);
- free (deps->reg_last_uses);
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ free_INSN_LIST_list (&reg_last->uses);
+ free_INSN_LIST_list (&reg_last->sets);
+ free_INSN_LIST_list (&reg_last->clobbers);
+ });
+ CLEAR_REG_SET (&deps->reg_last_in_use);
+
+ free (deps->reg_last);
+ deps->reg_last = NULL;
}
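
The speedup described in the new comment above comes from visiting only the
registers recorded in reg_last_in_use instead of scanning all max_reg
entries.  GCC's EXECUTE_IF_SET_IN_REG_SET does this over a regset; a
minimal self-contained rendition of the same pattern over a flat bitmap
(not GCC's regset implementation) looks like this:

#include <stdint.h>
#include <stdio.h>

#define NREGS 42000

/* Plays the role of reg_last_in_use.  */
static uint64_t in_use[(NREGS + 63) / 64];

static void
mark_reg (int r)
{
  in_use[r / 64] |= (uint64_t) 1 << (r % 64);
}

/* Visit only the marked registers; cost is proportional to the number
   of set bits, not to NREGS.  */
static void
for_each_used_reg (void (*fn) (int))
{
  size_t w;

  for (w = 0; w < sizeof in_use / sizeof in_use[0]; w++)
    {
      uint64_t bits = in_use[w];
      while (bits)
	{
	  int b = __builtin_ctzll (bits);	/* lowest set bit */
	  fn ((int) (w * 64 + b));
	  bits &= bits - 1;			/* clear that bit */
	}
    }
}

static void
visit (int r)
{
  printf ("reg %d\n", r);
}

int
main (void)
{
  mark_reg (3);
  mark_reg (64);
  mark_reg (41999);
  for_each_used_reg (visit);	/* touches 3 regs, not 42000 */
  return 0;
}
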
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  */
static void init_regions PARAMS ((void));
static void schedule_region PARAMS ((int));
-static void propagate_deps PARAMS ((int, struct deps *, int));
+static void propagate_deps PARAMS ((int, struct deps *));
static void free_pending_lists PARAMS ((void));
/* Functions for construction of the control flow graph. */
static struct deps *bb_deps;
/* After computing the dependencies for block BB, propagate the dependencies
- found in TMP_DEPS to the successors of the block. MAX_REG is the number
- of registers. */
+ found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (bb, tmp_deps, max_reg)
+propagate_deps (bb, tmp_deps)
int bb;
struct deps *tmp_deps;
- int max_reg;
{
int b = BB_TO_BLOCK (bb);
int e, first_edge;
continue;
}
- for (reg = 0; reg < max_reg; reg++)
+ /* The reg_last lists are inherited by bb_succ. */
+ EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
{
- /* reg-last-uses lists are inherited by bb_succ. */
- for (u = tmp_deps->reg_last_uses[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_uses[reg]))
- continue;
-
- succ_deps->reg_last_uses[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_uses[reg]);
- }
-
- /* reg-last-defs lists are inherited by bb_succ. */
- for (u = tmp_deps->reg_last_sets[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_sets[reg]))
- continue;
-
- succ_deps->reg_last_sets[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_sets[reg]);
- }
-
- for (u = tmp_deps->reg_last_clobbers[reg]; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->reg_last_clobbers[reg]))
- continue;
-
- succ_deps->reg_last_clobbers[reg]
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->reg_last_clobbers[reg]);
- }
- }
+ struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
+ struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
+
+ for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
+ succ_deps_reg->uses
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
+
+ for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
+ succ_deps_reg->sets
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
+
+ for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
+ if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
+ succ_deps_reg->clobbers
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
+ });
+ IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
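
The inheritance loop above is a duplicate-suppressing union of INSN_LISTs:
each element of the predecessor's list is copied into the successor's
unless find_insn_list already finds it there.  Stripped of the rtx
machinery, the pattern is the following sketch (integer ids standing in
for insns):

#include <stdbool.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

/* cf. find_insn_list.  */
static bool
contains (const struct node *list, int id)
{
  for (; list; list = list->next)
    if (list->id == id)
      return true;
  return false;
}

/* Prepend every element of FROM that *TO lacks; quadratic, like the
   original, but these lists stay short in practice.  */
static void
union_into (struct node **to, const struct node *from)
{
  for (; from; from = from->next)
    if (!contains (*to, from->id))
      {
	struct node *n = (struct node *) malloc (sizeof *n);
	n->id = from->id;
	n->next = *to;
	*to = n;
      }
}

int
main (void)
{
  struct node a2 = { 2, 0 };
  struct node a1 = { 1, &a2 };		/* from: [1, 2] */
  struct node b = { 2, 0 };		/* to:   [2]    */
  struct node *to = &b;

  union_into (&to, &a1);		/* to becomes [1, 2] */
  return (to->id == 1 && to->next->id == 2) ? 0 : 1;
}
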
/* Mem read/write lists are inherited by bb_succ. */
link_insn = tmp_deps->pending_read_insns;
/* last_function_call is inherited by bb_succ. */
for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
- succ_deps->last_function_call))
- continue;
-
+ if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
succ_deps->last_function_call
- = alloc_INSN_LIST (XEXP (u, 0),
- succ_deps->last_function_call);
- }
+ = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
/* last_pending_memory_flush is inherited by bb_succ. */
for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
- {
- if (find_insn_list (XEXP (u, 0),
+ if (! find_insn_list (XEXP (u, 0),
succ_deps->last_pending_memory_flush))
- continue;
-
succ_deps->last_pending_memory_flush
= alloc_INSN_LIST (XEXP (u, 0),
succ_deps->last_pending_memory_flush);
- }
/* sched_before_next_call is inherited by bb_succ. */
x = LOG_LINKS (tmp_deps->sched_before_next_call);
Specifically for reg-reg data dependences, the block insns are
scanned by sched_analyze () top-to-bottom. Two lists are
- maintained by sched_analyze (): reg_last_sets[] for register DEFs,
- and reg_last_uses[] for register USEs.
+ maintained by sched_analyze (): reg_last[].sets for register DEFs,
+ and reg_last[].uses for register USEs.
When analysis is completed for bb, we update for its successors:
; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
int bb;
{
rtx head, tail;
- int max_reg = max_reg_num ();
struct deps tmp_deps;
tmp_deps = bb_deps[bb];
add_branch_dependences (head, tail);
if (current_nr_blocks > 1)
- propagate_deps (bb, &tmp_deps, max_reg);
+ propagate_deps (bb, &tmp_deps);
/* Free up the INSN_LISTs. */
free_deps (&tmp_deps);
-
- /* Assert that we won't need bb_reg_last_* for this block anymore.
- The vectors we're zeroing out have just been freed by the call to
- free_deps. */
- bb_deps[bb].reg_last_uses = 0;
- bb_deps[bb].reg_last_sets = 0;
- bb_deps[bb].reg_last_clobbers = 0;
}
+
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
them to the unused_*_list variables, so that they can be reused. */