it being unused. */
void verify_flow_info PROTO ((void));
-/* Flags for propagate_block. */
-
-#define PROP_DEATH_NOTES 1 /* Create DEAD and UNUSED notes. */
-#define PROP_LOG_LINKS 2 /* Create LOG_LINKS. */
-#define PROP_REG_INFO 4 /* Update regs_ever_live et al. */
-#define PROP_KILL_DEAD_CODE 8 /* Remove dead code. */
-#define PROP_SCAN_DEAD_CODE 16 /* Scan for dead code. */
-#define PROP_AUTOINC 32 /* Create autoinc mem references. */
-#define PROP_FINAL 63 /* All of the above. */
\f
/* Find basic blocks of the current function.
F is the first insn of the function and NREGS the number of register
{
int i;
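+ /* Free any previous insn-to-block mapping before rebuilding it. */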
+ if (basic_block_for_insn)
+ VARRAY_FREE (basic_block_for_insn);
VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn");
for (i = 0; i < n_basic_blocks; ++i)
If we find registers removed from live_at_start, that means we have
a broken peephole that is killing a register it shouldn't.
- BLOCK_FOR_INSN is assumed to be correct.
-
??? This is not true in one situation -- when a pre-reload splitter
generates subregs of a multi-word pseudo, current life analysis will
- lose the kill. So we _can_ have a pseudo go live. How irritating. */
+ lose the kill. So we _can_ have a pseudo go live. How irritating.
+
+ BLOCK_FOR_INSN is assumed to be correct.
+
+ ??? PROP_FLAGS should not contain PROP_LOG_LINKS. Need to set up
+ reg_next_use for that. Including PROP_REG_INFO does not refresh
+ regs_ever_live unless the caller resets it to zero. */
void
-update_life_info (blocks, extent)
+update_life_info (blocks, extent, prop_flags)
sbitmap blocks;
enum update_life_extent extent;
+ int prop_flags;
{
regset tmp;
int i;
/* For a global update, we go through the relaxation process again. */
if (extent != UPDATE_LIFE_LOCAL)
{
- calculate_global_regs_live (blocks, blocks, 0);
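+ /* Pass only the dead code scanning flag to the global pass; the
+ remaining PROP_FLAGS bits are applied per block by propagate_block
+ below. */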
+ calculate_global_regs_live (blocks, blocks,
+ prop_flags & PROP_SCAN_DEAD_CODE);
/* If asked, remove notes from the blocks we'll update. */
if (extent == UPDATE_LIFE_GLOBAL_RM_NOTES)
COPY_REG_SET (tmp, bb->global_live_at_end);
propagate_block (tmp, bb->head, bb->end, (regset) NULL, i,
- PROP_DEATH_NOTES);
+ prop_flags);
if (extent == UPDATE_LIFE_LOCAL)
verify_local_live_at_start (tmp, bb);
vector oriented regsets would set regset_{size,bytes} here also. */
allocate_reg_info (max_regno, FALSE, FALSE);
- /* Because both reg_scan and flow_analysis want to set up the REG_N_SETS
- information, explicitly reset it here. The allocation should have
- already happened on the previous reg_scan pass. Make sure in case
- some more registers were allocated. */
+ /* Reset all the data we'll collect in propagate_block and its
+ subroutines. */
for (i = 0; i < max_regno; i++)
- REG_N_SETS (i) = 0;
+ {
+ REG_N_SETS (i) = 0;
+ REG_N_REFS (i) = 0;
+ REG_N_DEATHS (i) = 0;
+ REG_N_CALLS_CROSSED (i) = 0;
+ REG_LIVE_LENGTH (i) = 0;
+ REG_BASIC_BLOCK (i) = REG_BLOCK_UNKNOWN;
+ }
}
/* Compute the registers live at the beginning of a basic block
static rtx reemit_notes PROTO ((rtx, rtx));
static void get_block_head_tail PROTO ((int, rtx *, rtx *));
+static void get_bb_head_tail PROTO ((int, rtx *, rtx *));
static int queue_to_ready PROTO ((rtx [], int));
-/* Return the head and tail pointers of BB. */
+/* Return the head and tail pointers of basic block B. */
HAIFA_INLINE static void
-get_block_head_tail (bb, headp, tailp)
- int bb;
+get_block_head_tail (b, headp, tailp)
+ int b;
rtx *headp;
rtx *tailp;
{
rtx head;
rtx tail;
- int b;
-
- b = BB_TO_BLOCK (bb);
/* HEAD and TAIL delimit the basic block being scheduled. */
head = BLOCK_HEAD (b);
*tailp = tail;
}
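+ /* Likewise, but for BB, a block index within the current scheduling
+ region rather than a global block number. */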
+HAIFA_INLINE static void
+get_bb_head_tail (bb, headp, tailp)
+ int bb;
+ rtx *headp;
+ rtx *tailp;
+{
+ get_block_head_tail (BB_TO_BLOCK (bb), headp, tailp);
+}
+
/* Delete line notes from bb. Save them so they can be later restored
(in restore_line_notes ()). */
rtx head;
rtx insn;
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
if (head == tail
&& (GET_RTX_CLASS (GET_CODE (head)) != 'i'))
rtx line = line_note_head[BB_TO_BLOCK (bb)];
rtx insn;
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
next_tail = NEXT_INSN (tail);
for (insn = BLOCK_HEAD (BB_TO_BLOCK (bb));
/* Calculate INSN_REG_WEIGHT for all insns of a block. */
static void
-find_insn_reg_weight (bb)
- int bb;
+find_insn_reg_weight (b)
+ int b;
{
rtx insn, next_tail, head, tail;
- get_block_head_tail (bb, &head, &tail);
+ get_block_head_tail (b, &head, &tail);
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
However, it was removed when it proved to be of marginal benefit
and caused problems because schedule_block and compute_forward_dependences
had different notions of what the "head" insn was. */
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
/* Interblock scheduling could have moved the original head insn from this
block into a preceding block. This may also cause schedule_block and
rtx src_next_tail;
rtx tail, head;
- get_block_head_tail (bb_src, &head, &tail);
+ get_bb_head_tail (bb_src, &head, &tail);
src_next_tail = NEXT_INSN (tail);
src_head = head;
}
nr_inter++;
- /* Find the beginning of the scheduling group; update the
- containing block number for the insns. */
+ /* Find the beginning of the scheduling group. */
+ /* ??? Ought to update basic block here, but later bits of
+ schedule_block assume the original insn block is
+ still intact. */
+
temp = insn;
- set_block_num (temp, target_bb);
while (SCHED_GROUP_P (insn))
- {
- temp = PREV_INSN (temp);
- set_block_num (temp, target_bb);
- }
+ temp = PREV_INSN (temp);
/* Update source block boundaries. */
b1 = BLOCK_FOR_INSN (temp);
rtx next_tail;
enum reg_note dep_type;
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
{
}
/* Do the analysis for this block. */
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
sched_analyze (head, tail);
add_branch_dependences (head, tail);
rtx next_tail;
rtx insn;
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
next_tail = NEXT_INSN (tail);
fprintf (dump, "\n;; --- Region Dependences --- b %d bb %d \n",
BB_TO_BLOCK (bb), bb);
rtx prev_head;
rtx head;
- get_block_head_tail (bb, &head, &tail);
+ get_bb_head_tail (bb, &head, &tail);
prev_head = PREV_INSN (head);
if (head == tail
int bb;
int rgn_n_insns = 0;
int sched_rgn_n_insns = 0;
- int initial_deaths;
- sbitmap blocks;
/* Set variables for the current region. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
reg_pending_clobbers = ALLOCA_REG_SET ();
reg_pending_sets_all = 0;
- /* Create a bitmap of the blocks in this region. */
- blocks = sbitmap_alloc (n_basic_blocks);
- sbitmap_zero (blocks);
-
- for (bb = current_nr_blocks - 1; bb >= 0; --bb)
- SET_BIT (blocks, BB_TO_BLOCK (bb));
-
/* Initializations for region data dependence analysis. */
if (current_nr_blocks > 1)
{
for (bb = current_nr_blocks - 1; bb >= 0; bb--)
compute_block_forward_dependences (bb);
- /* Compute INSN_REG_WEIGHT. */
- for (bb = current_nr_blocks - 1; bb >= 0; bb--)
- find_insn_reg_weight (bb);
-
- /* Remove death notes. */
- initial_deaths = count_or_remove_death_notes (blocks, 1);
-
/* Delete line notes and set priorities. */
for (bb = 0; bb < current_nr_blocks; bb++)
{
if (sched_rgn_n_insns != rgn_n_insns)
abort ();
- /* Update register life and usage information. Scheduling a multi-block
- region requires a global update. */
- if (current_nr_blocks > 1)
- update_life_info (blocks, UPDATE_LIFE_GLOBAL);
- else
- {
- update_life_info (blocks, UPDATE_LIFE_LOCAL);
-
- /* In the single block case, the count of registers that died should
- not have changed during the schedule. */
- if (count_or_remove_death_notes (blocks, 0) != initial_deaths)
- abort ();
- }
-
/* Restore line notes. */
if (write_symbols != NO_DEBUG)
{
FREE_REG_SET (reg_pending_sets);
FREE_REG_SET (reg_pending_clobbers);
- sbitmap_free (blocks);
}
/* The one entry point in this file. DUMP_FILE is the dump file for
schedule_insns (dump_file)
FILE *dump_file;
{
-
+ int *deaths_in_region;
+ sbitmap blocks, large_region_blocks;
int max_uid;
int b;
rtx insn;
int rgn;
-
int luid;
+ int any_large_regions;
/* Disable speculative loads in their presence if cc0 defined. */
#ifdef HAVE_cc0
block_to_bb = (int *) alloca ((n_basic_blocks) * sizeof (int));
containing_rgn = (int *) alloca ((n_basic_blocks) * sizeof (int));
+ blocks = sbitmap_alloc (n_basic_blocks);
+ large_region_blocks = sbitmap_alloc (n_basic_blocks);
+
compute_bb_for_insn (max_uid);
/* Compute regions for scheduling. */
insn_dep_count = (int *) xcalloc (max_uid, sizeof (int));
insn_depend = (rtx *) xcalloc (max_uid, sizeof (rtx));
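+ /* Record each region's death note count before scheduling, so that
+ single block regions can be verified afterward. */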
+ deaths_in_region = (int *) alloca (sizeof (int) * nr_regions);
+
init_alias_analysis ();
if (write_symbols != NO_DEBUG)
&& GET_CODE (NEXT_INSN (insn)) == BARRIER)))
emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
+ /* Compute INSN_REG_WEIGHT for all blocks. We must do this before
+ removing death notes. */
+ for (b = n_basic_blocks - 1; b >= 0; b--)
+ find_insn_reg_weight (b);
+
+ /* Remove all death notes from the subroutine. */
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ {
+ sbitmap_zero (blocks);
+ for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
+ SET_BIT (blocks, rgn_bb_table [RGN_BLOCKS (rgn) + b]);
+
+ deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+ }
+
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
{
#endif
}
+ /* Update life analysis for the subroutine. Do single block regions
+ first so that we can verify that live_at_start didn't change. Then
+ do all other blocks. */
+ /* ??? There is an outside possibility that update_life_info, or more
+ to the point propagate_block, could get called with non-zero flags
+ more than once for one basic block. This would be kinda bad if it
+ were to happen, since REG_INFO would be accumulated twice for the
+ block, and we'd have twice the REG_DEAD notes.
+
+ I'm fairly certain that this _shouldn't_ happen, since I don't think
+ that live_at_start should change at region heads. It's not clear what
+ the best way to test for this kind of thing would be. */
+
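+ /* Scheduling may have moved insns between blocks, so recompute the
+ insn-to-block mapping and reset the register life data that
+ propagate_block will recollect. */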
+ allocate_reg_life_data ();
+ compute_bb_for_insn (max_uid);
+
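+ /* Start with every block in the large-region set; single block
+ regions are handled individually below and removed from the set. */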
+ any_large_regions = 0;
+ sbitmap_ones (large_region_blocks);
+
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (RGN_NR_BLOCKS (rgn) > 1)
+ any_large_regions = 1;
+ else
+ {
+ sbitmap_zero (blocks);
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
+ RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
+
+ update_life_info (blocks, UPDATE_LIFE_LOCAL,
+ PROP_DEATH_NOTES | PROP_REG_INFO);
+
+ /* In the single block case, the count of registers that died should
+ not have changed during the schedule. */
+ if (count_or_remove_death_notes (blocks, 0) != deaths_in_region[rgn])
+ abort ();
+ }
+
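+ /* Blocks in multi-block regions may have had insns moved across
+ block boundaries, so they need a full global update. */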
+ if (any_large_regions)
+ {
+ update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
+ PROP_DEATH_NOTES | PROP_REG_INFO);
+ }
+
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */
if (reload_completed)
free (out_edges);
out_edges = NULL;
}
+
+ sbitmap_free (blocks);
+ sbitmap_free (large_region_blocks);
}
#endif /* INSN_SCHEDULING */