+Tue Nov 2 15:38:17 1999 Richard Henderson <rth@cygnus.com>
+
+ * resource.c: Revert Oct 26 20:42 and Oct 27 00:56 changes.
+ * toplev.c: Revert Nov 1 13:22 change.
+
Tue Nov 2 14:21:37 1999 Jason Eckhardt <jle@cygnus.com>

* config/pa/pa.md (height reduction patterns): Add checks for
* combine.c (combine_instructions): Use xmalloc instead of alloca.

Mon Nov 1 13:22:30 1999 Richard Henderson <rth@cygnus.com>

* toplev.c (rest_of_compilation): Don't optimize the CFG
* stupid.c (find_clobbered_regs): Take additional parameter.
(stupid_life_analysis): Adjust calls to note_stores.

Wed Oct 27 19:26:12 1999 Nick Clifton <nickc@cygnus.com>

* config/arm/coff.h (STRUCTURE_SIZE_BOUNDARY): Delete
static HARD_REG_SET pending_dead_regs;
\f
static void update_live_status PROTO ((rtx, rtx, void *));
+static int find_basic_block PROTO ((rtx));
static rtx next_insn_no_annul PROTO ((rtx));
static rtx find_dead_or_set_registers PROTO ((rtx, struct resources*,
rtx*, int, struct resources,
CLEAR_HARD_REG_BIT (pending_dead_regs, i);
}
}
+/* Find the number of the basic block that starts closest to INSN. Return -1
+ if we couldn't find such a basic block. */
+
+static int
+find_basic_block (insn)
+ rtx insn;
+{
+ int i;
+
+ /* Scan backwards to the previous BARRIER. Then see if we can find a
+ label that starts a basic block. Return the basic block number. */
+
+ for (insn = prev_nonnote_insn (insn);
+ insn && GET_CODE (insn) != BARRIER;
+ insn = prev_nonnote_insn (insn))
+ ;
+
+ /* The start of the function is basic block zero. */
+ if (insn == 0)
+ return 0;
+
+ /* See if any of the upcoming CODE_LABELs start a basic block. If we reach
+ anything other than a CODE_LABEL or note, we can't find this code. */
+ for (insn = next_nonnote_insn (insn);
+ insn && GET_CODE (insn) == CODE_LABEL;
+ insn = next_nonnote_insn (insn))
+ {
+ for (i = 0; i < n_basic_blocks; i++)
+ if (insn == BLOCK_HEAD (i))
+ return i;
+ }
+
+ return -1;
+}
\f
/* Similar to next_insn, but ignores insns in the delay slots of
an annulled branch. */
b = tinfo->block;
}
- if (b == -1
- && INSN_UID (target) < (int) VARRAY_SIZE (basic_block_for_insn))
- b = BLOCK_NUM (target);
+ if (b == -1)
+ b = find_basic_block (target);
if (target_hash_table != NULL)
{
target_hash_table = (struct target_info **)
xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *));
bb_ticks = (int *) xcalloc (n_basic_blocks, sizeof (int));
-
- compute_bb_for_insn (get_max_uid ());
}
\f
/* Free up the resources allocated to mark_target_live_regs (). This
incr_ticks_for_insn (insn)
rtx insn;
{
- int b = BLOCK_NUM (insn);
+ int b = find_basic_block (insn);
if (b != -1)
bb_ticks[b]++;
/* And we don't clobber traceback for noreturn functions. */
if ((regno == FRAME_POINTER_REGNUM || regno == HARD_FRAME_POINTER_REGNUM)
&& (! reload_completed || frame_pointer_needed))
- continue;
+ continue;
success = 1;
for (j = HARD_REGNO_NREGS (regno, mode) - 1; j >= 0; j--)
TIMEVAR
(dbr_sched_time,
{
- /* ??? Keep the CFG up to date after cross-jumping. */
- find_basic_blocks (insns, max_reg_num (), rtl_dump_file, 0);
- count_or_remove_death_notes (NULL, 1);
- life_analysis (insns, max_reg_num (), rtl_dump_file, 0);
-
dbr_schedule (insns, rtl_dump_file);
});