+2004-08-24 Zack Weinberg <zack@codesourcery.com>
+
+ * basic-block.h (struct basic_block_def): Reorder fields to
+ eliminate interior padding. Remove 'partition' field.
+ (BB_DISABLE_SCHEDULE, BB_HOT_PARTITION, BB_COLD_PARTITION)
+ (BB_UNPARTITIONED, BB_PARTITION, BB_SET_PARTITION)
+ (BB_COPY_PARTITION): New macros.
+ * bb-reorder.c, cfgcleanup.c, cfglayout.c, cfgrtl.c, ifcvt.c:
+ Replace all references to the 'partition' field of a basic
+ block with new macros.
+
+ * insn-notes.def: Delete NOTE_INSN_DISABLE_SCHED_OF_BLOCK.
+ * final.c (final_scan_insn): Don't handle it.
+ * modulo-sched.c: Set BB_DISABLE_SCHEDULE flag on g->bb
+ instead of emitting a NOTE_INSN_DISABLE_SCHED_OF_BLOCK note.
+ * sched-rgn.c (sched_is_disabled_for_current_region_p):
+ Look for the BB_DISABLE_SCHEDULE flag on each block of the region instead of a note.
+
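As a minimal standalone sketch (an illustration under assumptions, not part of the patch), the partition now lives in two bits of the flags word and the new macros touch only those bits; struct demo_bb and main below are hypothetical stand-ins for struct basic_block_def and the real basic-block.h definitions:

    #include <stdio.h>

    /* Illustrative copies of the new flag bits and accessors; in GCC they
       live in basic-block.h and operate on basic_block pointers.  */
    #define BB_HOT_PARTITION   128
    #define BB_COLD_PARTITION  256
    #define BB_UNPARTITIONED     0

    struct demo_bb { int flags; };      /* hypothetical stand-in */

    #define BB_PARTITION(bb) ((bb)->flags & (BB_HOT_PARTITION | BB_COLD_PARTITION))
    #define BB_SET_PARTITION(bb, part) \
      ((bb)->flags = ((bb)->flags & ~(BB_HOT_PARTITION | BB_COLD_PARTITION)) | (part))
    #define BB_COPY_PARTITION(dstbb, srcbb) \
      BB_SET_PARTITION (dstbb, BB_PARTITION (srcbb))

    int
    main (void)
    {
      struct demo_bb a = { 0 }, b = { 0 };

      BB_SET_PARTITION (&a, BB_COLD_PARTITION);
      BB_COPY_PARTITION (&b, &a);
      printf ("a cold: %d  b cold: %d\n",
              BB_PARTITION (&a) == BB_COLD_PARTITION,
              BB_PARTITION (&b) == BB_COLD_PARTITION);

      /* BB_SET_PARTITION masks out the old partition bits before OR-ing in
         the new ones, so resetting to BB_UNPARTITIONED really clears them.  */
      BB_SET_PARTITION (&a, BB_UNPARTITIONED);
      printf ("a unpartitioned: %d\n", BB_PARTITION (&a) == BB_UNPARTITIONED);
      return 0;
    }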
2004-08-24 Nathan Sidwell <nathan@codesourcery.com>
* c-decl.c (c_init_decl_processing): Adjust
* objc/objc-act.h (TREE_STATIC_TEMPLATE): Use TREE_PRIVATE.
2004-08-24 Richard Henderson <rth@redhat.com>
- Andrew Pinski <apinski@apple.com>
+ Andrew Pinski <apinski@apple.com>
* gimplify.c (gimplify_array_ref_to_plus): Delete.
(gimplify_addr_expr): Do not call gimplify_array_ref_to_plus
/* Auxiliary info specific to a pass. */
PTR GTY ((skip (""))) aux;
- /* The index of this block. */
- int index;
+ /* Innermost loop containing the block. */
+ struct loop * GTY ((skip (""))) loop_father;
+
+ /* The dominance and postdominance information node. */
+ struct et_node * GTY ((skip (""))) dom[2];
/* Previous and next blocks in the chain. */
struct basic_block_def *prev_bb;
struct basic_block_def *next_bb;
- /* The loop depth of this block. */
- int loop_depth;
-
- /* Innermost loop containing the block. */
- struct loop * GTY ((skip (""))) loop_father;
+ /* The data used by basic block copying and reordering functions. */
+ struct reorder_block_def * GTY ((skip (""))) rbi;
- /* The dominance and postdominance information node. */
- struct et_node * GTY ((skip (""))) dom[2];
+ /* Annotations used at the tree level. */
+ struct bb_ann_d *tree_annotations;
/* Expected number of executions: calculated in profile.c. */
gcov_type count;
+ /* The index of this block. */
+ int index;
+
+ /* The loop depth of this block. */
+ int loop_depth;
+
/* Expected frequency. Normalized to be in range 0 to BB_FREQ_MAX. */
int frequency;
/* Various flags. See BB_* below. */
int flags;
-
- /* Which section block belongs in, when partitioning basic blocks. */
- int partition;
-
- /* The data used by basic block copying and reordering functions. */
- struct reorder_block_def * GTY ((skip (""))) rbi;
-
- /* Annotations used at the tree level. */
- struct bb_ann_d *tree_annotations;
};
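To make the "eliminate interior padding" claim concrete, here is a hedged sketch with simplified stand-in structs (not the real basic_block_def) showing the effect of grouping pointer-sized members before the int members on an LP64 target (8-byte pointers, 4-byte ints):

    #include <stdio.h>

    struct bb_old                   /* ints interleaved with pointers */
    {
      void *aux;
      int index;                    /* 4 bytes + 4 bytes of padding */
      struct bb_old *prev_bb;
      struct bb_old *next_bb;
      int loop_depth;               /* 4 bytes + 4 bytes of padding */
      void *loop_father;
      long long count;
      int frequency;
      int flags;
    };

    struct bb_new                   /* pointer-sized members first, ints last */
    {
      void *aux;
      void *loop_father;
      struct bb_new *prev_bb;
      struct bb_new *next_bb;
      long long count;
      int index;
      int loop_depth;
      int frequency;
      int flags;
    };

    int
    main (void)
    {
      /* On a typical LP64 target this prints 64 and 56 bytes: the two
         4-byte holes after the lone ints are gone in the reordered layout.  */
      printf ("old: %zu bytes  new: %zu bytes\n",
              sizeof (struct bb_old), sizeof (struct bb_new));
      return 0;
    }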
typedef struct basic_block_def *basic_block;
#define BB_VISITED 8
#define BB_IRREDUCIBLE_LOOP 16
#define BB_SUPERBLOCK 32
+#define BB_DISABLE_SCHEDULE 64
+
+#define BB_HOT_PARTITION 128
+#define BB_COLD_PARTITION 256
+#define BB_UNPARTITIONED 0
/* Partitions, to be used when partitioning hot and cold basic blocks into
separate sections. */
-#define UNPARTITIONED 0
-#define HOT_PARTITION 1
-#define COLD_PARTITION 2
+#define BB_PARTITION(bb) ((bb)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION))
+#define BB_SET_PARTITION(bb, part) \
+  ((bb)->flags = ((bb)->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) | (part))
+#define BB_COPY_PARTITION(dstbb, srcbb) \
+ BB_SET_PARTITION (dstbb, BB_PARTITION (srcbb))
/* Number of basic blocks in the current function. */
next_round_is_last = round + 1 == number_of_rounds - 1;
cold_block = (flag_reorder_blocks_and_partition
- && bb->partition == COLD_PARTITION);
+ && BB_PARTITION (bb) == BB_COLD_PARTITION);
block_not_hot_enough = (bb->frequency < exec_th
|| bb->count < count_th
if (flag_reorder_blocks_and_partition
&& next_round_is_last
- && bb->partition != COLD_PARTITION)
+ && BB_PARTITION (bb) != BB_COLD_PARTITION)
return false;
else if (there_exists_another_round
&& (cold_block || block_not_hot_enough))
&& e->dest->rbi->visited != *n_traces)
continue;
- if (e->dest->partition == COLD_PARTITION
+ if (BB_PARTITION (e->dest) == BB_COLD_PARTITION
&& round < last_round)
continue;
basic_block new_bb;
new_bb = duplicate_block (old_bb, e);
- new_bb->partition = old_bb->partition;
+ BB_COPY_PARTITION (new_bb, old_bb);
if (e->dest != new_bb)
abort ();
/* Do not start in probably never executed blocks. */
- if (bb->partition == COLD_PARTITION || probably_never_executed_bb_p (bb))
+ if (BB_PARTITION (bb) == BB_COLD_PARTITION
+ || probably_never_executed_bb_p (bb))
return BB_FREQ_MAX;
/* Prefer blocks whose predecessor is an end of some trace
if (flag_reorder_blocks_and_partition)
for (i = 0; i < n_traces; i++)
{
- if (traces[i].first->partition == COLD_PARTITION)
+ if (BB_PARTITION (traces[i].first) == BB_COLD_PARTITION)
{
connected[i] = true;
cold_traces[i] = true;
/* Add the UNLIKELY_EXECUTED_NOTES to each cold basic block. */
FOR_EACH_BB (bb)
- if (bb->partition == COLD_PARTITION)
+ if (BB_PARTITION (bb) == BB_COLD_PARTITION)
mark_bb_for_unlikely_executed_section (bb);
}
FOR_EACH_BB (bb)
{
if (probably_never_executed_bb_p (bb))
- bb->partition = COLD_PARTITION;
+ BB_SET_PARTITION (bb, BB_COLD_PARTITION);
else
{
- bb->partition = HOT_PARTITION;
+ BB_SET_PARTITION (bb, BB_HOT_PARTITION);
has_hot_blocks = true;
}
}
for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
if (e->dest->index >= 0)
{
- e->dest->partition = HOT_PARTITION;
+ BB_SET_PARTITION (e->dest, BB_HOT_PARTITION);
break;
}
{
if (e->src != ENTRY_BLOCK_PTR
&& e->dest != EXIT_BLOCK_PTR
- && e->src->partition != e->dest->partition)
+ && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
{
e->flags |= EDGE_CROSSING;
if (i == *max_idx)
/* Make sure new fall-through bb is in same
partition as bb it's falling through from. */
-
- new_bb->partition = cur_bb->partition;
+
+ BB_COPY_PARTITION (new_bb, cur_bb);
new_bb->succ->flags |= EDGE_CROSSING;
}
/* Make sure new bb is in same partition as source
of conditional branch. */
-
- new_bb->partition = cur_bb->partition;
+ BB_COPY_PARTITION (new_bb, cur_bb);
}
/* Make old jump branch to new bb. */
and cold sections. */
if (flag_reorder_blocks_and_partition
- && (jump_block->partition != jump_dest_block->partition
+ && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
|| (cbranch_jump_edge->flags & EDGE_CROSSING)))
return false;
and cold sections. */
if (flag_reorder_blocks_and_partition
- && (a->partition != b->partition
+ && (BB_PARTITION (a) != BB_PARTITION (b)
|| find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
return;
if (flag_reorder_blocks_and_partition
&& (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
- || a->partition != b->partition))
+ || BB_PARTITION (a) != BB_PARTITION (b)))
return;
real_b_end = BB_END (b);
if (flag_reorder_blocks_and_partition
&& (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
|| find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
- || b->partition != c->partition))
+ || BB_PARTITION (b) != BB_PARTITION (c)))
return NULL;
and cold sections. */
if (flag_reorder_blocks_and_partition
- && (bb->pred->src->partition != bb->pred->pred_next->src->partition
+ && (BB_PARTITION (bb->pred->src) != BB_PARTITION (bb->pred->pred_next->src)
|| (bb->pred->flags & EDGE_CROSSING)))
return false;
/* Make sure new bb is tagged for correct section (same as
fall-thru source). */
- e_fall->src->partition = bb->pred->src->partition;
+ BB_COPY_PARTITION (e_fall->src, bb->pred->src);
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections)
{
- if (bb->pred->src->partition == COLD_PARTITION)
+ if (BB_PARTITION (bb->pred->src) == BB_COLD_PARTITION)
{
rtx new_note;
rtx note = BB_HEAD (e_fall->src);
insn ? get_last_insn () : NULL,
EXIT_BLOCK_PTR->prev_bb);
- new_bb->partition = bb->partition;
+ BB_COPY_PARTITION (new_bb, bb);
if (bb->rbi->header)
{
insn = bb->rbi->header;
link_block (bb, after);
BASIC_BLOCK (bb->index) = bb;
update_bb_for_insn (bb);
- bb->partition = UNPARTITIONED;
+ BB_SET_PARTITION (bb, BB_UNPARTITIONED);
/* Tag the block so that we know it has been used when considering
other basic block notes. */
/* Create the new basic block. */
new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
- new_bb->partition = bb->partition;
+ BB_COPY_PARTITION (new_bb, bb);
BB_END (bb) = insn;
/* Redirect the outgoing edges. */
static bool
rtl_can_merge_blocks (basic_block a,basic_block b)
{
- bool partitions_ok = true;
-
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections. */
-
+ and cold sections.
+
+ ??? If two basic blocks could otherwise be merged (which implies
+ that the jump between the two is unconditional), and one is in a
+ hot section and the other is in a cold section, surely that means
+ that one of the section choices is wrong. */
+
if (flag_reorder_blocks_and_partition
&& (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
|| find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
- || a->partition != b->partition))
- partitions_ok = false;
+ || BB_PARTITION (a) != BB_PARTITION (b)))
+ return false;
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
- && partitions_ok
&& a->next_bb == b
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
if (flag_reorder_blocks_and_partition
&& (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)
- || (src->partition != target->partition)))
+ || BB_PARTITION (src) != BB_PARTITION (target)))
return NULL;
/* Verify that all targets will be TARGET. */
/* Make sure new block ends up in correct hot/cold section. */
- jump_block->partition = e->src->partition;
+ BB_COPY_PARTITION (jump_block, e->src);
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections)
{
- if (e->src->partition == COLD_PARTITION)
+ if (BB_PARTITION (jump_block) == BB_COLD_PARTITION)
{
rtx bb_note, new_note;
for (bb_note = BB_HEAD (jump_block);
new_note = emit_note_after (NOTE_INSN_UNLIKELY_EXECUTED_CODE,
bb_note);
NOTE_BASIC_BLOCK (new_note) = jump_block;
- jump_block->partition = COLD_PARTITION;
}
if (JUMP_P (BB_END (jump_block))
&& !any_condjump_p (BB_END (jump_block))
&& NOTE_LINE_NUMBER (before) == NOTE_INSN_LOOP_END)
before = NEXT_INSN (before);
bb = create_basic_block (before, NULL, edge_in->src);
- bb->partition = edge_in->src->partition;
+ BB_COPY_PARTITION (bb, edge_in->src);
}
else
{
bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
- bb->partition = edge_in->dest->partition;
+ /* ??? Why not edge_in->dest->prev_bb here? */
+ BB_COPY_PARTITION (bb, edge_in->dest);
}
/* ??? This info is likely going to be out of date very soon. */
if (flag_reorder_blocks_and_partition
&& targetm.have_named_sections
&& e->src != ENTRY_BLOCK_PTR
- && e->src->partition == COLD_PARTITION
+ && BB_PARTITION (e->src) == BB_COLD_PARTITION
&& !(e->flags & EDGE_CROSSING))
{
rtx bb_note, new_note, cur_insn;
{
n_fallthru++, fallthru = e;
if ((e->flags & EDGE_CROSSING)
- || (e->src->partition != e->dest->partition
+ || (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
&& e->src != ENTRY_BLOCK_PTR
&& e->dest != EXIT_BLOCK_PTR))
{
static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
- bool partitions_ok = true;
-
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
- and cold sections. */
+ and cold sections.
+
+ ??? If two basic blocks could otherwise be merged (which implies
+ that the jump between the two is unconditional), and one is in a
+ hot section and the other is in a cold section, surely that means
+ that one of the section choices is wrong. */
if (flag_reorder_blocks_and_partition
&& (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
|| find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
- || a->partition != b->partition))
- partitions_ok = false;
+ || BB_PARTITION (a) != BB_PARTITION (b)))
+ return false;
/* There must be exactly one edge in between the blocks. */
return (a->succ && !a->succ->succ_next && a->succ->dest == b
&& !b->pred->pred_next && a != b
/* Must be simple edge. */
&& !(a->succ->flags & EDGE_COMPLEX)
- && partitions_ok
&& a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
/* If the jump insn has side effects,
we can't kill the edge. */
case NOTE_INSN_FUNCTION_END:
case NOTE_INSN_REPEATED_LINE_NUMBER:
case NOTE_INSN_EXPECTED_VALUE:
- case NOTE_INSN_DISABLE_SCHED_OF_BLOCK:
break;
case NOTE_INSN_UNLIKELY_EXECUTED_CODE:
{
new_bb->index = then_bb_index;
BASIC_BLOCK (then_bb_index) = new_bb;
- new_bb->partition = test_bb->partition;
+ /* ??? Should be then_bb? */
+ BB_COPY_PARTITION (new_bb, test_bb);
}
/* We've possibly created jump to next insn, cleanup_cfg will solve that
later. */
this a bit on the basic block structure. */
INSN_NOTE (UNLIKELY_EXECUTED_CODE)
-/* Mark that a block shouldn't be scheduled. This is currently used
- in modulo scheduling. Modulo scheduling adds this note to the
- blocks of the modulo-scheduled loops to disable scheduling them in
- the later traditional scheduling passes. FIXME: Make this a bit on
- the basic block structure. */
-INSN_NOTE (DISABLE_SCHED_OF_BLOCK)
-
#undef INSN_NOTE
/* Mark this loop as software pipelined so the later
 scheduling passes don't touch it. */
if (! flag_resched_modulo_sched)
- emit_note_before (NOTE_INSN_DISABLE_SCHED_OF_BLOCK,
- g->closing_branch->insn);
+ g->bb->flags |= BB_DISABLE_SCHEDULE;
generate_reg_moves (ps);
if (dump_file)
static bool
sched_is_disabled_for_current_region_p (void)
{
- rtx first_bb_insn, last_bb_insn, insn;
int bb;
for (bb = 0; bb < current_nr_blocks; bb++)
- {
- bool disable_sched = false;
- /* Searching for NOTE_DISABLE_SCHED_OF_BLOCK note between the
- start and end of the basic block. */
- get_block_head_tail (BB_TO_BLOCK (bb), &first_bb_insn,
- &last_bb_insn);
- for (insn = last_bb_insn; insn != NULL && insn != first_bb_insn;
- insn = PREV_INSN (insn))
- if (GET_CODE (insn) == NOTE
- && (NOTE_LINE_NUMBER (insn)
- == NOTE_INSN_DISABLE_SCHED_OF_BLOCK))
- {
- disable_sched = true;
- break;
- }
- if (! disable_sched)
- return false;
- }
+ if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
+ return false;
return true;
}
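A hedged sketch of the new disable-scheduling handshake (illustration only; the block array, region and function below are hypothetical stand-ins for GCC's BASIC_BLOCK / BB_TO_BLOCK machinery): the modulo scheduler sets BB_DISABLE_SCHEDULE in a block's flags, and the region check refuses to schedule only when every block of the region carries the flag.

    #include <stdbool.h>
    #include <stdio.h>

    #define BB_DISABLE_SCHEDULE 64          /* same bit value as in the patch */

    struct demo_bb { int flags; };

    /* Hypothetical region: indices into a block array, standing in for
       BB_TO_BLOCK () over current_nr_blocks.  */
    static struct demo_bb blocks[3];
    static const int region[] = { 0, 1, 2 };
    static const int region_size = 3;

    /* Mirrors the reworked sched_is_disabled_for_current_region_p:
       scheduling is disabled only if every block carries the flag.  */
    static bool
    region_sched_disabled_p (void)
    {
      int i;
      for (i = 0; i < region_size; i++)
        if (!(blocks[region[i]].flags & BB_DISABLE_SCHEDULE))
          return false;
      return true;
    }

    int
    main (void)
    {
      /* The modulo-scheduler side: mark the pipelined block(s).  */
      blocks[0].flags |= BB_DISABLE_SCHEDULE;
      blocks[1].flags |= BB_DISABLE_SCHEDULE;
      printf ("disabled: %d\n", region_sched_disabled_p ());   /* prints 0 */

      blocks[2].flags |= BB_DISABLE_SCHEDULE;
      printf ("disabled: %d\n", region_sched_disabled_p ());   /* prints 1 */
      return 0;
    }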