+2002-05-27 Zdenek Dvorak <rakdver@atrey.karlin.mff.cuni.cz>
+
+ * basic-block.h (last_basic_block): Defined as synonym for
+ n_basic_blocks.
+ * cfganal.c (mark_dfs_back_edges, flow_reverse_top_sort_order_compute,
+ flow_depth_first_order_compute, flow_preorder_transversal_compute,
+ flow_dfs_compute_reverse_init): Replaced relevant occurrences of
+ n_basic_blocks with last_basic_block.
+ * cfgbuild.c (make_edges): Likewise.
+ * cfgloop.c (flow_loop_scan, flow_loops_find): Likewise.
+ * cfgrtl.c (verify_flow_info, purge_all_dead_edges): Likewise.
+ * combine.c (combine_instructions): Likewise.
+ * df.c (df_alloc, df_analyse_1, df_analyse, iterative_dataflow_sbitmap,
+ iterative_dataflow_bitmap): Likewise.
+ * dominance.c (init_dom_info, calc_dfs_tree_nonrec, calc_dfs_tree,
+ calc_idoms, idoms_to_doms): Likewise.
+ * flow.c (update_life_info_in_dirty_blocks, free_basic_block_vars):
+ Likewise.
+ * gcse.c (gcse_main, alloc_gcse_mem, compute_local_properties,
+ compute_hash_table, expr_reaches_here_p, one_classic_gcse_pass,
+ one_cprop_pass, compute_pre_data, pre_expr_reaches_here_p,
+ one_pre_gcse_pass, compute_transpout, delete_null_pointer_checks_1,
+ delete_null_pointer_checks, compute_code_hoist_vbeinout,
+ hoist_expr_reaches_here_p, hoist_code, one_code_hoisting_pass,
+ compute_store_table, build_store_vectors): Likewise.
+ * haifa-sched.c (sched_init): Likewise.
+ * ifcvt.c (if_convert): Likewise.
+ * lcm.c (compute_antinout_edge, compute_laterin, compute_insert_delete,
+ pre_edge_lcm, compute_available, compute_nearerout,
+ compute_rev_insert_delete, pre_edge_rev_lcm, optimize_mode_switching):
+ Likewise.
+ * predict.c (estimate_probability, process_note_prediction,
+ note_prediction_to_br_prob): Likewise.
+ * profile.c (GCOV_INDEX_TO_BB, BB_TO_GCOV_INDEX): Likewise.
+ * recog.c (split_all_insns, peephole2_optimize): Likewise.
+ * regrename.c (copyprop_hardreg_forward): Likewise.
+ * resource.c (init_resource_info): Likewise.
+ * sched-rgn.c (build_control_flow, find_rgns, compute_trg_info,
+ init_regions, schedule_insns): Likewise.
+ * ssa-ccp.c (ssa_const_prop): Likewise.
+ * ssa-dce.c (ssa_eliminate_dead_code): Likewise.
+ * ssa.c (compute_dominance_frontiers,
+ compute_iterated_dominance_frontiers, convert_to_ssa): Likewise.
+
+ * df.c (df_refs_unlink): Fix FOR_EACH_BB usage (in #if 0'ed code).
+ * gcse.c (alloc_rd_mem, alloc_avail_expr_mem): Use n_blocks for vector
+ sizes consistently.
+
Mon May 27 14:28:12 CEST 2002 Jan Hubicka <jh@suse.cz>
* basic-block.h (can_hoist_p, hoist_insn_after, hoist_insn_to_edge):
extern int n_basic_blocks;
+/* First free basic block number. */
+
+#define last_basic_block n_basic_blocks
+
/* Number of edges in the current function. */
extern int n_edges;
bool found = false;
/* Allocate the preorder and postorder number arrays. */
- pre = (int *) xcalloc (n_basic_blocks, sizeof (int));
- post = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ pre = (int *) xcalloc (last_basic_block, sizeof (int));
+ post = (int *) xcalloc (last_basic_block, sizeof (int));
/* Allocate stack for back-tracking up CFG. */
stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
sp = 0;
/* Allocate the tree. */
- dfst = (struct dfst_node *) xcalloc (n_basic_blocks,
+ dfst = (struct dfst_node *) xcalloc (last_basic_block,
sizeof (struct dfst_node));
FOR_EACH_BB (bb)
}
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (last_basic_block);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
/* Free the tree. */
- for (i = 0; i < n_basic_blocks; i++)
+ for (i = 0; i < last_basic_block; i++)
if (dfst[i].node)
free (dfst[i].node);
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1));
+ data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (data->visited_blocks);
amount of time searching the edge lists for duplicates. */
if (forced_labels || label_value_list)
{
- edge_cache = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
- sbitmap_vector_zero (edge_cache, n_basic_blocks);
+ edge_cache = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ sbitmap_vector_zero (edge_cache, last_basic_block);
if (update_p)
FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
/* Determine which loop nodes dominate all the exits
of the loop. */
- loop->exits_doms = sbitmap_alloc (n_basic_blocks);
+ loop->exits_doms = sbitmap_alloc (last_basic_block);
sbitmap_copy (loop->exits_doms, loop->nodes);
for (j = 0; j < loop->num_exits; j++)
sbitmap_a_and_b (loop->exits_doms, loop->exits_doms,
rc_order = NULL;
/* Compute the dominators. */
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
/* Count the number of loop edges (back edges). This should be the
loops->array
= (struct loop *) xcalloc (num_loops, sizeof (struct loop));
- headers = sbitmap_alloc (n_basic_blocks);
+ headers = sbitmap_alloc (last_basic_block);
sbitmap_zero (headers);
- loops->shared_headers = sbitmap_alloc (n_basic_blocks);
+ loops->shared_headers = sbitmap_alloc (last_basic_block);
sbitmap_zero (loops->shared_headers);
/* Find and record information about all the natural loops
SET_BIT (headers, loop->header->index);
/* Find nodes contained within the loop. */
- loop->nodes = sbitmap_alloc (n_basic_blocks);
+ loop->nodes = sbitmap_alloc (last_basic_block);
loop->num_nodes
= flow_loop_nodes_find (loop->header, loop->latch, loop->nodes);
basic_block bb, last_bb_seen;
bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
- last_visited = (basic_block *) xcalloc (n_basic_blocks + 2,
+ last_visited = (basic_block *) xcalloc (last_basic_block + 2,
sizeof (basic_block));
- edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t));
+ edge_checksum = (size_t *) xcalloc (last_basic_block + 2, sizeof (size_t));
/* Check bb chain & numbers. */
last_bb_seen = ENTRY_BLOCK_PTR;
if (update_life_p)
{
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
}
setup_incoming_promotions ();
- refresh_blocks = sbitmap_alloc (n_basic_blocks);
+ refresh_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (refresh_blocks);
need_refresh = 0;
df->uses = xmalloc (df->use_size * sizeof (*df->uses));
df->n_regs = n_regs;
- df->n_bbs = n_basic_blocks;
+ df->n_bbs = last_basic_block;
/* Allocate temporary working array used during local dataflow analysis. */
df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
df->flags = 0;
- df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));
+ df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
df->all_blocks = BITMAP_XMALLOC ();
FOR_EACH_BB (bb)
df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
- df->inverse_dfs_map = xmalloc (sizeof(int) * n_basic_blocks);
- df->inverse_rc_map = xmalloc (sizeof(int) * n_basic_blocks);
- df->inverse_rts_map = xmalloc (sizeof(int) * n_basic_blocks);
+ df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
+ df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
+ df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
/* Compute the sets of gens and kills for the defs of each bb. */
df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_EACH_BB (bb)
{
in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
uses in each bb. */
df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_EACH_BB (bb)
{
in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
/* Compute the sets of defs and uses of live variables. */
df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *use = xmalloc (sizeof (bitmap) * n_basic_blocks);
- bitmap *def = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
+ bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
FOR_EACH_BB (bb)
{
in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
/* We could deal with additional basic blocks being created by
rescanning everything again. */
- if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
+ if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
abort ();
update = df_modified_p (df, blocks);
}
else
{
- FOR_EACH_BB (bb,
- {
+ FOR_EACH_BB (bb)
df_bb_refs_unlink (df, bb);
- });
}
}
#endif
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
- pending = sbitmap_alloc (n_basic_blocks);
- visited = sbitmap_alloc (n_basic_blocks);
+ pending = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
- pending = sbitmap_alloc (n_basic_blocks);
- visited = sbitmap_alloc (n_basic_blocks);
+ pending = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (last_basic_block);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
number of the corresponding basic block. Please note, that we include the
artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to
support multiple entry points. As it has no real basic block index we use
- 'n_basic_blocks' for that. Its dfs number is of course 1. */
+ 'last_basic_block' for that. Its dfs number is of course 1. */
/* Type of Basic Block aka. TBB */
typedef unsigned int TBB;
init_ar (di->set_size, unsigned int, num, 1);
init_ar (di->set_child, TBB, num, 0);
- init_ar (di->dfs_order, TBB, (unsigned int) n_basic_blocks + 1, 0);
+ init_ar (di->dfs_order, TBB, (unsigned int) last_basic_block + 1, 0);
init_ar (di->dfs_to_bb, basic_block, num, 0);
di->dfsnum = 1;
if (bb != en_block)
my_i = di->dfs_order[bb->index];
else
- my_i = di->dfs_order[n_basic_blocks];
+ my_i = di->dfs_order[last_basic_block];
child_i = di->dfs_order[bn->index] = di->dfsnum++;
di->dfs_to_bb[child_i] = bn;
di->dfs_parent[child_i] = my_i;
{
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR;
- di->dfs_order[n_basic_blocks] = di->dfsnum;
+ di->dfs_order[last_basic_block] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
e_next = e->pred_next;
}
if (b == en_block)
- k1 = di->dfs_order[n_basic_blocks];
+ k1 = di->dfs_order[last_basic_block];
else
k1 = di->dfs_order[b->index];
{
TBB i, e_index;
int bb, bb_idom;
- sbitmap_vector_zero (dominators, n_basic_blocks);
+ sbitmap_vector_zero (dominators, last_basic_block);
/* We have to be careful, to not include the ENTRY_BLOCK or EXIT_BLOCK
in the list of (post)-doms, so remember that in e_index. */
- e_index = di->dfs_order[n_basic_blocks];
+ e_index = di->dfs_order[last_basic_block];
for (i = 1; i <= di->nodes; i++)
{
}
/* The main entry point into this module. IDOM is an integer array with room
- for n_basic_blocks integers, DOMS is a preallocated sbitmap array having
- room for n_basic_blocks^2 bits, and POST is true if the caller wants to
+ for last_basic_block integers, DOMS is a preallocated sbitmap array having
+ room for last_basic_block^2 bits, and POST is true if the caller wants to
know post-dominators.
On return IDOM[i] will be the BB->index of the immediate (post) dominator
enum update_life_extent extent;
int prop_flags;
{
- sbitmap update_life_blocks = sbitmap_alloc (n_basic_blocks);
+ sbitmap update_life_blocks = sbitmap_alloc (last_basic_block);
int n = 0;
basic_block bb;
int retval = 0;
VARRAY_FREE (basic_block_info);
}
n_basic_blocks = 0;
+ last_basic_block = 0;
ENTRY_BLOCK_PTR->aux = NULL;
ENTRY_BLOCK_PTR->global_live_at_end = NULL;
{
free_modify_mem_tables ();
modify_mem_list
- = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
canon_modify_mem_list
- = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
orig_bb_count = n_basic_blocks;
}
free_reg_set_mem ();
reg_set_bitmap = BITMAP_XMALLOC ();
/* Allocate vars to track sets of regs, memory per block. */
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
max_gcse_regno);
/* Allocate array to keep a list of insns which modify memory in each
basic block. */
- modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
- canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
modify_mem_list_set = BITMAP_XMALLOC ();
canon_modify_mem_list_set = BITMAP_XMALLOC ();
}
if (transp)
{
if (setp)
- sbitmap_vector_zero (transp, n_basic_blocks);
+ sbitmap_vector_zero (transp, last_basic_block);
else
- sbitmap_vector_ones (transp, n_basic_blocks);
+ sbitmap_vector_ones (transp, last_basic_block);
}
if (comp)
- sbitmap_vector_zero (comp, n_basic_blocks);
+ sbitmap_vector_zero (comp, last_basic_block);
if (antloc)
- sbitmap_vector_zero (antloc, n_basic_blocks);
+ sbitmap_vector_zero (antloc, last_basic_block);
/* We use the same code for cprop, pre and hoisting. For cprop
we care about the set hash table, for pre and hoisting we
registers are set in which blocks.
??? This isn't needed during const/copy propagation, but it's cheap to
compute. Later. */
- sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
+ sbitmap_vector_zero (reg_set_in_block, last_basic_block);
/* re-Cache any INSN_LIST nodes we have allocated. */
clear_modify_mem_tables ();
int n_blocks, n_insns;
{
rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_kill, n_basic_blocks);
+ sbitmap_vector_zero (rd_kill, n_blocks);
rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_gen, n_basic_blocks);
+ sbitmap_vector_zero (rd_gen, n_blocks);
reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (reaching_defs, n_basic_blocks);
+ sbitmap_vector_zero (reaching_defs, n_blocks);
rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_out, n_basic_blocks);
+ sbitmap_vector_zero (rd_out, n_blocks);
}
/* Free reaching def variables. */
int n_blocks, n_exprs;
{
ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ sbitmap_vector_zero (ae_kill, n_blocks);
ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_gen, n_basic_blocks);
+ sbitmap_vector_zero (ae_gen, n_blocks);
ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_in, n_basic_blocks);
+ sbitmap_vector_zero (ae_in, n_blocks);
ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_out, n_basic_blocks);
+ sbitmap_vector_zero (ae_out, n_blocks);
}
static void
int check_self_loop;
{
int rval;
- char *visited = (char *) xcalloc (n_basic_blocks, 1);
+ char *visited = (char *) xcalloc (last_basic_block, 1);
rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
gcse_create_count = 0;
alloc_expr_hash_table (max_cuid);
- alloc_rd_mem (n_basic_blocks, max_cuid);
+ alloc_rd_mem (last_basic_block, max_cuid);
compute_expr_hash_table ();
if (gcse_file)
dump_hash_table (gcse_file, "Expression", expr_hash_table,
{
compute_kill_rd ();
compute_rd ();
- alloc_avail_expr_mem (n_basic_blocks, n_exprs);
+ alloc_avail_expr_mem (last_basic_block, n_exprs);
compute_ae_gen ();
compute_ae_kill (ae_gen, ae_kill);
compute_available (ae_gen, ae_kill, ae_out, ae_in);
n_sets);
if (n_sets > 0)
{
- alloc_cprop_mem (n_basic_blocks, n_sets);
+ alloc_cprop_mem (last_basic_block, n_sets);
compute_cprop_data ();
changed = cprop (alter_jumps);
free_cprop_mem ();
unsigned int ui;
compute_local_properties (transp, comp, antloc, 0);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ sbitmap_vector_zero (ae_kill, last_basic_block);
/* Collect expressions which might trap. */
trapping_expr = sbitmap_alloc (n_exprs);
basic_block bb;
{
int rval;
- char *visited = (char *) xcalloc (n_basic_blocks, 1);
+ char *visited = (char *) xcalloc (last_basic_block, 1);
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
if (n_exprs > 0)
{
- alloc_pre_mem (n_basic_blocks, n_exprs);
+ alloc_pre_mem (last_basic_block, n_exprs);
compute_pre_data ();
changed |= pre_gcse ();
free_edge_list (edge_list);
unsigned int i;
struct expr *expr;
- sbitmap_vector_ones (transpout, n_basic_blocks);
+ sbitmap_vector_ones (transpout, last_basic_block);
FOR_EACH_BB (bb)
{
Note that a register can have both properties in a single block. That
indicates that it's killed, then later in the block a new value is
computed. */
- sbitmap_vector_zero (nonnull_local, n_basic_blocks);
- sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
+ sbitmap_vector_zero (nonnull_local, last_basic_block);
+ sbitmap_vector_zero (nonnull_killed, last_basic_block);
FOR_EACH_BB (current_block)
{
/* We need four bitmaps, each with a bit for each register in each
basic block. */
max_reg = max_reg_num ();
- regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
+ regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
/* Allocate bitmaps to hold local and global properties. */
- npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
- nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
/* Go through the basic blocks, seeing whether or not each block
ends with a conditional branch whose condition is a comparison
against zero. Record the register compared in BLOCK_REG. */
- block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
+ block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
FOR_EACH_BB (bb)
{
rtx last_insn = bb->end;
int changed, passes;
basic_block bb;
- sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
- sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
+ sbitmap_vector_zero (hoist_vbeout, last_basic_block);
+ sbitmap_vector_zero (hoist_vbein, last_basic_block);
passes = 0;
changed = 1;
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = xcalloc (n_basic_blocks, 1);
+ visited = xcalloc (last_basic_block, 1);
}
for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
struct expr **index_map;
struct expr *expr;
- sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
+ sbitmap_vector_zero (hoist_exprs, last_basic_block);
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
if (n_exprs > 0)
{
- alloc_code_hoist_mem (n_basic_blocks, n_exprs);
+ alloc_code_hoist_mem (last_basic_block, n_exprs);
compute_code_hoist_data ();
hoist_code ();
free_code_hoist_mem ();
max_gcse_regno = max_reg_num ();
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
max_gcse_regno);
- sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
+ sbitmap_vector_zero (reg_set_in_block, last_basic_block);
pre_ldst_mems = 0;
/* Find all the stores we care about. */
/* Build the gen_vector. This is any store in the table which is not killed
by aliasing later in its block. */
- ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (ae_gen, n_basic_blocks);
+ ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (ae_gen, last_basic_block);
- st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (st_antloc, n_basic_blocks);
+ st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (st_antloc, last_basic_block);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
free_INSN_LIST_list (&store_list);
}
- ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (ae_kill, n_basic_blocks);
+ ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (ae_kill, last_basic_block);
- transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
- sbitmap_vector_zero (transp, n_basic_blocks);
+ transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
+ sbitmap_vector_zero (transp, last_basic_block);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
FOR_EACH_BB (b)
{
fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
print_ldst_list (gcse_file);
- dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
- dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
}
}
{
rtx line;
- line_note_head = (rtx *) xcalloc (n_basic_blocks, sizeof (rtx));
+ line_note_head = (rtx *) xcalloc (last_basic_block, sizeof (rtx));
/* Save-line-note-head:
Determine the line-number at the start of each basic block.
post_dominators = NULL;
if (HAVE_conditional_execution || life_data_ok)
{
- post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
}
if (life_data_ok)
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
- sbitmap_vector_ones (antin, n_basic_blocks);
+ sbitmap_vector_ones (antin, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
/* Computation of insertion and deletion points requires computing LATERIN
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
- sbitmap_ones (laterin[n_basic_blocks]);
+ sbitmap_ones (laterin[last_basic_block]);
for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
- sbitmap_a_and_b (laterin[n_basic_blocks],
- laterin[n_basic_blocks],
+ sbitmap_a_and_b (laterin[last_basic_block],
+ laterin[last_basic_block],
later[(size_t) e->aux]);
clear_aux_for_edges ();
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
if (b == EXIT_BLOCK_PTR)
- sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
+ sbitmap_difference (insert[x], later[x], laterin[last_basic_block]);
else
sbitmap_difference (insert[x], later[x], laterin[b->index]);
}
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
- dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
- dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
- dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
+ dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
+ dump_sbitmap_vector (file, "antloc", "", antloc, last_basic_block);
+ dump_sbitmap_vector (file, "avloc", "", avloc, last_basic_block);
+ dump_sbitmap_vector (file, "kill", "", kill, last_basic_block);
}
#endif
/* Compute global availability. */
- avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (avloc, kill, avout, avin);
sbitmap_vector_free (avin);
/* Compute global anticipatability. */
- antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_antinout_edge (antloc, transp, antin, antout);
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
- dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
+ dump_sbitmap_vector (file, "antin", "", antin, last_basic_block);
+ dump_sbitmap_vector (file, "antout", "", antout, last_basic_block);
}
#endif
later = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the exit block in the laterin vector. */
- laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
+ laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
+ dump_sbitmap_vector (file, "laterin", "", laterin, last_basic_block + 1);
dump_sbitmap_vector (file, "later", "", later, num_edges);
}
#endif
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
sbitmap_vector_free (laterin);
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- n_basic_blocks);
+ last_basic_block);
}
#endif
= (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
/* We want a maximal solution. */
- sbitmap_vector_ones (avout, n_basic_blocks);
+ sbitmap_vector_ones (avout, last_basic_block);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. */
/* Computation of insertion and deletion points requires computing NEAREROUT
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
- sbitmap_ones (nearerout[n_basic_blocks]);
+ sbitmap_ones (nearerout[last_basic_block]);
for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
- sbitmap_a_and_b (nearerout[n_basic_blocks],
- nearerout[n_basic_blocks],
+ sbitmap_a_and_b (nearerout[last_basic_block],
+ nearerout[last_basic_block],
nearer[(size_t) e->aux]);
clear_aux_for_edges ();
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
if (b == ENTRY_BLOCK_PTR)
- sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
+ sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]);
else
sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
}
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
- st_antin = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- st_antout = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- sbitmap_vector_zero (st_antin, n_basic_blocks);
- sbitmap_vector_zero (st_antout, n_basic_blocks);
+ st_antin = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_antout = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
+ sbitmap_vector_zero (st_antin, last_basic_block);
+ sbitmap_vector_zero (st_antout, last_basic_block);
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
/* Compute global anticipatability. */
- st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
- st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_available (st_avloc, kill, st_avout, st_avin);
#ifdef LCM_DEBUG_INFO
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
- dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
- dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
- dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
+ dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
+ dump_sbitmap_vector (file, "st_avloc", "", st_avloc, last_basic_block);
+ dump_sbitmap_vector (file, "st_antloc", "", st_antloc, last_basic_block);
+ dump_sbitmap_vector (file, "st_antin", "", st_antin, last_basic_block);
+ dump_sbitmap_vector (file, "st_antout", "", st_antout, last_basic_block);
+ dump_sbitmap_vector (file, "st_kill", "", kill, last_basic_block);
}
#endif
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
- dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_avout", "", st_avout, last_basic_block);
+ dump_sbitmap_vector (file, "st_avin", "", st_avin, last_basic_block);
}
#endif
nearer = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the entry block. */
- nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
+ nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "nearerout", "", nearerout,
- n_basic_blocks + 1);
+ last_basic_block + 1);
dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
}
#endif
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *delete);
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- n_basic_blocks);
+ last_basic_block);
}
#endif
return edge_list;
clear_bb_flags ();
#ifdef NORMAL_MODE
- /* Increment n_basic_blocks before allocating bb_info. */
- n_basic_blocks++;
+ /* Increment last_basic_block before allocating bb_info. */
+ last_basic_block++;
#endif
for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
{
/* Create the list of segments within each basic block. */
bb_info[n_entities]
- = (struct bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
+ = (struct bb_info *) xcalloc (last_basic_block, sizeof **bb_info);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
#ifdef NORMAL_MODE
/* Decrement it back in case we return below. */
- n_basic_blocks--;
+ last_basic_block--;
#endif
if (! n_entities)
EXIT_BLOCK isn't optimized away. We do this by incrementing the
basic block count, growing the VARRAY of basic_block_info and
appending the EXIT_BLOCK_PTR to it. */
- n_basic_blocks++;
- if (VARRAY_SIZE (basic_block_info) < n_basic_blocks)
- VARRAY_GROW (basic_block_info, n_basic_blocks);
- BASIC_BLOCK (n_basic_blocks - 1) = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->index = n_basic_blocks - 1;
+ last_basic_block++;
+ if (VARRAY_SIZE (basic_block_info) < last_basic_block)
+ VARRAY_GROW (basic_block_info, last_basic_block);
+ BASIC_BLOCK (last_basic_block - 1) = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR->index = last_basic_block - 1;
#endif
/* Create the bitmap vectors. */
- antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
+ antic = sbitmap_vector_alloc (last_basic_block, n_entities);
+ transp = sbitmap_vector_alloc (last_basic_block, n_entities);
+ comp = sbitmap_vector_alloc (last_basic_block, n_entities);
- sbitmap_vector_ones (transp, n_basic_blocks);
+ sbitmap_vector_ones (transp, last_basic_block);
for (j = n_entities - 1; j >= 0; j--)
{
#endif /* NORMAL_MODE */
}
- kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
+ kill = sbitmap_vector_alloc (last_basic_block, n_entities);
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
/* Set the anticipatable and computing arrays. */
- sbitmap_vector_zero (antic, n_basic_blocks);
- sbitmap_vector_zero (comp, n_basic_blocks);
+ sbitmap_vector_zero (antic, last_basic_block);
+ sbitmap_vector_zero (comp, last_basic_block);
for (j = n_entities - 1; j >= 0; j--)
{
int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
int no_mode = num_modes[entity_map[j]];
#ifdef NORMAL_MODE
- if (bb_info[j][n_basic_blocks].seginfo->mode != no_mode)
+ if (bb_info[j][last_basic_block].seginfo->mode != no_mode)
{
edge eg;
- struct seginfo *ptr = bb_info[j][n_basic_blocks].seginfo;
+ struct seginfo *ptr = bb_info[j][last_basic_block].seginfo;
for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
{
basic_block bb;
int i;
- dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
- post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
/* Now find the edge that leads to our branch and apply the prediction. */
- if (y == n_basic_blocks)
+ if (y == last_basic_block)
return;
for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next)
if (e->dest->index >= 0
add_noreturn_fake_exit_edges ();
connect_infinite_loops_to_exit ();
- dominators = xmalloc (sizeof (int) * n_basic_blocks);
- memset (dominators, -1, sizeof (int) * n_basic_blocks);
- post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dominators = xmalloc (sizeof (int) * last_basic_block);
+ memset (dominators, -1, sizeof (int) * last_basic_block);
+ post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
calculate_dominance_info (dominators, NULL, CDI_DOMINATORS);
- heads = xmalloc (sizeof (int) * n_basic_blocks);
- memset (heads, -1, sizeof (int) * n_basic_blocks);
- heads[ENTRY_BLOCK_PTR->next_bb->index] = n_basic_blocks;
+ heads = xmalloc (sizeof (int) * last_basic_block);
+ memset (heads, -1, sizeof (int) * last_basic_block);
+ heads[ENTRY_BLOCK_PTR->next_bb->index] = last_basic_block;
/* Process all prediction notes. */
/* Keep all basic block indexes nonnegative in the gcov output. Index 0
is used for entry block, last block exit block. */
#define GCOV_INDEX_TO_BB(i) ((i) == 0 ? ENTRY_BLOCK_PTR \
- : (((i) == n_basic_blocks + 1) \
+ : (((i) == last_basic_block + 1) \
? EXIT_BLOCK_PTR : BASIC_BLOCK ((i)-1)))
#define BB_TO_GCOV_INDEX(bb) ((bb) == ENTRY_BLOCK_PTR ? 0 \
: ((bb) == EXIT_BLOCK_PTR \
- ? n_basic_blocks + 1 : (bb)->index + 1))
+ ? last_basic_block + 1 : (bb)->index + 1))
/* Instantiate the profile info structure. */
int changed;
basic_block bb;
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
changed = 0;
live = INITIALIZE_REG_SET (rs_heads[i]);
#ifdef HAVE_conditional_execution
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
changed = false;
#else
need_refresh = false;
- all_vd = xmalloc (sizeof (struct value_data) * n_basic_blocks);
+ all_vd = xmalloc (sizeof (struct value_data) * last_basic_block);
FOR_EACH_BB (bb)
{
/* Allocate and initialize the tables used by mark_target_live_regs. */
target_hash_table = (struct target_info **)
xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *));
- bb_ticks = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ bb_ticks = (int *) xcalloc (last_basic_block, sizeof (int));
}
\f
/* Free up the resources allocated to mark_target_live_regs (). This
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
- out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
+ out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
- dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
+ dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
- inner = sbitmap_alloc (n_basic_blocks);
+ inner = sbitmap_alloc (last_basic_block);
sbitmap_ones (inner);
- header = sbitmap_alloc (n_basic_blocks);
+ header = sbitmap_alloc (last_basic_block);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
- in_queue = sbitmap_alloc (n_basic_blocks);
+ in_queue = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (n_basic_blocks);
+ in_stack = sbitmap_alloc (last_basic_block);
sbitmap_zero (in_stack);
for (i = 0; i < n_basic_blocks; i++)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (n_basic_blocks);
- memset (update_blocks, 0, n_basic_blocks);
+ update_blocks = (char *) alloca (last_basic_block);
+ memset (update_blocks, 0, last_basic_block);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
nr_regions = 0;
rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
- containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
sbitmap *dom;
struct edge_list *edge_list;
- dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
/* The scheduler runs after flow; therefore, we can't blindly call
back into find_basic_blocks since doing so could invalidate the
if (CHECK_DEAD_NOTES)
{
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
compute_bb_for_insn (get_max_uid ());
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (n_basic_blocks);
+ large_region_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (large_region_blocks);
FOR_EACH_BB (bb)
SET_BIT (large_region_blocks, bb->index);
- blocks = sbitmap_alloc (n_basic_blocks);
+ blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (blocks);
/* Update life information. For regions consisting of multiple blocks
ssa_edges = sbitmap_alloc (VARRAY_SIZE (ssa_definition));
sbitmap_zero (ssa_edges);
- executable_blocks = sbitmap_alloc (n_basic_blocks);
+ executable_blocks = sbitmap_alloc (last_basic_block);
sbitmap_zero (executable_blocks);
executable_edges = sbitmap_alloc (NUM_EDGES (edges));
mark_all_insn_unnecessary ();
VARRAY_RTX_INIT (unprocessed_instructions, 64,
"unprocessed instructions");
- cdbte = control_dependent_block_to_edge_map_create (n_basic_blocks);
+ cdbte = control_dependent_block_to_edge_map_create (last_basic_block);
/* Prepare for use of BLOCK_NUM (). */
connect_infinite_loops_to_exit ();
compute_bb_for_insn (max_insn_uid);
/* Compute control dependence. */
- pdom = (int *) xmalloc (n_basic_blocks * sizeof (int));
- for (i = 0; i < n_basic_blocks; ++i)
+ pdom = (int *) xmalloc (last_basic_block * sizeof (int));
+ for (i = 0; i < last_basic_block; ++i)
pdom[i] = INVALID_BLOCK;
calculate_dominance_info (pdom, NULL, CDI_POST_DOMINATORS);
/* Assume there is a path from each node to the exit block. */
- for (i = 0; i < n_basic_blocks; ++i)
+ for (i = 0; i < last_basic_block; ++i)
if (pdom[i] == INVALID_BLOCK)
pdom[i] = EXIT_BLOCK;
el = create_edge_list ();
sbitmap *frontiers;
int *idom;
{
- sbitmap done = sbitmap_alloc (n_basic_blocks);
+ sbitmap done = sbitmap_alloc (last_basic_block);
sbitmap_zero (done);
compute_dominance_frontiers_1 (frontiers, idom, 0, done);
sbitmap worklist;
int reg, passes = 0;
- worklist = sbitmap_alloc (n_basic_blocks);
+ worklist = sbitmap_alloc (last_basic_block);
for (reg = 0; reg < nregs; ++reg)
{
dead code. We'll let the SSA optimizers do that. */
life_analysis (get_insns (), NULL, 0);
- idom = (int *) alloca (n_basic_blocks * sizeof (int));
- memset ((void *) idom, -1, (size_t) n_basic_blocks * sizeof (int));
+ idom = (int *) alloca (last_basic_block * sizeof (int));
+ memset ((void *) idom, -1, (size_t) last_basic_block * sizeof (int));
calculate_dominance_info (idom, NULL, CDI_DOMINATORS);
if (rtl_dump_file)
/* Compute dominance frontiers. */
- dfs = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ dfs = sbitmap_vector_alloc (last_basic_block, last_basic_block);
compute_dominance_frontiers (dfs, idom);
if (rtl_dump_file)
{
dump_sbitmap_vector (rtl_dump_file, ";; Dominance Frontiers:",
- "; Basic Block", dfs, n_basic_blocks);
+ "; Basic Block", dfs, last_basic_block);
fflush (rtl_dump_file);
}
ssa_max_reg_num = max_reg_num ();
nregs = ssa_max_reg_num;
- evals = sbitmap_vector_alloc (nregs, n_basic_blocks);
+ evals = sbitmap_vector_alloc (nregs, last_basic_block);
find_evaluations (evals, nregs);
/* Compute the iterated dominance frontier for each register. */
- idfs = sbitmap_vector_alloc (nregs, n_basic_blocks);
+ idfs = sbitmap_vector_alloc (nregs, last_basic_block);
compute_iterated_dominance_frontiers (idfs, dfs, evals, nregs);
if (rtl_dump_file)