+2016-07-26 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * bt-load.c (compute_out): Use auto_sbitmap class.
+ (link_btr_uses): Likewise.
+ * cfganal.c (mark_dfs_back_edges): Likewise.
+ (post_order_compute): Likewise.
+ (inverted_post_order_compute): Likewise.
+ (pre_and_rev_post_order_compute_fn): Likewise.
+ (single_pred_before_succ_order): Likewise.
+ * cfgexpand.c (pass_expand::execute): Likewise.
+ * cfgloop.c (verify_loop_structure): Likewise.
+ * cfgloopmanip.c (fix_bb_placements): Likewise.
+ (remove_path): Likewise.
+ (update_dominators_in_loop): Likewise.
+ * cfgrtl.c (break_superblocks): Likewise.
+ * ddg.c (check_sccs): Likewise.
+ (create_ddg_all_sccs): Likewise.
+ * df-core.c (df_worklist_dataflow): Likewise.
+ * dse.c (dse_step3): Likewise.
+ * except.c (eh_region_outermost): Likewise.
+ * function.c (thread_prologue_and_epilogue_insns): Likewise.
+ * gcse.c (prune_expressions): Likewise.
+ (prune_insertions_deletions): Likewise.
+ * gimple-ssa-backprop.c (backprop::~backprop): Likewise.
+ * graph.c (draw_cfg_nodes_no_loops): Likewise.
+ * ira-lives.c (remove_some_program_points_and_update_live_ranges):
+ Likewise.
+ * lcm.c (compute_earliest): Likewise.
+ (compute_farthest): Likewise.
+ * loop-unroll.c (unroll_loop_constant_iterations): Likewise.
+ (unroll_loop_runtime_iterations): Likewise.
+ (unroll_loop_stupid): Likewise.
+ * lower-subreg.c (decompose_multiword_subregs): Likewise.
+ * lra-lives.c: Likewise.
+ * lra.c (lra): Likewise.
+ * modulo-sched.c (schedule_reg_moves): Likewise.
+ (optimize_sc): Likewise.
+ (get_sched_window): Likewise.
+ (sms_schedule_by_order): Likewise.
+ (check_nodes_order): Likewise.
+ (order_nodes_of_sccs): Likewise.
+ (order_nodes_in_scc): Likewise.
+ * recog.c (split_all_insns): Likewise.
+ * regcprop.c (pass_cprop_hardreg::execute): Likewise.
+ * reload1.c (reload): Likewise.
+ * sched-rgn.c (haifa_find_rgns): Likewise.
+ (split_edges): Likewise.
+ (compute_trg_info): Likewise.
+ * sel-sched.c (init_seqno): Likewise.
+ * store-motion.c (remove_reachable_equiv_notes): Likewise.
+ * tree-into-ssa.c (update_ssa): Likewise.
+ * tree-ssa-live.c (live_worklist): Likewise.
+ * tree-ssa-loop-im.c (fill_always_executed_in): Likewise.
+ * tree-ssa-loop-ivcanon.c (try_unroll_loop_completely):
+ Likewise.
+ (try_peel_loop): Likewise.
+ * tree-ssa-loop-manip.c (tree_transform_and_unroll_loop):
+ Likewise.
+ * tree-ssa-pre.c (compute_antic): Likewise.
+ * tree-ssa-reassoc.c (undistribute_ops_list): Likewise.
+ * tree-stdarg.c (reachable_at_most_once): Likewise.
+ * tree-vect-slp.c (vect_attempt_slp_rearrange_stmts): Likewise.
+ * var-tracking.c (vt_find_locations): Likewise.
+
2016-07-26 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>

* sbitmap.h (auto_sbitmap): New class.
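
The conversion below is mechanical: every sbitmap_alloc/sbitmap_free pair
whose lifetime matches a lexical scope collapses into a stack-constructed
auto_sbitmap whose destructor does the release. The following is a minimal,
self-contained sketch of the RAII shape such a wrapper takes -- an
approximation for illustration, not the actual sbitmap.h code; the
simple_bitmap_def layout and the alloc/free stubs are stand-ins for the
real API, and the sketch uses C++11 deleted functions for brevity.

#include <cstdlib>

struct simple_bitmap_def { unsigned int n_bits; unsigned char bits[1]; };
typedef simple_bitmap_def *sbitmap;

/* Stand-in allocator: enough zeroed storage for N bits.  */
static sbitmap
sbitmap_alloc (unsigned int n)
{
  sbitmap map
    = (sbitmap) std::calloc (1, sizeof (simple_bitmap_def) + n / 8);
  map->n_bits = n;
  return map;
}

static void
sbitmap_free (sbitmap map)
{
  std::free (map);
}

class auto_sbitmap
{
public:
  explicit auto_sbitmap (unsigned int size)
    : m_bitmap (sbitmap_alloc (size)) {}
  ~auto_sbitmap () { sbitmap_free (m_bitmap); }

  /* Implicit conversion lets an auto_sbitmap be passed anywhere a plain
     sbitmap is expected, so call sites other than alloc/free stay put.  */
  operator sbitmap () const { return m_bitmap; }

private:
  /* Copying would double-free the managed bitmap.  */
  auto_sbitmap (const auto_sbitmap &) = delete;
  auto_sbitmap &operator= (const auto_sbitmap &) = delete;

  /* The bitmap being managed.  */
  sbitmap m_bitmap;
};

With that in place, a typical hunk in this patch reads as:

void
example (unsigned int max_uid)
{
  auto_sbitmap bb_in (max_uid); /* was: sbitmap bb_in = sbitmap_alloc (...) */
  /* ... use bb_in exactly as an ordinary sbitmap ... */
} /* was: sbitmap_free (bb_in); now implicit.  */
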
Iterate until the bb_out sets stop growing. */
int i;
int changed;
- sbitmap bb_in = sbitmap_alloc (max_uid);
+ auto_sbitmap bb_in (max_uid);
for (i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); i++)
bitmap_copy (bb_out[i], bb_gen[i]);
bb_in, bb_kill[i]);
}
}
- sbitmap_free (bb_in);
}
static void
sbitmap *btr_defset, int max_uid)
{
int i;
- sbitmap reaching_defs = sbitmap_alloc (max_uid);
+ auto_sbitmap reaching_defs (max_uid);
/* Link uses to the uses lists of all of their reaching defs.
Count up the number of reaching defs of each use. */
if (user != NULL)
{
/* Find all the reaching defs for this use. */
- sbitmap reaching_defs_of_reg = sbitmap_alloc (max_uid);
+ auto_sbitmap reaching_defs_of_reg (max_uid);
unsigned int uid = 0;
sbitmap_iterator sbi;
user->next = def->uses;
def->uses = user;
}
- sbitmap_free (reaching_defs_of_reg);
}
if (CALL_P (insn))
}
}
}
- sbitmap_free (reaching_defs);
}
static void
int sp;
int prenum = 1;
int postnum = 1;
- sbitmap visited;
bool found = false;
/* Allocate the preorder and postorder number arrays. */
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
free (pre);
free (post);
free (stack);
- sbitmap_free (visited);
return found;
}
edge_iterator *stack;
int sp;
int post_order_num = 0;
- sbitmap visited;
int count;
if (include_entry_exit)
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
}
free (stack);
- sbitmap_free (visited);
return post_order_num;
}
edge_iterator *stack;
int sp;
int post_order_num = 0;
- sbitmap visited;
if (flag_checking)
verify_no_unreachable_blocks ();
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
post_order[post_order_num++] = EXIT_BLOCK;
free (stack);
- sbitmap_free (visited);
return post_order_num;
}
int sp;
int pre_order_num = 0;
int rev_post_order_num = n_basic_blocks_for_fn (cfun) - 1;
- sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
stack = XNEWVEC (edge_iterator, n_basic_blocks_for_fn (cfun) + 1);
rev_post_order_num -= NUM_FIXED_BLOCKS;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
/* None of the nodes in the CFG have been visited yet. */
bitmap_clear (visited);
}
free (stack);
- sbitmap_free (visited);
if (include_entry_exit)
{
basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
unsigned np, i;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
#define MARK_VISITED(BB) (bitmap_set_bit (visited, (BB)->index))
#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))
n -= np;
}
- sbitmap_free (visited);
gcc_assert (n == 0);
return order;
pass_expand::execute (function *fun)
{
basic_block bb, init_block;
- sbitmap blocks;
edge_iterator ei;
edge e;
rtx_insn *var_seq, *var_ret_seq;
}
}
- blocks = sbitmap_alloc (last_basic_block_for_fn (fun));
+ auto_sbitmap blocks (last_basic_block_for_fn (fun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
purge_all_dead_edges ();
expand_stack_alignment ();
verify_loop_structure (void)
{
unsigned *sizes, i, j;
- sbitmap irreds;
basic_block bb, *bbs;
struct loop *loop;
int err = 0;
unsigned num = number_of_loops (cfun);
struct loop_exit *exit, *mexit;
bool dom_available = dom_info_available_p (CDI_DOMINATORS);
- sbitmap visited;
if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
{
}
/* Check the recorded loop father and sizes of loops. */
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
}
}
free (bbs);
- sbitmap_free (visited);
/* Check headers and latches. */
FOR_EACH_LOOP (loop, 0)
if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
{
/* Record old info. */
- irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap irreds (last_basic_block_for_fn (cfun));
FOR_EACH_BB_FN (bb, cfun)
{
edge_iterator ei;
e->flags &= ~(EDGE_ALL_FLAGS + 1);
}
}
- free (irreds);
}
/* Check the recorded loop exits. */
bool *irred_invalidated,
bitmap loop_closed_ssa_invalidated)
{
- sbitmap in_queue;
basic_block *queue, *qtop, *qbeg, *qend;
struct loop *base_loop, *target_loop;
edge e;
|| from == base_loop->header)
return;
- in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
bitmap_clear (in_queue);
bitmap_set_bit (in_queue, from->index);
/* Prevent us from going out of the base_loop. */
bitmap_set_bit (in_queue, pred->index);
}
}
- free (in_queue);
free (queue);
}
basic_block *rem_bbs, *bord_bbs, from, bb;
vec<basic_block> dom_bbs;
int i, nrem, n_bord_bbs;
- sbitmap seen;
bool irred_invalidated = false;
edge_iterator ei;
struct loop *l, *f;
n_bord_bbs = 0;
bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
- seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap seen (last_basic_block_for_fn (cfun));
bitmap_clear (seen);
/* Find "border" hexes -- i.e. those with predecessor in removed path. */
dom_bbs.safe_push (ldom);
}
- free (seen);
-
/* Recount dominators. */
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
dom_bbs.release ();
update_dominators_in_loop (struct loop *loop)
{
vec<basic_block> dom_bbs = vNULL;
- sbitmap seen;
basic_block *body;
unsigned i;
- seen = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap seen (last_basic_block_for_fn (cfun));
bitmap_clear (seen);
body = get_loop_body (loop);
iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
free (body);
- free (seen);
dom_bbs.release ();
}
void
break_superblocks (void)
{
- sbitmap superblocks;
bool need = false;
basic_block bb;
- superblocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
bitmap_clear (superblocks);
FOR_EACH_BB_FN (bb, cfun)
rebuild_jump_labels (get_insns ());
find_many_sub_basic_blocks (superblocks);
}
-
- free (superblocks);
}
/* Finalize the changes: reorder insn list according to the sequence specified
check_sccs (ddg_all_sccs_ptr sccs, int num_nodes)
{
int i = 0;
- sbitmap tmp = sbitmap_alloc (num_nodes);
+ auto_sbitmap tmp (num_nodes);
bitmap_clear (tmp);
for (i = 0; i < sccs->num_sccs; i++)
gcc_assert (!bitmap_intersect_p (tmp, sccs->sccs[i]->nodes));
bitmap_ior (tmp, tmp, sccs->sccs[i]->nodes);
}
- sbitmap_free (tmp);
}
/* Perform the Strongly Connected Components decomposing algorithm on the
{
int i;
int num_nodes = g->num_nodes;
- sbitmap from = sbitmap_alloc (num_nodes);
- sbitmap to = sbitmap_alloc (num_nodes);
- sbitmap scc_nodes = sbitmap_alloc (num_nodes);
+ auto_sbitmap from (num_nodes);
+ auto_sbitmap to (num_nodes);
+ auto_sbitmap scc_nodes (num_nodes);
ddg_all_sccs_ptr sccs = (ddg_all_sccs_ptr)
xmalloc (sizeof (struct ddg_all_sccs));
}
}
order_sccs (sccs);
- sbitmap_free (from);
- sbitmap_free (to);
- sbitmap_free (scc_nodes);
if (flag_checking)
check_sccs (sccs, num_nodes);
int n_blocks)
{
bitmap pending = BITMAP_ALLOC (&df_bitmap_obstack);
- sbitmap considered = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_iterator bi;
unsigned int *bbindex_to_postorder;
int i;
bbindex_to_postorder[i] = last_basic_block_for_fn (cfun);
/* Initialize the considered map. */
+ auto_sbitmap considered (last_basic_block_for_fn (cfun));
bitmap_clear (considered);
EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, index, bi)
{
blocks_in_postorder,
bbindex_to_postorder,
n_blocks);
- sbitmap_free (considered);
free (bbindex_to_postorder);
}
dse_step3 ()
{
basic_block bb;
- sbitmap unreachable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
sbitmap_iterator sbi;
bitmap all_ones = NULL;
unsigned int i;
+ auto_sbitmap unreachable_blocks (last_basic_block_for_fn (cfun));
bitmap_ones (unreachable_blocks);
FOR_ALL_BB_FN (bb, cfun)
if (all_ones)
BITMAP_FREE (all_ones);
- sbitmap_free (unreachable_blocks);
}
eh_region_outermost (struct function *ifun, eh_region region_a,
eh_region region_b)
{
- sbitmap b_outer;
-
gcc_assert (ifun->eh->region_array);
gcc_assert (ifun->eh->region_tree);
- b_outer = sbitmap_alloc (ifun->eh->region_array->length ());
+ auto_sbitmap b_outer (ifun->eh->region_array->length ());
bitmap_clear (b_outer);
do
}
while (region_a);
- sbitmap_free (b_outer);
return region_a;
}
\f
commit_edge_insertions ();
/* Look for basic blocks within the prologue insns. */
- sbitmap blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
bitmap_set_bit (blocks, entry_edge->dest->index);
bitmap_set_bit (blocks, orig_entry_edge->dest->index);
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
}
default_rtl_profile ();
static void
prune_expressions (bool pre_p)
{
- sbitmap prune_exprs;
struct gcse_expr *expr;
unsigned int ui;
basic_block bb;
- prune_exprs = sbitmap_alloc (expr_hash_table.n_elems);
+ auto_sbitmap prune_exprs (expr_hash_table.n_elems);
bitmap_clear (prune_exprs);
for (ui = 0; ui < expr_hash_table.size; ui++)
{
break;
}
}
-
- sbitmap_free (prune_exprs);
}
/* It may be necessary to insert a large number of insns on edges to
prune_insertions_deletions (int n_elems)
{
sbitmap_iterator sbi;
- sbitmap prune_exprs;
/* We always use I to iterate over blocks/edges and J to iterate over
expressions. */
/* Set of expressions which require too many insertions relative to
the number of deletions achieved. We will prune these out of the
insertion/deletion sets. */
- prune_exprs = sbitmap_alloc (n_elems);
+ auto_sbitmap prune_exprs (n_elems);
bitmap_clear (prune_exprs);
/* Iterate over the edges counting the number of times each expression
bitmap_clear_bit (pre_delete_map[i], j);
}
- sbitmap_free (prune_exprs);
free (insertions);
free (deletions);
}
/* A bitmap of blocks that we have finished processing in the initial
post-order walk. */
- sbitmap m_visited_blocks;
+ auto_sbitmap m_visited_blocks;
/* A worklist of SSA names whose definitions need to be reconsidered. */
auto_vec <tree, 64> m_worklist;
backprop::backprop (function *fn)
: m_fn (fn),
m_info_pool ("usage_info"),
- m_visited_blocks (sbitmap_alloc (last_basic_block_for_fn (m_fn))),
+ m_visited_blocks (last_basic_block_for_fn (m_fn)),
m_worklist_names (BITMAP_ALLOC (NULL))
{
bitmap_clear (m_visited_blocks);
backprop::~backprop ()
{
BITMAP_FREE (m_worklist_names);
- sbitmap_free (m_visited_blocks);
m_info_pool.release ();
}
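
The gimple-ssa-backprop.c hunk above shows the member-variable form of the
same change: the raw sbitmap member, its sbitmap_alloc in the constructor's
initializer list, and the explicit sbitmap_free in the destructor are
replaced by an auto_sbitmap member whose own destructor frees the bitmap.
A hypothetical reduced version, reusing the wrapper sketched earlier (the
class body here is invented; only the field name comes from the hunk):

class backprop_sketch
{
public:
  explicit backprop_sketch (unsigned int n_blocks)
    /* Was: m_visited_blocks (sbitmap_alloc (n_blocks)).  */
    : m_visited_blocks (n_blocks)
  {}
  /* No sbitmap_free in the destructor any more; ~auto_sbitmap () runs
     automatically when the containing object is destroyed.  */

private:
  auto_sbitmap m_visited_blocks; /* was: sbitmap m_visited_blocks; */
};
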
{
int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun));
int i, n;
- sbitmap visited;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, true);
if (! bitmap_bit_p (visited, bb->index))
draw_cfg_node (pp, fun->funcdef_no, bb);
}
-
- sbitmap_free (visited);
}
/* Draw all the basic blocks in LOOP. Print the blocks in breadth-first
ira_object_t obj;
ira_object_iterator oi;
live_range_t r, prev_r, next_r;
- sbitmap born_or_dead, born, dead;
sbitmap_iterator sbi;
bool born_p, dead_p, prev_born_p, prev_dead_p;
- born = sbitmap_alloc (ira_max_point);
- dead = sbitmap_alloc (ira_max_point);
+ auto_sbitmap born (ira_max_point);
+ auto_sbitmap dead (ira_max_point);
bitmap_clear (born);
bitmap_clear (dead);
FOR_EACH_OBJECT (obj, oi)
bitmap_set_bit (dead, r->finish);
}
- born_or_dead = sbitmap_alloc (ira_max_point);
+ auto_sbitmap born_or_dead (ira_max_point);
bitmap_ior (born_or_dead, born, dead);
map = (int *) ira_allocate (sizeof (int) * ira_max_point);
n = -1;
prev_born_p = born_p;
prev_dead_p = dead_p;
}
- sbitmap_free (born_or_dead);
- sbitmap_free (born);
- sbitmap_free (dead);
+
n++;
if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
sbitmap *antout, sbitmap *avout, sbitmap *kill,
sbitmap *earliest)
{
- sbitmap difference, temp_bitmap;
int x, num_edges;
basic_block pred, succ;
num_edges = NUM_EDGES (edge_list);
- difference = sbitmap_alloc (n_exprs);
- temp_bitmap = sbitmap_alloc (n_exprs);
-
+ auto_sbitmap difference (n_exprs), temp_bitmap (n_exprs);
for (x = 0; x < num_edges; x++)
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
}
}
}
-
- sbitmap_free (temp_bitmap);
- sbitmap_free (difference);
}
/* later(p,s) is dependent on the calculation of laterin(p).
sbitmap *st_avout, sbitmap *st_avin, sbitmap *st_antin,
sbitmap *kill, sbitmap *farthest)
{
- sbitmap difference, temp_bitmap;
int x, num_edges;
basic_block pred, succ;
num_edges = NUM_EDGES (edge_list);
- difference = sbitmap_alloc (n_exprs);
- temp_bitmap = sbitmap_alloc (n_exprs);
-
+ auto_sbitmap difference (n_exprs), temp_bitmap (n_exprs);
for (x = 0; x < num_edges; x++)
{
pred = INDEX_EDGE_PRED_BB (edge_list, x);
}
}
}
-
- sbitmap_free (temp_bitmap);
- sbitmap_free (difference);
}
/* Compute nearer and nearerout vectors for edge based lcm.
{
unsigned HOST_WIDE_INT niter;
unsigned exit_mod;
- sbitmap wont_exit;
unsigned i;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
exit_mod = niter % (max_unroll + 1);
- wont_exit = sbitmap_alloc (max_unroll + 1);
+ auto_sbitmap wont_exit (max_unroll + 1);
bitmap_ones (wont_exit);
auto_vec<edge> remove_edges;
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
rtx_insn *init_code, *branch_code;
unsigned i, j, p;
basic_block preheader, *body, swtch, ezc_swtch;
- sbitmap wont_exit;
int may_exit_copy;
unsigned n_peel;
edge e;
auto_vec<edge> remove_edges;
- wont_exit = sbitmap_alloc (max_unroll + 2);
+ auto_sbitmap wont_exit (max_unroll + 2);
/* Peel the first copy of loop body (almost always we must leave exit test
here; the only exception is when we have extra zero check and the number
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (exit_at_end)
{
basic_block exit_block = get_bb_copy (desc->in_edge->src);
static void
unroll_loop_stupid (struct loop *loop)
{
- sbitmap wont_exit;
unsigned nunroll = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
-
- wont_exit = sbitmap_alloc (nunroll + 1);
+ auto_sbitmap wont_exit (nunroll + 1);
bitmap_clear (wont_exit);
opt_info_start_duplication (opt_info);
free_opt_info (opt_info);
}
- free (wont_exit);
-
if (desc->simple_p)
{
/* We indeed may get here provided that there are nontrivial assumptions
bitmap_and_compl_into (decomposable_context, non_decomposable_context);
if (!bitmap_empty_p (decomposable_context))
{
- sbitmap sub_blocks;
unsigned int i;
sbitmap_iterator sbi;
bitmap_iterator iter;
propagate_pseudo_copies ();
- sub_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap sub_blocks (last_basic_block_for_fn (cfun));
bitmap_clear (sub_blocks);
EXECUTE_IF_SET_IN_BITMAP (decomposable_context, 0, regno, iter)
insn = NEXT_INSN (insn);
}
}
-
- sbitmap_free (sub_blocks);
}
{
int n, max_regno;
int *map;
lra_live_range_t r, prev_r, next_r;
- sbitmap born_or_dead, born, dead;
sbitmap_iterator sbi;
bool born_p, dead_p, prev_born_p, prev_dead_p;
- born = sbitmap_alloc (lra_live_max_point);
- dead = sbitmap_alloc (lra_live_max_point);
+ auto_sbitmap born (lra_live_max_point);
+ auto_sbitmap dead (lra_live_max_point);
bitmap_clear (born);
bitmap_clear (dead);
max_regno = max_reg_num ();
bitmap_set_bit (dead, r->finish);
}
}
- born_or_dead = sbitmap_alloc (lra_live_max_point);
+ auto_sbitmap born_or_dead (lra_live_max_point);
bitmap_ior (born_or_dead, born, dead);
map = XCNEWVEC (int, lra_live_max_point);
n = -1;
prev_born_p = born_p;
prev_dead_p = dead_p;
}
- sbitmap_free (born_or_dead);
- sbitmap_free (born);
- sbitmap_free (dead);
n++;
if (lra_dump_file != NULL)
fprintf (lra_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
/* We've possibly turned single trapping insn into multiple ones. */
if (cfun->can_throw_non_call_exceptions)
{
- sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
}
if (inserted_p)
rtx prev_reg, old_reg;
int first_move;
int distances[2];
- sbitmap must_follow;
sbitmap distance1_uses;
rtx set = single_set (u->insn);
}
}
- must_follow = sbitmap_alloc (first_move + nreg_moves);
+ auto_sbitmap must_follow (first_move + nreg_moves);
for (i_reg_move = 0; i_reg_move < nreg_moves; i_reg_move++)
if (!schedule_reg_move (ps, first_move + i_reg_move,
distance1_uses, must_follow))
break;
- sbitmap_free (must_follow);
if (distance1_uses)
sbitmap_free (distance1_uses);
if (i_reg_move < nreg_moves)
optimize_sc (partial_schedule_ptr ps, ddg_ptr g)
{
int amount = PS_MIN_CYCLE (ps);
- sbitmap sched_nodes = sbitmap_alloc (g->num_nodes);
int start, end, step;
int ii = ps->ii;
bool ok = false;
if (dump_file)
fprintf (dump_file, "SMS SC already optimized.\n");
- ok = false;
- goto clear;
+ return false;
}
if (dump_file)
}
if (SMODULO (SCHED_TIME (g->closing_branch->cuid), ii) == ii - 1)
- {
- ok = true;
- goto clear;
- }
+ return true;
+ auto_sbitmap sched_nodes (g->num_nodes);
bitmap_ones (sched_nodes);
/* Calculate the new placement of the branch. It should be in row
int branch_cycle = SCHED_TIME (g->closing_branch->cuid);
int row = SMODULO (branch_cycle, ps->ii);
int num_splits = 0;
- sbitmap must_precede, must_follow, tmp_precede, tmp_follow;
+ sbitmap tmp_precede, tmp_follow;
int min_cycle, c;
if (dump_file)
gcc_assert (c >= start);
if (c >= end)
{
- ok = false;
if (dump_file)
fprintf (dump_file,
"SMS failed to schedule branch at cycle: %d\n", c);
- goto clear;
+ return false;
}
}
else
if (dump_file)
fprintf (dump_file,
"SMS failed to schedule branch at cycle: %d\n", c);
- ok = false;
- goto clear;
+ return false;
}
}
- must_precede = sbitmap_alloc (g->num_nodes);
- must_follow = sbitmap_alloc (g->num_nodes);
+ auto_sbitmap must_precede (g->num_nodes);
+ auto_sbitmap must_follow (g->num_nodes);
/* Try to schedule the branch in its new cycle. */
calculate_must_precede_follow (g->closing_branch, start, end,
/* This might have been added to a new first stage. */
if (PS_MIN_CYCLE (ps) < min_cycle)
reset_sched_times (ps, 0);
-
- free (must_precede);
- free (must_follow);
}
-clear:
- free (sched_nodes);
return ok;
}
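
Beyond dropping the free calls, RAII is what lets optimize_sc above turn
its "goto clear" cleanup label into plain early returns. A contrast sketch,
reusing the stand-ins from the first example (both function bodies are
invented for illustration):

/* Manual style: every early exit must funnel through the cleanup label.  */
static bool
check_manual (unsigned int n)
{
  bool ok = false;
  sbitmap nodes = sbitmap_alloc (n);
  if (n == 0)
    goto clear;
  ok = true;
clear:
  sbitmap_free (nodes);
  return ok;
}

/* RAII style: the destructor runs on every path out of the scope,
   so failure paths can simply return.  */
static bool
check_raii (unsigned int n)
{
  auto_sbitmap nodes (n);
  if (n == 0)
    return false;
  return true;
}
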
int start, step, end;
int early_start, late_start;
ddg_edge_ptr e;
- sbitmap psp = sbitmap_alloc (ps->g->num_nodes);
- sbitmap pss = sbitmap_alloc (ps->g->num_nodes);
+ auto_sbitmap psp (ps->g->num_nodes);
+ auto_sbitmap pss (ps->g->num_nodes);
sbitmap u_node_preds = NODE_PREDECESSORS (u_node);
sbitmap u_node_succs = NODE_SUCCESSORS (u_node);
int psp_not_empty;
*start_p = start;
*step_p = step;
*end_p = end;
- sbitmap_free (psp);
- sbitmap_free (pss);
if ((start >= end && step == 1) || (start <= end && step == -1))
{
int flush_and_start_over = true;
int num_nodes = g->num_nodes;
int start, end, step; /* Place together into one struct? */
- sbitmap sched_nodes = sbitmap_alloc (num_nodes);
- sbitmap must_precede = sbitmap_alloc (num_nodes);
- sbitmap must_follow = sbitmap_alloc (num_nodes);
- sbitmap tobe_scheduled = sbitmap_alloc (num_nodes);
+ auto_sbitmap sched_nodes (num_nodes);
+ auto_sbitmap must_precede (num_nodes);
+ auto_sbitmap must_follow (num_nodes);
+ auto_sbitmap tobe_scheduled (num_nodes);
partial_schedule_ptr ps = create_partial_schedule (ii, g, DFA_HISTORY);
else
gcc_assert (bitmap_equal_p (tobe_scheduled, sched_nodes));
- sbitmap_free (sched_nodes);
- sbitmap_free (must_precede);
- sbitmap_free (must_follow);
- sbitmap_free (tobe_scheduled);
-
return ps;
}
check_nodes_order (int *node_order, int num_nodes)
{
int i;
- sbitmap tmp = sbitmap_alloc (num_nodes);
+ auto_sbitmap tmp (num_nodes);
bitmap_clear (tmp);
if (dump_file)
fprintf (dump_file, "\n");
-
- sbitmap_free (tmp);
}
/* Order the nodes of G for scheduling and pass the result in
int i, pos = 0;
ddg_ptr g = all_sccs->ddg;
int num_nodes = g->num_nodes;
- sbitmap prev_sccs = sbitmap_alloc (num_nodes);
- sbitmap on_path = sbitmap_alloc (num_nodes);
- sbitmap tmp = sbitmap_alloc (num_nodes);
- sbitmap ones = sbitmap_alloc (num_nodes);
+ auto_sbitmap prev_sccs (num_nodes);
+ auto_sbitmap on_path (num_nodes);
+ auto_sbitmap tmp (num_nodes);
+ auto_sbitmap ones (num_nodes);
bitmap_clear (prev_sccs);
bitmap_ones (ones);
bitmap_and_compl (tmp, ones, prev_sccs);
pos = order_nodes_in_scc (g, prev_sccs, tmp, node_order, pos);
}
- sbitmap_free (prev_sccs);
- sbitmap_free (on_path);
- sbitmap_free (tmp);
- sbitmap_free (ones);
}
/* MII is needed if we consider backarcs (that do not close recursive cycles). */
{
enum sms_direction dir;
int num_nodes = g->num_nodes;
- sbitmap workset = sbitmap_alloc (num_nodes);
- sbitmap tmp = sbitmap_alloc (num_nodes);
+ auto_sbitmap workset (num_nodes);
+ auto_sbitmap tmp (num_nodes);
sbitmap zero_bitmap = sbitmap_alloc (num_nodes);
- sbitmap predecessors = sbitmap_alloc (num_nodes);
- sbitmap successors = sbitmap_alloc (num_nodes);
+ auto_sbitmap predecessors (num_nodes);
+ auto_sbitmap successors (num_nodes);
bitmap_clear (predecessors);
find_predecessors (predecessors, g, nodes_ordered);
bitmap_and (workset, successors, scc);
}
}
- sbitmap_free (tmp);
- sbitmap_free (workset);
sbitmap_free (zero_bitmap);
- sbitmap_free (predecessors);
- sbitmap_free (successors);
return pos;
}
void
split_all_insns (void)
{
- sbitmap blocks;
bool changed;
basic_block bb;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_clear (blocks);
changed = false;
find_many_sub_basic_blocks (blocks);
checking_verify_flow_info ();
-
- sbitmap_free (blocks);
}
/* Same as split_all_insns, but do not expect CFG to be available.
{
struct value_data *all_vd;
basic_block bb;
- sbitmap visited;
bool analyze_called = false;
all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (fun));
- visited = sbitmap_alloc (last_basic_block_for_fn (fun));
+ auto_sbitmap visited (last_basic_block_for_fn (fun));
bitmap_clear (visited);
FOR_EACH_BB_FN (bb, fun)
queued_debug_insn_change_pool.release ();
}
- sbitmap_free (visited);
free (all_vd);
return 0;
}
/* We've possibly turned single trapping insn into multiple ones. */
if (cfun->can_throw_non_call_exceptions)
{
- sbitmap blocks;
- blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap blocks (last_basic_block_for_fn (cfun));
bitmap_ones (blocks);
find_many_sub_basic_blocks (blocks);
- sbitmap_free (blocks);
}
if (inserted)
int too_large_failure;
basic_block bb;
- /* Note if a block is a natural loop header. */
- sbitmap header;
-
- /* Note if a block is a natural inner loop header. */
- sbitmap inner;
-
- /* Note if a block is in the block queue. */
- sbitmap in_queue;
-
- /* Note if a block is in the block queue. */
- sbitmap in_stack;
-
/* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
and a mapping from block to its loop header (if the block is contained
in a loop, else -1).
dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
- inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is a natural inner loop header. */
+ auto_sbitmap inner (last_basic_block_for_fn (cfun));
bitmap_ones (inner);
- header = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is a natural loop header. */
+ auto_sbitmap header (last_basic_block_for_fn (cfun));
bitmap_clear (header);
- in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is in the block queue. */
+ auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
bitmap_clear (in_queue);
- in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ /* Note if a block is on the stack. */
+ auto_sbitmap in_stack (last_basic_block_for_fn (cfun));
bitmap_clear (in_stack);
for (i = 0; i < last_basic_block_for_fn (cfun); i++)
free (max_hdr);
free (degree);
free (stack);
- sbitmap_free (header);
- sbitmap_free (inner);
- sbitmap_free (in_queue);
- sbitmap_free (in_stack);
}
static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
- sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
+ auto_sbitmap src (SBITMAP_SIZE (pot_split[bb_src]));
bitmap_copy (src, pot_split[bb_src]);
bitmap_and_compl (src, src, pot_split[bb_trg]);
extract_edgelst (src, bl);
- sbitmap_free (src);
}
/* Find the valid candidate-source-blocks for the target block TRG, compute
edgelst el = { NULL, 0 };
int i, j, k, update_idx;
basic_block block;
- sbitmap visited;
edge_iterator ei;
edge e;
sp->is_speculative = 0;
sp->src_prob = REG_BR_PROB_BASE;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
for (i = trg + 1; i < current_nr_blocks; i++)
{
sp->src_prob = 0;
}
}
-
- sbitmap_free (visited);
}
/* Free the computed target info. */
static int
init_seqno (bitmap blocks_to_reschedule, basic_block from)
{
- sbitmap visited_bbs;
bitmap_iterator bi;
unsigned bbi;
- visited_bbs = sbitmap_alloc (current_nr_blocks);
+ auto_sbitmap visited_bbs (current_nr_blocks);
if (blocks_to_reschedule)
{
removed by the call to purge_empty_blocks in sel_sched_region_1). */
gcc_assert (cur_seqno >= 0);
- sbitmap_free (visited_bbs);
return sched_max_luid - 1;
}
edge_iterator *stack, ei;
int sp;
edge act;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
rtx note;
rtx_insn *insn;
rtx mem = smexpr->pattern;
if (!sp)
{
free (stack);
- sbitmap_free (visited);
return;
}
act = ei_edge (stack[--sp]);
will grow while we are traversing it (but it will not
gain any new members). Copy OLD_SSA_NAMES to a temporary
for traversal. */
- sbitmap tmp = sbitmap_alloc (SBITMAP_SIZE (old_ssa_names));
+ auto_sbitmap tmp (SBITMAP_SIZE (old_ssa_names));
bitmap_copy (tmp, old_ssa_names);
EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, sbi)
insert_updated_phi_nodes_for (ssa_name (i), dfs, blocks_to_update,
update_flags);
- sbitmap_free (tmp);
}
symbols_to_rename.qsort (insert_updated_phi_nodes_compare_uids);
{
unsigned b;
basic_block bb;
- sbitmap visited = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
+ auto_sbitmap visited (last_basic_block_for_fn (cfun) + 1);
bitmap_clear (visited);
b = *--(live->stack_top);
loe_visit_block (live, BASIC_BLOCK_FOR_FN (cfun, b), visited);
}
-
- sbitmap_free (visited);
}
static void
fill_always_executed_in (void)
{
- sbitmap contains_call = sbitmap_alloc (last_basic_block_for_fn (cfun));
basic_block bb;
struct loop *loop;
+ auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
bitmap_clear (contains_call);
FOR_EACH_BB_FN (bb, cfun)
{
for (loop = current_loops->tree_root->inner; loop; loop = loop->next)
fill_always_executed_in_1 (loop, contains_call);
-
- sbitmap_free (contains_call);
}
if (n_unroll)
{
- sbitmap wont_exit;
bool large;
if (ul == UL_SINGLE_ITER)
return false;
"loop turned into non-loop; it never loops.\n");
initialize_original_copy_tables ();
- wont_exit = sbitmap_alloc (n_unroll + 1);
+ auto_sbitmap wont_exit (n_unroll + 1);
if (exit && niter
&& TREE_CODE (niter) == INTEGER_CST
&& wi::leu_p (n_unroll, wi::to_widest (niter)))
| DLTHE_FLAG_COMPLETTE_PEEL))
{
free_original_copy_tables ();
- free (wont_exit);
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Failed to duplicate the loop\n");
return false;
}
- free (wont_exit);
free_original_copy_tables ();
}
HOST_WIDE_INT npeel;
struct loop_size size;
int peeled_size;
- sbitmap wont_exit;
if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
|| !peeled_loops)
/* Duplicate possibly eliminating the exits. */
initialize_original_copy_tables ();
- wont_exit = sbitmap_alloc (npeel + 1);
+ auto_sbitmap wont_exit (npeel + 1);
if (exit && niter
&& TREE_CODE (niter) == INTEGER_CST
&& wi::leu_p (npeel, wi::to_widest (niter)))
DLTHE_FLAG_UPDATE_FREQ))
{
free_original_copy_tables ();
- free (wont_exit);
return false;
}
- free (wont_exit);
free_original_copy_tables ();
if (dump_file && (dump_flags & TDF_DETAILS))
{
unsigned est_niter, prob_entry, scale_unrolled, scale_rest, freq_e, freq_h;
unsigned new_est_niter, i, prob;
unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
- sbitmap wont_exit;
auto_vec<edge> to_remove;
est_niter = expected_loop_iterations (loop);
/* Unroll the loop and remove the exits in all iterations except for the
last one. */
- wont_exit = sbitmap_alloc (factor);
+ auto_sbitmap wont_exit (factor);
bitmap_ones (wont_exit);
bitmap_clear_bit (wont_exit, factor - 1);
ok = gimple_duplicate_loop_to_header_edge
(loop, loop_latch_edge (loop), factor - 1,
wont_exit, new_exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ);
- free (wont_exit);
gcc_assert (ok);
FOR_EACH_VEC_ELT (to_remove, i, e)
int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
int postorder_num = inverted_post_order_compute (postorder);
- sbitmap worklist = sbitmap_alloc (last_basic_block_for_fn (cfun) + 1);
+ auto_sbitmap worklist (last_basic_block_for_fn (cfun) + 1);
bitmap_ones (worklist);
while (changed)
{
}
sbitmap_free (has_abnormal_preds);
- sbitmap_free (worklist);
free (postorder);
}
unsigned int length = ops->length ();
operand_entry *oe1;
unsigned i, j;
- sbitmap candidates, candidates2;
unsigned nr_candidates, nr_candidates2;
sbitmap_iterator sbi0;
vec<operand_entry *> *subops;
return false;
/* Build a list of candidates to process. */
- candidates = sbitmap_alloc (length);
+ auto_sbitmap candidates (length);
bitmap_clear (candidates);
nr_candidates = 0;
FOR_EACH_VEC_ELT (*ops, i, oe1)
}
if (nr_candidates < 2)
- {
- sbitmap_free (candidates);
- return false;
- }
+ return false;
if (dump_file && (dump_flags & TDF_DETAILS))
{
}
/* Process the (operand, code) pairs in order of most occurrence. */
- candidates2 = sbitmap_alloc (length);
+ auto_sbitmap candidates2 (length);
while (!cvec.is_empty ())
{
oecount *c = &cvec.last ();
subops[i].release ();
free (subops);
cvec.release ();
- sbitmap_free (candidates);
- sbitmap_free (candidates2);
return changed;
}
auto_vec<edge, 10> stack;
edge e;
edge_iterator ei;
- sbitmap visited;
bool ret;
if (va_arg_bb == va_start_bb)
if (! dominated_by_p (CDI_DOMINATORS, va_arg_bb, va_start_bb))
return false;
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
bitmap_clear (visited);
ret = true;
}
}
- sbitmap_free (visited);
return ret;
}
{
unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
unsigned int i, j;
- sbitmap load_index;
unsigned int lidx;
slp_tree node, load;
/* Check that the loads in the first sequence are different and there
are no gaps between them. */
- load_index = sbitmap_alloc (group_size);
+ auto_sbitmap load_index (group_size);
bitmap_clear (load_index);
FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
{
if (lidx >= group_size)
- {
- sbitmap_free (load_index);
- return false;
- }
+ return false;
if (bitmap_bit_p (load_index, lidx))
- {
- sbitmap_free (load_index);
- return false;
- }
+ return false;
+
bitmap_set_bit (load_index, lidx);
}
for (i = 0; i < group_size; i++)
if (!bitmap_bit_p (load_index, i))
- {
- sbitmap_free (load_index);
- return false;
- }
- sbitmap_free (load_index);
+ return false;
/* This permutation is valid for reduction. Since the order of the
statements in the nodes is not important unless they are memory
{
bb_heap_t *worklist = new bb_heap_t (LONG_MIN);
bb_heap_t *pending = new bb_heap_t (LONG_MIN);
- sbitmap visited, in_worklist, in_pending;
+ sbitmap in_worklist, in_pending;
basic_block bb;
edge e;
int *bb_order;
bb_order[rc_order[i]] = i;
free (rc_order);
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
+ auto_sbitmap visited (last_basic_block_for_fn (cfun));
in_worklist = sbitmap_alloc (last_basic_block_for_fn (cfun));
in_pending = sbitmap_alloc (last_basic_block_for_fn (cfun));
bitmap_clear (in_worklist);
free (bb_order);
delete worklist;
delete pending;
- sbitmap_free (visited);
sbitmap_free (in_worklist);
sbitmap_free (in_pending);