+2013-11-22 Trevor Saunders <tsaunders@mozilla.com>
+
+ * vec.h (auto_vec): New class.
+ * cfganal.c, cfgloop.c, cgraphunit.c, config/i386/i386.c, dwarf2out.c,
+ function.c, genautomata.c, gimple.c, haifa-sched.c, ipa-inline.c,
+ ira-build.c, loop-unroll.c, omp-low.c, ree.c, trans-mem.c,
+ tree-call-cdce.c, tree-eh.c, tree-if-conv.c, tree-into-ssa.c,
+ tree-loop-distribution.c, tree-predcom.c, tree-sra.c,
+ tree-ssa-forwprop.c, tree-ssa-loop-manip.c, tree-ssa-pre.c,
+ tree-ssa-reassoc.c, tree-ssa-sccvn.c, tree-ssa-structalias.c,
+ tree-vect-loop.c, tree-vect-stmts.c: Use auto_vec and stack_vec as
+ appropriate instead of vec for local variables.
+
2013-11-21 Teresa Johnson <tejohnson@google.com>
PR target/59233
{
bitmap_iterator bi;
unsigned bb_index, i;
- vec<int> work_stack;
bitmap phi_insertion_points;
/* Each block can appear at most twice on the work-stack. */
- work_stack.create (2 * n_basic_blocks_for_fn (cfun));
+ auto_vec<int> work_stack (2 * n_basic_blocks_for_fn (cfun));
phi_insertion_points = BITMAP_ALLOC (NULL);
/* Seed the work list with all the blocks in DEF_BLOCKS. We use
}
}
- work_stack.release ();
-
return phi_insertion_points;
}
int *rc_order;
int b;
unsigned i;
- vec<loop_p> larray;
/* Ensure that the dominators are computed. */
calculate_dominance_info (CDI_DOMINATORS);
/* Gather all loop headers in reverse completion order and allocate
loop structures for loops that are not already present. */
- larray.create (loops->larray->length ());
+ auto_vec<loop_p> larray (loops->larray->length ());
for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
{
basic_block header = BASIC_BLOCK (rc_order[b]);
}
}
- larray.release ();
-
return loops;
}
int i;
tree resdecl;
tree restmp = NULL;
- vec<tree> vargs;
gimple call;
gimple ret;
for (arg = a; arg; arg = DECL_CHAIN (arg))
nargs++;
- vargs.create (nargs);
+ auto_vec<tree> vargs (nargs);
if (this_adjusting)
vargs.quick_push (thunk_adjust (&bsi, a, 1, fixed_offset,
virtual_offset));
vargs.quick_push (arg);
call = gimple_build_call_vec (build_fold_addr_expr_loc (0, alias), vargs);
node->callees->call_stmt = call;
- vargs.release ();
gimple_call_set_from_thunk (call, true);
if (restmp)
{
{
tree resolver_decl;
basic_block empty_bb;
- vec<tree> fn_ver_vec = vNULL;
tree default_ver_decl;
struct cgraph_node *versn;
struct cgraph_node *node;
push_cfun (DECL_STRUCT_FUNCTION (resolver_decl));
- fn_ver_vec.create (2);
+ stack_vec<tree, 2> fn_ver_vec;
for (versn_info = node_version_info->next; versn_info;
versn_info = versn_info->next)
}
dispatch_function_versions (resolver_decl, &fn_ver_vec, &empty_bb);
- fn_ver_vec.release ();
rebuild_cgraph_edges ();
pop_cfun ();
return resolver_decl;
+2013-11-22 Trevor Saunders <tsaunders@mozilla.com>
+
+ * parser.c, semantics.c: Change some local variables from vec to
+ auto_vec or stack_vec.
+
2013-11-18 Richard Sandiford <rdsandiford@googlemail.com>
* decl.c (reshape_init_array_1): Use tree_to_uhwi rather than
cp_parser_omp_declare_reduction (cp_parser *parser, cp_token *pragma_tok,
enum pragma_context)
{
- vec<tree> types = vNULL;
+ auto_vec<tree> types;
enum tree_code reduc_code = ERROR_MARK;
tree reduc_id = NULL_TREE, orig_reduc_id = NULL_TREE, type;
unsigned int i;
{
fail:
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
- types.release ();
return;
}
}
cp_parser_require_pragma_eol (parser, pragma_tok);
- types.release ();
}
/* OpenMP 4.0
{
bool maybe_zero_len = false;
unsigned int first_non_one = 0;
- vec<tree> types = vNULL;
+ auto_vec<tree> types;
tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types,
maybe_zero_len, first_non_one);
if (first == error_mark_node)
- {
- types.release ();
- return true;
- }
+ return true;
if (first == NULL_TREE)
- {
- types.release ();
- return false;
- }
+ return false;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
{
tree t = OMP_CLAUSE_DECL (c);
tree tem = NULL_TREE;
- types.release ();
if (processing_template_decl)
return false;
/* Need to evaluate side effects in the length expressions
if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
maybe_zero_len = true;
if (processing_template_decl && maybe_zero_len)
- {
- types.release ();
- return false;
- }
+ return false;
for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
t = TREE_CHAIN (t))
"array section is not contiguous in %qs "
"clause",
omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
- types.release ();
return true;
}
}
size = size_binop (MULT_EXPR, size, l);
}
}
- types.release ();
if (!processing_template_decl)
{
if (side_effects)
gen_producer_string (void)
{
size_t j;
- vec<dchar_p> switches = vNULL;
+ auto_vec<dchar_p> switches;
const char *language_string = lang_hooks.name;
char *producer, *tail;
const char *p;
}
*tail = '\0';
- switches.release ();
return producer;
}
rtx label;
edge_iterator ei;
edge e;
- vec<basic_block> src_bbs;
+ auto_vec<basic_block> src_bbs (EDGE_COUNT (last_bb->preds));
- src_bbs.create (EDGE_COUNT (last_bb->preds));
FOR_EACH_EDGE (e, ei, last_bb->preds)
if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
src_bbs.quick_push (e->src);
uniq_sort_alt_states (alt_state_t alt_states_list)
{
alt_state_t curr_alt_state;
- vec<alt_state_t> alt_states;
size_t i;
size_t prev_unique_state_ind;
alt_state_t result;
if (alt_states_list->next_alt_state == 0)
return alt_states_list;
- alt_states.create (150);
+ stack_vec<alt_state_t, 150> alt_states;
for (curr_alt_state = alt_states_list;
curr_alt_state != NULL;
curr_alt_state = curr_alt_state->next_alt_state)
result = alt_states[0];
- alt_states.release ();
return result;
}
bool annotation_reservation_message_reported_p;
regexp_t seq, allof, unit;
struct unit_usage *unit_usage_ptr;
- vec<int> marked;
if (regexp == NULL || regexp->mode != rm_oneof)
return;
unit_usage_ptr = unit_usage_ptr->next)
unit_usage_ptr->unit_decl->last_distribution_check_cycle = -1;
n_alts = REGEXP_ONEOF (regexp)->regexps_num;
- marked.create (n_alts);
+ auto_vec<int> marked (n_alts);
for (i = 0; i < n_alts; i++)
marked.safe_push (0);
annotation_reservation_message_reported_p = false;
}
}
}
- marked.release ();
cycle_alt_unit_usages.release ();
obstack_free (&unit_usages, NULL);
}
{
ainsn_t curr_ainsn;
size_t i;
- vec<ainsn_t> last_insns;
- last_insns.create (150);
+ stack_vec<ainsn_t, 150> last_insns;
for (curr_ainsn = automaton->ainsn_list;
curr_ainsn != NULL;
curr_ainsn->first_insn_with_same_reservs = 1;
}
}
- last_insns.release ();
}
/* Forming unit reservations which can affect creating the automaton
state_t state;
state_t start_state;
state_t state2;
- vec<state_t> state_stack;
- state_stack.create (150);
+ stack_vec<state_t, 150> state_stack;
int states_n;
reserv_sets_t reservs_matter = form_reservs_matter (automaton);
}
add_arc (state, state2, automaton->advance_ainsn);
}
- state_stack.release ();
}
/* Form lists of all arcs of STATE marked by the same ainsn. */
state_t start_state;
state_t state;
decl_t decl;
- vec<state_t> state_stack;
+ auto_vec<state_t> state_stack;
int i;
int states_n;
- state_stack.create (0);
-
/* Create the start state (empty state). */
start_state = automaton->start_state;
start_state->it_was_placed_in_stack_for_DFA_forming = 1;
add_arc (state, state, automaton->collapse_ainsn);
}
}
- state_stack.release ();
}
/* The following variable value is current number (1, 2, ...) of passing
static void
minimize_DFA (automaton_t automaton)
{
- vec<state_t> equiv_classes = vNULL;
+ auto_vec<state_t> equiv_classes;
evaluate_equiv_classes (automaton, &equiv_classes);
merge_states (automaton, equiv_classes);
pass_states (automaton, set_new_cycle_flags);
-
- equiv_classes.release ();
}
/* Values of two variables are counted number of states and arcs in an
{
int i;
int nargs = gimple_call_num_args (stmt);
- vec<tree> vargs;
- vargs.create (nargs);
+ auto_vec<tree> vargs (nargs);
gimple new_stmt;
for (i = 0; i < nargs; i++)
vargs);
else
new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
- vargs.release ();
+
if (gimple_call_lhs (stmt))
gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
static void
unschedule_insns_until (rtx insn)
{
- vec<rtx> recompute_vec = vNULL;
+ auto_vec<rtx> recompute_vec;
/* Make two passes over the insns to be unscheduled. First, we clear out
dependencies and other trivial bookkeeping. */
else if (QUEUE_INDEX (con) != QUEUE_SCHEDULED)
TODO_SPEC (con) = recompute_todo_spec (con, true);
}
- recompute_vec.release ();
}
/* Restore scheduler state from the topmost entry on the backtracking queue.
fibheap_t edge_heap = fibheap_new ();
bitmap updated_nodes = BITMAP_ALLOC (NULL);
int min_size, max_size;
- vec<cgraph_edge_p> new_indirect_edges = vNULL;
+ auto_vec<cgraph_edge_p> new_indirect_edges;
int initial_size = 0;
struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
struct cgraph_edge_hook_list *edge_removal_hook_holder;
}
free_growth_caches ();
- new_indirect_edges.release ();
fibheap_delete (edge_heap);
if (dump_file)
fprintf (dump_file,
{
ira_loop_tree_node_t subloop_node;
unsigned int i;
- vec<ira_loop_tree_node_t> dfs_stack;
+ auto_vec<ira_loop_tree_node_t> dfs_stack;
/* This is a bit of strange abuse of the BB_VISITED flag: We use
the flag to mark blocks we still have to visit to add them to
}
#undef BB_TO_VISIT
- dfs_stack.release ();
}
gcc_assert (topsort_nodes.length () == n_loop_preorder);
if (bb_p)
{
- vec<ira_loop_tree_node_t>
- loop_preorder = vNULL;
+ auto_vec<ira_loop_tree_node_t> loop_preorder;
unsigned int i;
/* Add all nodes to the set of nodes to visit. The IRA loop tree
(*postorder_func) (subloop_node);
loop_rev_postorder.release ();
}
-
- loop_preorder.release ();
}
for (subloop_node = loop_node->subloops;
sbitmap wont_exit;
unsigned HOST_WIDE_INT npeel;
unsigned i;
- vec<edge> remove_edges;
edge ein;
struct niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
if (desc->noloop_assumptions)
bitmap_clear_bit (wont_exit, 1);
- remove_edges.create (0);
-
+ auto_vec<edge> remove_edges;
if (flag_split_ivs_in_unroller)
opt_info = analyze_insns_in_loop (loop);
/* Remove the exit edges. */
FOR_EACH_VEC_ELT (remove_edges, i, ein)
remove_path (ein);
- remove_edges.release ();
}
ein = desc->in_edge;
unsigned exit_mod;
sbitmap wont_exit;
unsigned i;
- vec<edge> remove_edges;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
struct niter_desc *desc = get_simple_loop_desc (loop);
wont_exit = sbitmap_alloc (max_unroll + 1);
bitmap_ones (wont_exit);
- remove_edges.create (0);
+ auto_vec<edge> remove_edges;
if (flag_split_ivs_in_unroller
|| flag_variable_expansion_in_unroller)
opt_info = analyze_insns_in_loop (loop);
/* Remove the edges. */
FOR_EACH_VEC_ELT (remove_edges, i, e)
remove_path (e);
- remove_edges.release ();
if (dump_file)
fprintf (dump_file,
rtx old_niter, niter, init_code, branch_code, tmp;
unsigned i, j, p;
basic_block preheader, *body, swtch, ezc_swtch;
- vec<basic_block> dom_bbs;
sbitmap wont_exit;
int may_exit_copy;
unsigned n_peel;
- vec<edge> remove_edges;
edge e;
bool extra_zero_check, last_may_exit;
unsigned max_unroll = loop->lpt_decision.times;
opt_info = analyze_insns_in_loop (loop);
/* Remember blocks whose dominators will have to be updated. */
- dom_bbs.create (0);
+ auto_vec<basic_block> dom_bbs;
body = get_loop_body (loop);
for (i = 0; i < loop->num_nodes; i++)
/* Precondition the loop. */
split_edge_and_insert (loop_preheader_edge (loop), init_code);
- remove_edges.create (0);
+ auto_vec<edge> remove_edges;
wont_exit = sbitmap_alloc (max_unroll + 2);
/* Remove the edges. */
FOR_EACH_VEC_ELT (remove_edges, i, e)
remove_path (e);
- remove_edges.release ();
/* We must be careful when updating the number of iterations due to
preconditioning and the fact that the value must be valid at entry
";; Unrolled loop %d times, counting # of iterations "
"in runtime, %i insns\n",
max_unroll, num_loop_insns (loop));
-
- dom_bbs.release ();
}
/* Decide whether to simply peel LOOP and how much. */
expand_omp_sections (struct omp_region *region)
{
tree t, u, vin = NULL, vmain, vnext, l2;
- vec<tree> label_vec;
unsigned len;
basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
gimple_stmt_iterator si, switch_si;
/* Use vec::quick_push on label_vec throughout, since we know the size
in advance. */
- label_vec.create (len);
+ auto_vec<tree> label_vec (len);
/* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
GIMPLE_OMP_SECTIONS statement. */
stmt = gimple_build_switch (vmain, u, label_vec);
gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
gsi_remove (&switch_si, true);
- label_vec.release ();
si = gsi_start_bb (default_bb);
stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
rtx curr_insn = NULL_RTX;
int num_re_opportunities = 0, num_realized = 0, i;
vec<ext_cand> reinsn_list;
- vec<rtx> reinsn_del_list;
+ auto_vec<rtx> reinsn_del_list;
ext_state state;
/* Construct DU chain to get all reaching definitions of each
df_set_flags (DF_DEFER_INSN_RESCAN);
max_insn_uid = get_max_uid ();
- reinsn_del_list.create (0);
reinsn_list = find_removable_extensions ();
state.defs_list.create (0);
state.copies_list.create (0);
delete_insn (curr_insn);
reinsn_list.release ();
- reinsn_del_list.release ();
state.defs_list.release ();
state.copies_list.release ();
state.modified_list.release ();
edge_iterator ei;
edge e;
basic_block bb;
- vec<basic_block> queue = vNULL;
+ auto_vec<basic_block> queue;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
- vec<tm_region_p> bb_regions = vNULL;
+ auto_vec<tm_region_p> bb_regions;
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
}
while (!queue.is_empty ());
- queue.release ();
BITMAP_FREE (visited_blocks);
- bb_regions.release ();
}
/* The "gate" function for all transactional memory expansion and optimization
{
struct tm_ipa_cg_data *d;
bitmap new_irr, old_irr;
- vec<basic_block> queue;
bool ret = false;
/* Builtin operators (operator new, and such). */
calculate_dominance_info (CDI_DOMINATORS);
d = get_cg_data (&node, true);
- queue.create (10);
+ stack_vec<basic_block, 10> queue;
new_irr = BITMAP_ALLOC (&tm_obstack);
/* Scan each tm region, propagating irrevocable status through the tree. */
else
BITMAP_FREE (new_irr);
- queue.release ();
pop_cfun ();
return ret;
bool need_ssa_rename = false;
edge e;
edge_iterator ei;
- vec<basic_block> queue = vNULL;
+ auto_vec<basic_block> queue;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
queue.safe_push (bb);
}
while (!queue.is_empty ());
- queue.release ();
BITMAP_FREE (visited_blocks);
return need_ssa_rename;
basic_block bb;
gimple_stmt_iterator i;
bool something_changed = false;
- vec<gimple> cond_dead_built_in_calls = vNULL;
+ auto_vec<gimple> cond_dead_built_in_calls;
FOR_EACH_BB (bb)
{
/* Collect dead call candidates. */
something_changed
= shrink_wrap_conditional_dead_built_in_calls (cond_dead_built_in_calls);
- cond_dead_built_in_calls.release ();
-
if (something_changed)
{
free_dominance_info (CDI_DOMINATORS);
{
case ERT_TRY:
{
- vec<tree> labels = vNULL;
+ auto_vec<tree> labels;
tree default_label = NULL;
eh_catch c;
edge_iterator ei;
x = gimple_build_switch (filter, default_label, labels);
gsi_insert_before (&gsi, x, GSI_SAME_STMT);
-
- labels.release ();
}
pointer_set_destroy (seen_values);
}
free (dr->aux);
}
- loop_nest.release ();
free_data_refs (refs);
free_dependence_relations (ddrs);
return res;
static void
prune_unused_phi_nodes (bitmap phis, bitmap kills, bitmap uses)
{
- vec<int> worklist;
bitmap_iterator bi;
unsigned i, b, p, u, top;
bitmap live_phis;
dfs_out numbers, increase the dfs number by one (so that it corresponds
to the start of the following interval, not to the end of the current
one). We use WORKLIST as a stack. */
- worklist.create (n_defs + 1);
+ auto_vec<int> worklist (n_defs + 1);
worklist.quick_push (1);
top = 1;
n_defs = 1;
}
}
- worklist.release ();
bitmap_copy (phis, live_phis);
BITMAP_FREE (live_phis);
free (defs);
hash_table <var_info_hasher>::iterator hi;
unsigned i;
var_info_p info;
- vec<var_info_p> vars;
timevar_push (TV_TREE_INSERT_PHI_NODES);
- vars.create (var_infos.elements ());
+ auto_vec<var_info_p> vars (var_infos.elements ());
FOR_EACH_HASH_TABLE_ELEMENT (var_infos, info, var_info_p, hi)
if (info->info.need_phi_state != NEED_PHI_STATE_NO)
vars.quick_push (info);
BITMAP_FREE (idf);
}
- vars.release ();
-
timevar_pop (TV_TREE_INSERT_PHI_NODES);
}
control_dependences *cd, int *nb_calls)
{
struct graph *rdg;
- vec<partition_t> partitions;
partition_t partition;
bool any_builtin;
int i, nbp;
if (dump_file && (dump_flags & TDF_DETAILS))
dump_rdg (dump_file, rdg);
- partitions.create (3);
+ stack_vec<partition_t, 3> partitions;
rdg_build_partitions (rdg, stmts, &partitions);
any_builtin = false;
FOR_EACH_VEC_ELT (partitions, i, partition)
partition_free (partition);
- partitions.release ();
free_rdg (rdg);
return nbp - *nb_calls;
walking to innermost loops. */
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
- vec<gimple> work_list = vNULL;
+ auto_vec<gimple> work_list;
basic_block *bbs;
int num = loop->num;
unsigned int i;
}
else if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Loop %d is the same.\n", num);
-
- work_list.release ();
}
if (cd)
static void
execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
{
- vec<tree> vars;
+ auto_vec<tree> vars;
dref a;
unsigned n_writes = 0, ridx, i;
tree var;
replace_ref_with (a->stmt, vars[ridx],
!is_read, !is_read);
}
-
- vars.release ();
}
/* Returns the single statement in that NAME is used, excepting
{
unsigned i, j;
chain_p ch1, ch2, cch;
- vec<chain_p> worklist = vNULL;
+ auto_vec<chain_p> worklist;
FOR_EACH_VEC_ELT (*chains, i, ch1)
if (chain_can_be_combined_p (ch1))
}
}
}
-
- worklist.release ();
}
/* Prepare initializers for CHAIN in LOOP. Returns false if this is
static void
propagate_dereference_distances (void)
{
- vec<basic_block> queue;
basic_block bb;
- queue.create (last_basic_block_for_function (cfun));
+ auto_vec<basic_block> queue (last_basic_block_for_function (cfun));
queue.quick_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
FOR_EACH_BB (bb)
{
queue.quick_push (e->src);
}
}
-
- queue.release ();
}
/* Dump a dereferences TABLE with heading STR to file F. */
simplify_gimple_switch_label_vec (gimple stmt, tree index_type)
{
unsigned int branch_num = gimple_switch_num_labels (stmt);
- vec<tree> labels;
- labels.create (branch_num);
+ auto_vec<tree> labels (branch_num);
unsigned int i, len;
/* Collect the existing case labels in a VEC, and preprocess it as if
}
BITMAP_FREE (target_blocks);
}
-
- labels.release ();
}
/* STMT is a SWITCH_EXPR for which we attempt to find equivalent forms of
{
unsigned i;
bitmap_iterator bi;
- vec<basic_block> worklist;
struct loop *def_loop = def_bb->loop_father;
unsigned def_loop_depth = loop_depth (def_loop);
bitmap def_loop_exits;
/* Normally the work list size is bounded by the number of basic
blocks in the largest loop. We don't know this number, but we
can be fairly sure that it will be relatively small. */
- worklist.create (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
+ auto_vec<basic_block> worklist (MAX (8, n_basic_blocks_for_fn (cfun) / 128));
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
worklist.quick_push (pred);
}
}
- worklist.release ();
def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
for (struct loop *loop = def_loop;
unsigned new_est_niter, i, prob;
unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
sbitmap wont_exit;
- vec<edge> to_remove = vNULL;
+ auto_vec<edge> to_remove;
est_niter = expected_loop_iterations (loop);
determine_exit_conditions (loop, desc, factor,
ok = remove_path (e);
gcc_assert (ok);
}
- to_remove.release ();
update_ssa (TODO_update_ssa);
/* Ensure that the frequencies in the loop match the new estimated
phis to translate through. */
else
{
- vec<basic_block> worklist;
size_t i;
basic_block bprime, first = NULL;
- worklist.create (EDGE_COUNT (block->succs));
+ auto_vec<basic_block> worklist (EDGE_COUNT (block->succs));
FOR_EACH_EDGE (e, ei, block->succs)
{
if (!first
else
bitmap_set_and (ANTIC_OUT, ANTIC_IN (bprime));
}
- worklist.release ();
}
/* Prune expressions that are clobbered in block and thus become
them. */
else
{
- vec<basic_block> worklist;
size_t i;
basic_block bprime;
- worklist.create (EDGE_COUNT (block->succs));
+ auto_vec<basic_block> worklist (EDGE_COUNT (block->succs));
FOR_EACH_EDGE (e, ei, block->succs)
{
if (e->flags & EDGE_DFS_BACK)
expression_for_id (i));
}
}
- worklist.release ();
}
/* Prune expressions that are clobbered in block and thus become
}
exprs.release ();
- avail.release ();
return new_stuff;
}
bool new_stuff = false;
vec<pre_expr> exprs;
pre_expr expr;
- vec<pre_expr> avail = vNULL;
+ auto_vec<pre_expr> avail;
int i;
exprs = sorted_array_from_bitmap_set (PA_IN (block));
}
exprs.release ();
- avail.release ();
return new_stuff;
}
{
vn_reference_t ref;
pre_expr result = NULL;
- vec<vn_reference_op_s> ops = vNULL;
+ auto_vec<vn_reference_op_s> ops;
/* We can value number only calls to real functions. */
if (gimple_call_internal_p (stmt))
vn_reference_lookup_pieces (gimple_vuse (stmt), 0,
gimple_expr_type (stmt),
ops, &ref, VN_NOWALK);
- ops.release ();
if (!ref)
continue;
basic_block bb;
edge_iterator ei;
edge e;
- vec<operand_entry_t> ops = vNULL;
- vec<inter_bb_range_test_entry> bbinfo = vNULL;
+ auto_vec<operand_entry_t> ops;
+ auto_vec<inter_bb_range_test_entry> bbinfo;
bool any_changes = false;
/* Consider only basic blocks that end with GIMPLE_COND or
break;
}
}
- bbinfo.release ();
- ops.release ();
}
/* Return true if OPERAND is defined by a PHI node which uses the LHS
if (associative_tree_code (rhs_code))
{
- vec<operand_entry_t> ops = vNULL;
+ auto_vec<operand_entry_t> ops;
tree powi_result = NULL_TREE;
/* There may be no immediate uses left by the time we
gsi_insert_after (&gsi, mul_stmt, GSI_NEW_STMT);
}
}
-
- ops.release ();
}
}
}
tree base2;
HOST_WIDE_INT offset2, size2, maxsize2;
int i, j;
- vec<vn_reference_op_s>
- rhs = vNULL;
+ auto_vec<vn_reference_op_s> rhs;
vn_reference_op_t vro;
ao_ref r;
vr->operands.truncate (i + 1 + rhs.length ());
FOR_EACH_VEC_ELT (rhs, j, vro)
vr->operands[i + 1 + j] = *vro;
- rhs.release ();
vr->operands = valueize_refs (vr->operands);
vr->hashcode = vn_reference_compute_hash (vr);
static bool
extract_and_process_scc_for_name (tree name)
{
- vec<tree> scc = vNULL;
+ auto_vec<tree> scc;
tree x;
/* Found an SCC, pop the components off the SCC stack and
"SCC size %u exceeding %u\n", scc.length (),
(unsigned)PARAM_VALUE (PARAM_SCCVN_MAX_SCC_SIZE));
- scc.release ();
return false;
}
process_scc (scc);
- scc.release ();
-
return true;
}
&& !bitmap_empty_p (get_varinfo (node)->solution))
{
unsigned int i;
- vec<unsigned> queue = vNULL;
+ auto_vec<unsigned> queue;
int queuepos;
unsigned int to = find (graph->indirect_cycles[node]);
bitmap_iterator bi;
{
unify_nodes (graph, to, i, true);
}
- queue.release ();
return true;
}
return false;
{
unsigned int i;
tree val;
- vec<ce_s> tmp = vNULL;
+ auto_vec<ce_s> tmp;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (t), i, val)
{
struct constraint_expr *rhsp;
results->safe_push (*rhsp);
tmp.truncate (0);
}
- tmp.release ();
/* We do not know whether the constructor was complete,
so technically we have to add &NOTHING or &ANYTHING
like we do for an empty constructor as well. */
do_structure_copy (tree lhsop, tree rhsop)
{
struct constraint_expr *lhsp, *rhsp;
- vec<ce_s> lhsc = vNULL;
- vec<ce_s> rhsc = vNULL;
+ auto_vec<ce_s> lhsc;
+ auto_vec<ce_s> rhsc;
unsigned j;
get_constraint_for (lhsop, &lhsc);
}
else
gcc_unreachable ();
-
- lhsc.release ();
- rhsc.release ();
}
/* Create constraints ID = { rhsc }. */
static void
make_constraint_to (unsigned id, tree op)
{
- vec<ce_s> rhsc = vNULL;
+ auto_vec<ce_s> rhsc;
get_constraint_for_rhs (op, &rhsc);
make_constraints_to (id, rhsc);
- rhsc.release ();
}
/* Create a constraint ID = &FROM. */
&& gimple_call_lhs (stmt) != NULL_TREE
&& TREE_ADDRESSABLE (TREE_TYPE (gimple_call_lhs (stmt))))
{
- vec<ce_s> tmpc = vNULL;
+ auto_vec<ce_s> tmpc;
struct constraint_expr lhsc, *c;
get_constraint_for_address_of (gimple_call_lhs (stmt), &tmpc);
lhsc.var = escaped_id;
lhsc.type = SCALAR;
FOR_EACH_VEC_ELT (tmpc, i, c)
process_constraint (new_constraint (lhsc, *c));
- tmpc.release ();
}
/* Regular functions return nonlocal memory. */
handle_lhs_call (gimple stmt, tree lhs, int flags, vec<ce_s> rhsc,
tree fndecl)
{
- vec<ce_s> lhsc = vNULL;
+ auto_vec<ce_s> lhsc;
get_constraint_for (lhs, &lhsc);
/* If the store is to a global decl make sure to
}
else
process_all_all_constraints (lhsc, rhsc);
-
- lhsc.release ();
}
/* For non-IPA mode, generate constraints necessary for a call of a
for (k = 0; k < gimple_call_num_args (stmt); ++k)
{
tree arg = gimple_call_arg (stmt, k);
- vec<ce_s> argc = vNULL;
+ auto_vec<ce_s> argc;
unsigned i;
struct constraint_expr *argp;
get_constraint_for_rhs (arg, &argc);
FOR_EACH_VEC_ELT (argc, i, argp)
results->safe_push (*argp);
- argc.release ();
}
/* May return addresses of globals. */
{
gimple t = origt;
vec<ce_s> lhsc = vNULL;
- vec<ce_s> rhsc = vNULL;
+ auto_vec<ce_s> rhsc;
varinfo_t fi;
/* Add constraints for clobbered/used in IPA mode.
make_constraint_from (first_vi_for_offset (fi, fi_uses),
anything_id);
}
-
- rhsc.release ();
}
varinfo_t vi, newvi;
tree decl_type = TREE_TYPE (decl);
tree declsize = DECL_P (decl) ? DECL_SIZE (decl) : TYPE_SIZE (decl_type);
- vec<fieldoff_s> fieldstack = vNULL;
+ auto_vec<fieldoff_s> fieldstack;
fieldoff_s *fo;
unsigned int i;
}
}
- fieldstack.release ();
-
return vi;
}
if (DECL_INITIAL (decl)
&& vnode->definition)
{
- vec<ce_s> rhsc = vNULL;
+ auto_vec<ce_s> rhsc;
struct constraint_expr lhs, *rhsp;
unsigned i;
get_constraint_for_rhs (DECL_INITIAL (decl), &rhsc);
FOR_EACH_VEC_ELT (rhsc, i, rhsp)
process_constraint (new_constraint (lhs, *rhsp));
}
- rhsc.release ();
}
}
}
bool extract_scalar_result = false;
gimple use_stmt, orig_stmt, reduction_phi = NULL;
bool nested_in_vect_loop = false;
- vec<gimple> new_phis = vNULL;
- vec<gimple> inner_phis = vNULL;
+ auto_vec<gimple> new_phis;
+ auto_vec<gimple> inner_phis;
enum vect_def_type dt = vect_unknown_def_type;
int j, i;
- vec<tree> scalar_results = vNULL;
+ auto_vec<tree> scalar_results;
unsigned int group_size = 1, k, ratio;
- vec<tree> vec_initial_defs = vNULL;
- vec<gimple> phis;
+ auto_vec<tree> vec_initial_defs;
+ auto_vec<gimple> phis;
bool slp_reduc = false;
tree new_phi_result;
gimple inner_phi = NULL;
}
}
- vec_initial_defs.release ();
-
/* 2. Create epilog code.
The reduction epilog code operates across the elements of the vector
of partial results computed by the vectorized loop.
phis.release ();
}
-
- scalar_results.release ();
- inner_phis.release ();
- new_phis.release ();
}
struct loop * def_stmt_loop, *outer_loop = NULL;
tree def_arg;
gimple def_arg_stmt;
- vec<tree> vec_oprnds0 = vNULL;
- vec<tree> vec_oprnds1 = vNULL;
- vec<tree> vect_defs = vNULL;
- vec<gimple> phis = vNULL;
+ auto_vec<tree> vec_oprnds0;
+ auto_vec<tree> vec_oprnds1;
+ auto_vec<tree> vect_defs;
+ auto_vec<gimple> phis;
int vec_num;
tree def0, def1, tem, op0, op1 = NULL_TREE;
epilog_reduc_code, phis, reduc_index,
double_reduc, slp_node);
- phis.release ();
- vect_defs.release ();
- vec_oprnds0.release ();
- vec_oprnds1.release ();
-
return true;
}
if (slp_node)
{
int nops = (op1 == NULL_TREE) ? 1 : 2;
- vec<tree> ops;
- ops.create (nops);
- vec<vec<tree> > vec_defs;
- vec_defs.create (nops);
+ auto_vec<tree> ops (nops);
+ auto_vec<vec<tree> > vec_defs (nops);
ops.quick_push (op0);
if (op1)
*vec_oprnds0 = vec_defs[0];
if (op1)
*vec_oprnds1 = vec_defs[1];
-
- ops.release ();
- vec_defs.release ();
}
else
{
if (slp_node)
{
- vec<vec<tree> > vec_defs;
- vec_defs.create (nargs);
+ auto_vec<vec<tree> > vec_defs (nargs);
vec<tree> vec_oprnds0;
for (i = 0; i < nargs; i++)
vec<tree> vec_oprndsi = vec_defs[i];
vec_oprndsi.release ();
}
- vec_defs.release ();
continue;
}
if (slp_node)
{
- vec<vec<tree> > vec_defs;
- vec_defs.create (nargs);
+ auto_vec<vec<tree> > vec_defs (nargs);
vec<tree> vec_oprnds0;
for (i = 0; i < nargs; i++)
vec<tree> vec_oprndsi = vec_defs[i];
vec_oprndsi.release ();
}
- vec_defs.release ();
continue;
}
};
+/* auto_vec is a subclass of vec whose storage is released when it is
+ destroyed. */
+template<typename T>
+class auto_vec : public vec<T, va_heap>
+{
+public:
+ auto_vec () { this->m_vec = NULL; }
+ auto_vec (size_t n) { this->create (n); }
+ ~auto_vec () { this->release (); }
+};
+
/* stack_vec is a subclass of vec containing N elements of internal storage.
You probably only want to allocate this on the stack because if the array
ends up being larger or much smaller than N it will be wasting space. */