constructor_elt *elt;
fdesc_type_node = make_node (RECORD_TYPE);
- vec_safe_grow (null_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ vec_safe_grow (null_vec, TARGET_VTABLE_USES_DESCRIPTORS, true);
elt = (null_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
for (j = 0; j < TARGET_VTABLE_USES_DESCRIPTORS; j++)
gnu_result = build1 (INDIRECT_REF, gnu_result_type, gnu_result);
}
- vec_safe_grow (gnu_vec, TARGET_VTABLE_USES_DESCRIPTORS);
+ vec_safe_grow (gnu_vec, TARGET_VTABLE_USES_DESCRIPTORS, true);
elt = (gnu_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
for (gnu_field = TYPE_FIELDS (gnu_result_type), i = 0;
i < TARGET_VTABLE_USES_DESCRIPTORS;
timevar_push (TV_ALIAS_ANALYSIS);
- vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
+ vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER,
+ true);
reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
bitmap_clear (reg_known_equiv_p);
if (reg_base_value)
reg_base_value->truncate (0);
- vec_safe_grow_cleared (reg_base_value, maxreg);
+ vec_safe_grow_cleared (reg_base_value, maxreg, true);
new_reg_base_value = XNEWVEC (rtx, maxreg);
reg_seen = sbitmap_alloc (maxreg);
: ENTRY_BLOCK_PTR_FOR_FN (cfun)));
if (basic_block_info_for_fn (cfun)->length () <= (size_t)index)
vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
- index + 1);
+ index + 1, true);
SET_BASIC_BLOCK_FOR_FN (cfun, index, bb);
if (last_basic_block_for_fn (cfun) <= index)
last_basic_block_for_fn (cfun) = index + 1;
loop->num = is_loop_header_of;
loop->header = bb;
vec_safe_grow_cleared (loops_for_fn (cfun)->larray,
- is_loop_header_of + 1);
+ is_loop_header_of + 1, true);
(*loops_for_fn (cfun)->larray)[is_loop_header_of] = loop;
flow_loop_tree_node_add (loops_for_fn (cfun)->tree_root,
loop);
{
if (idx >= internal_arg_pointer_exp_state.cache.length ())
internal_arg_pointer_exp_state.cache
- .safe_grow_cleared (idx + 1);
+ .safe_grow_cleared (idx + 1, true);
internal_arg_pointer_exp_state.cache[idx] = val;
}
}
basic_block bb, min, max;
bool found = false;
auto_vec<unsigned int> n_succs;
- n_succs.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ n_succs.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
FOR_EACH_BB_FN (bb, cfun)
SET_STATE (bb,
/* Copy the gimple vectors into new vectors that we can manipulate. */
- output_tvec.safe_grow (noutputs);
- input_tvec.safe_grow (ninputs);
- constraints.safe_grow (noutputs + ninputs);
+ output_tvec.safe_grow (noutputs, true);
+ input_tvec.safe_grow (ninputs, true);
+ constraints.safe_grow (noutputs + ninputs, true);
for (i = 0; i < noutputs; ++i)
{
auto_vec<int, MAX_RECOG_OPERANDS> inout_opnum;
rtx_insn *after_rtl_seq = NULL, *after_rtl_end = NULL;
- output_rvec.safe_grow (noutputs);
+ output_rvec.safe_grow (noutputs, true);
for (i = 0; i < noutputs; ++i)
{
auto_vec<rtx, MAX_RECOG_OPERANDS> input_rvec;
auto_vec<machine_mode, MAX_RECOG_OPERANDS> input_mode;
- input_rvec.safe_grow (ninputs);
- input_mode.safe_grow (ninputs);
+ input_rvec.safe_grow (ninputs, true);
+ input_mode.safe_grow (ninputs, true);
generating_concat_p = 0;
size_t new_size =
(last_basic_block_for_fn (cfun)
+ (last_basic_block_for_fn (cfun) + 3) / 4);
- vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size, true);
}
n_basic_blocks_for_fn (cfun)++;
ret = split_insns (pattern, insn);
nregs = max_reg_num ();
if (nregs > reg_stat.length ())
- reg_stat.safe_grow_cleared (nregs);
+ reg_stat.safe_grow_cleared (nregs, true);
return ret;
}
rtl_hooks = combine_rtl_hooks;
- reg_stat.safe_grow_cleared (nregs);
+ reg_stat.safe_grow_cleared (nregs, true);
init_recog_no_volatile ();
{
unsigned int opno = m_ops.length ();
machine_mode mode = insn_data[icode].operand[opno].mode;
- m_ops.safe_grow (opno + 1);
+ m_ops.safe_grow (opno + 1, true);
create_output_operand (&m_ops.last (), possible_target, mode);
}
gcc_assert (GET_MODE (x) == VNx16BImode);
x = gen_lowpart (mode, x);
}
- m_ops.safe_grow (m_ops.length () + 1);
+ m_ops.safe_grow (m_ops.length () + 1, true);
create_input_operand (&m_ops.last (), x, mode);
}
void
function_expander::add_integer_operand (HOST_WIDE_INT x)
{
- m_ops.safe_grow (m_ops.length () + 1);
+ m_ops.safe_grow (m_ops.length () + 1, true);
create_integer_operand (&m_ops.last (), x);
}
void
function_expander::add_address_operand (rtx x)
{
- m_ops.safe_grow (m_ops.length () + 1);
+ m_ops.safe_grow (m_ops.length () + 1, true);
create_address_operand (&m_ops.last (), x);
}
void
function_expander::add_fixed_operand (rtx x)
{
- m_ops.safe_grow (m_ops.length () + 1);
+ m_ops.safe_grow (m_ops.length () + 1, true);
create_fixed_operand (&m_ops.last (), x);
}
unsigned uid = INSN_UID (insn);
if (uid >= INSN_INFO_LENGTH)
- insn_info.safe_grow (uid * 5 / 4 + 10);
+ insn_info.safe_grow (uid * 5 / 4 + 10, true);
INSN_INFO_ENTRY (uid).clock = cycle;
INSN_INFO_ENTRY (uid).new_cond = NULL;
{
int n_entries = vec_safe_length (vid.inits);
- vec_safe_grow (vid.inits, TARGET_VTABLE_DATA_ENTRY_DISTANCE * n_entries);
+ vec_safe_grow (vid.inits, TARGET_VTABLE_DATA_ENTRY_DISTANCE * n_entries,
+ true);
/* Move data entries into their new positions and add padding
after the new positions. Iterate backwards so we don't
order. Straighten them out and add them to the running list in one
step. */
jx = vec_safe_length (*inits);
- vec_safe_grow (*inits, jx + vid.inits->length ());
+ vec_safe_grow (*inits, jx + vid.inits->length (), true);
for (ix = vid.inits->length () - 1;
vid.inits->iterate (ix, &e);
/* Insert the argument into its corresponding position. */
vec<tree> &list = lists[level - 1];
if (index >= (int)list.length ())
- list.safe_grow_cleared (index + 1);
+ list.safe_grow_cleared (index + 1, true);
list[index] = TREE_PURPOSE (p);
}
&& TREE_STATIC (decl))
{
auto_vec<tree, 16> v;
- v.safe_grow (count);
+ v.safe_grow (count, true);
tree d = first;
for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
v[count - i - 1] = d;
}
auto_vec<tree, 16> v;
- v.safe_grow (count);
+ v.safe_grow (count, true);
tree d = first;
for (unsigned int i = 0; i < count; i++, d = DECL_CHAIN (d))
{
TREE_VEC_ELT (incrv, i) = incr;
if (orig_init)
{
- orig_inits.safe_grow_cleared (i + 1);
+ orig_inits.safe_grow_cleared (i + 1, true);
orig_inits[i] = orig_init;
}
if (orig_decl)
gcc_assert (TREE_CODE (type) != TEMPLATE_TEMPLATE_PARM);
if (vec_safe_length (canonical_template_parms) <= (unsigned) idx)
- vec_safe_grow_cleared (canonical_template_parms, idx + 1);
+ vec_safe_grow_cleared (canonical_template_parms, idx + 1, true);
for (tree list = (*canonical_template_parms)[idx];
list; list = TREE_CHAIN (list))
gcc_assert (tk_index - TK_VMI_CLASS_TYPES + 1 == nbases);
- vec_safe_grow (init_vec, nbases);
+ vec_safe_grow (init_vec, nbases, true);
/* Generate the base information initializer. */
for (unsigned ix = nbases; ix--;)
{
vec<int> last_change_age = vNULL;
int prev_age;
- last_visit_age.safe_grow_cleared (n_blocks);
- last_change_age.safe_grow_cleared (n_blocks);
+ last_visit_age.safe_grow_cleared (n_blocks, true);
+ last_change_age.safe_grow_cleared (n_blocks, true);
/* Double-queueing. Worklist is for the current iteration,
and pending is for the next. */
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
if (vec_safe_length (row->reg_save) <= column)
- vec_safe_grow_cleared (row->reg_save, column + 1);
+ vec_safe_grow_cleared (row->reg_save, column + 1, true);
(*row->reg_save)[column] = cfi;
}
early_remat::init_block_info (void)
{
unsigned int n_blocks = last_basic_block_for_fn (m_fn);
- m_block_info.safe_grow_cleared (n_blocks);
+ m_block_info.safe_grow_cleared (n_blocks, true);
}
/* Maps basic block indices to their position in the post order. */
early_remat::finalize_candidate_indices (void)
{
/* Create a bitmap for each candidate register. */
- m_regno_to_candidates.safe_grow (max_reg_num ());
+ m_regno_to_candidates.safe_grow (max_reg_num (), true);
unsigned int regno;
bitmap_iterator bi;
EXECUTE_IF_SET_IN_BITMAP (&m_candidate_regnos, 0, regno, bi)
num_dispatch = vec_safe_length (cfun->eh->lp_array);
if (num_dispatch == 0)
return;
- sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch);
+ sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch, true);
num_dispatch = sjlj_assign_call_site_values ();
if (num_dispatch > 0)
if (unsigned HOST_WIDE_INT size = cur_idx - (last_idx + 1))
{
size = size * elsize + bytes->length ();
- bytes->safe_grow_cleared (size);
+ bytes->safe_grow_cleared (size, true);
}
if (!convert_to_bytes (eltype, val, bytes))
any padding. */
unsigned HOST_WIDE_INT cur_off = int_byte_position (fld);
if (bytes->length () < cur_off)
- bytes->safe_grow_cleared (cur_off);
+ bytes->safe_grow_cleared (cur_off, true);
if (!convert_to_bytes (TREE_TYPE (val), val, bytes))
return false;
unsigned HOST_WIDE_INT type_size = tree_to_uhwi (size);
if (ctor_size < type_size)
if (unsigned HOST_WIDE_INT size_grow = type_size - ctor_size)
- bytes->safe_grow_cleared (bytes->length () + size_grow);
+ bytes->safe_grow_cleared (bytes->length () + size_grow, true);
return true;
}
/* Unlike for RECORD_TYPE, there is no need to clear the memory since
it's completely overwritten by native_encode_expr. */
- bytes->safe_grow (bytes_sofar + expr_bytes);
+ bytes->safe_grow (bytes_sofar + expr_bytes, true);
unsigned char *pnext = bytes->begin () + bytes_sofar;
int nbytes = native_encode_expr (expr, pnext, expr_bytes, 0);
/* NBYTES is zero on failure. Otherwise it should equal EXPR_BYTES. */
max_labelno = max_label_num ();
min_labelno = get_first_label_num ();
- label_align.safe_grow_cleared (max_labelno - min_labelno + 1);
+ label_align.safe_grow_cleared (max_labelno - min_labelno + 1, true);
/* If not optimizing or optimizing for size, don't assign any alignments. */
if (! optimize || optimize_function_for_size_p (cfun))
n_labels = max_labelno - min_labelno + 1;
n_old_labels = old - min_labelno + 1;
- label_align.safe_grow_cleared (n_labels);
+ label_align.safe_grow_cleared (n_labels, true);
/* Range of labels grows monotonically in the function. Failing here
means that the initialization of array got lost. */
if (clauses->orderedc)
{
if (doacross_steps == NULL)
- vec_safe_grow_cleared (doacross_steps, clauses->orderedc);
+ vec_safe_grow_cleared (doacross_steps, clauses->orderedc, true);
(*doacross_steps)[i] = step;
}
}
temp_slots_at_level (int level)
{
if (level >= (int) vec_safe_length (used_temp_slots))
- vec_safe_grow_cleared (used_temp_slots, level + 1);
+ vec_safe_grow_cleared (used_temp_slots, level + 1, true);
return &(*used_temp_slots)[level];
}
df_maybe_reorganize_use_refs (DF_REF_ORDER_BY_INSN_WITH_NOTES);
use_def_ref.create (DF_USES_TABLE_SIZE ());
- use_def_ref.safe_grow_cleared (DF_USES_TABLE_SIZE ());
+ use_def_ref.safe_grow_cleared (DF_USES_TABLE_SIZE (), true);
reg_defs.create (max_reg_num ());
- reg_defs.safe_grow_cleared (max_reg_num ());
+ reg_defs.safe_grow_cleared (max_reg_num (), true);
reg_defs_stack.create (n_basic_blocks_for_fn (cfun) * 10);
local_md = BITMAP_ALLOC (NULL);
/* Set up the use-def chain. */
if (DF_REF_ID (use) >= (int) use_def_ref.length ())
- use_def_ref.safe_grow_cleared (DF_REF_ID (use) + 1);
+ use_def_ref.safe_grow_cleared (DF_REF_ID (use) + 1, true);
if (flag_checking)
gcc_assert (sparseset_bit_p (active_defs_check, regno));
}
while ((p = strchr (p, ',')) != NULL);
- argbuf.safe_grow (old_length + n);
+ argbuf.safe_grow (old_length + n, true);
memmove (argbuf.address () + n,
argbuf.address (),
old_length * sizeof (const_char_p));
tab->check_vect.create (10000);
tab->base_vect.create (0);
- tab->base_vect.safe_grow (automaton->achieved_states_num);
+ tab->base_vect.safe_grow (automaton->achieved_states_num, true);
full_vect_length = (automaton->insn_equiv_classes_num
* automaton->achieved_states_num);
{
size_t full_base = tab->automaton->insn_equiv_classes_num * vect_num;
if (tab->full_vect.length () < full_base + vect_length)
- tab->full_vect.safe_grow (full_base + vect_length);
+ tab->full_vect.safe_grow (full_base + vect_length, true);
for (i = 0; i < vect_length; i++)
tab->full_vect[full_base + i] = vect[i];
}
output_states_vect.create (0);
pass_states (automaton, add_states_vect_el);
- dead_lock_vect.safe_grow (output_states_vect.length ());
+ dead_lock_vect.safe_grow (output_states_vect.length (), true);
for (i = 0; i < output_states_vect.length (); i++)
{
state_t s = output_states_vect[i];
}
force_no_side_effects = 0;
- info.safe_grow_cleared (s->capture_max + 1);
+ info.safe_grow_cleared (s->capture_max + 1, true);
for (int i = 0; i <= s->capture_max; ++i)
info[i].same_as = i;
/* Look for matching captures, diagnose mis-uses of @@ and apply
early lowering and distribution of value_match. */
auto_vec<vec<capture *> > cpts;
- cpts.safe_grow_cleared (capture_ids->elements ());
+ cpts.safe_grow_cleared (capture_ids->elements (), true);
walk_captures (op, cpts);
for (unsigned i = 0; i < cpts.length (); ++i)
{
if (cse_tests_p)
{
known_conditions kc;
- kc.position_tests.safe_grow_cleared (num_positions);
- kc.set_operands.safe_grow_cleared (num_operands);
+ kc.position_tests.safe_grow_cleared (num_positions, true);
+ kc.set_operands.safe_grow_cleared (num_operands, true);
kc.peep2_count = 1;
cse_tests (&root_pos, root, &kc);
}
num_results (0),
routine (0)
{
- transitions.safe_grow_cleared (num_transitions);
+ transitions.safe_grow_cleared (num_transitions, true);
}
/* Describes one way of matching a particular state to a particular
{
transition *trans1 = intersecting[i];
next->truncate (0);
- next->safe_grow (trans1->labels.length () + combined->length ());
+ next->safe_grow (trans1->labels.length () + combined->length (), true);
int_set::iterator end
= std::set_union (trans1->labels.begin (), trans1->labels.end (),
combined->begin (), combined->end (),
}
/* Say that x1 is valid and the rest aren't. */
- os->seen_vars.safe_grow_cleared (num_vars);
+ os->seen_vars.safe_grow_cleared (num_vars, true);
os->seen_vars[1] = true;
}
if (os->type == SUBPATTERN || os->type == RECOG)
optimize_subroutine_group ("peephole2_insns", &peephole2_root);
output_state os;
- os.id_to_var.safe_grow_cleared (num_positions);
+ os.id_to_var.safe_grow_cleared (num_positions, true);
if (use_pattern_routines_p)
{
gcc_obstack_init (&m_obstack);
/* Initialize the loop information. */
- m_loops.safe_grow_cleared (m_nloops);
+ m_loops.safe_grow_cleared (m_nloops, true);
for (unsigned int i = 0; i < m_nloops; ++i)
{
m_loops[i].outermost = get_loop (m_fn, 0);
/* Initialize the list of blocks that belong to each loop. */
unsigned int nbbs = last_basic_block_for_fn (fn);
- m_next_block_in_loop.safe_grow (nbbs);
+ m_next_block_in_loop.safe_grow (nbbs, true);
basic_block bb;
FOR_EACH_BB_FN (bb, fn)
{
unsigned new_len = 3 * uid / 2 + 1;
vec_safe_grow_cleared (label_to_block_map_for_fn (cfun),
- new_len);
+ new_len, true);
}
}
const int nb_loops = number_of_loops (cfun);
vec<tree> iv_map;
iv_map.create (nb_loops);
- iv_map.safe_grow_cleared (nb_loops);
+ iv_map.safe_grow_cleared (nb_loops, true);
build_iv_mapping (iv_map, gbb, user_expr, ip, pbb->scop->scop_info->region);
isl_ast_expr_free (user_expr);
{
int new_luids_max_uid = get_max_uid () + 1;
- sched_luids.safe_grow_cleared (new_luids_max_uid);
+ sched_luids.safe_grow_cleared (new_luids_max_uid, true);
}
/* Initialize LUID for INSN. */
if (reserve > 0
&& ! h_i_d.space (reserve))
{
- h_i_d.safe_grow_cleared (3 * get_max_uid () / 2);
+ h_i_d.safe_grow_cleared (3 * get_max_uid () / 2, true);
sched_extend_target ();
}
}
do \
{ \
insn_addresses_.create (size); \
- insn_addresses_.safe_grow_cleared (size); \
+ insn_addresses_.safe_grow_cleared (size, true); \
memset (insn_addresses_.address (), \
0, sizeof (int) * size); \
} \
if (size <= insn_uid)
{
int *p;
- insn_addresses_.safe_grow (insn_uid + 1);
+ insn_addresses_.safe_grow (insn_uid + 1, true);
p = insn_addresses_.address ();
memset (&p[size],
0, sizeof (int) * (insn_uid + 1 - size));
known_csts->create (0);
known_contexts->create (0);
- known_csts->safe_grow_cleared (count);
- known_contexts->safe_grow_cleared (count);
+ known_csts->safe_grow_cleared (count, true);
+ known_contexts->safe_grow_cleared (count, true);
if (known_aggs)
{
known_aggs->create (0);
- known_aggs->safe_grow_cleared (count);
+ known_aggs->safe_grow_cleared (count, true);
}
if (removable_params_cost)
}
if (!known_contexts->exists ())
- known_contexts->safe_grow_cleared (ipa_get_param_count (info));
+ known_contexts->safe_grow_cleared (ipa_get_param_count (info),
+ true);
(*known_contexts)[i] = newval;
}
unsigned len = type_warnings.length ();
if (newlen > len)
{
- type_warnings.safe_grow_cleared (newlen);
+ type_warnings.safe_grow_cleared (newlen, true);
for (unsigned i = len; i < newlen; i++)
type_warnings[i].dyn_count = profile_count::zero ();
}
/* If this is first time we see the enum, remember its definition. */
if (!existed_p)
{
- this_enum.vals.safe_grow_cleared (nvals);
+ this_enum.vals.safe_grow_cleared (nvals, true);
this_enum.warned = false;
if (dump_file)
fprintf (dump_file, "enum %s\n{\n", name);
{
gcc_checking_assert (TREE_CODE (cst) != TREE_BINFO);
if (!known_vals_ptr->length ())
- vec_safe_grow_cleared (known_vals_ptr, count);
+ vec_safe_grow_cleared (known_vals_ptr, count, true);
(*known_vals_ptr)[i] = cst;
}
else if (inline_p && !es->param[i].change_prob)
{
if (!known_vals_ptr->length ())
- vec_safe_grow_cleared (known_vals_ptr, count);
+ vec_safe_grow_cleared (known_vals_ptr, count, true);
(*known_vals_ptr)[i] = error_mark_node;
}
{
if (!known_value_ranges.length ())
{
- known_value_ranges.safe_grow (count);
+ known_value_ranges.safe_grow (count, true);
for (int i = 0; i < count; ++i)
new (&known_value_ranges[i]) value_range ();
}
if (agg.items.length ())
{
if (!known_aggs_ptr->length ())
- vec_safe_grow_cleared (known_aggs_ptr, count);
+ vec_safe_grow_cleared (known_aggs_ptr, count, true);
(*known_aggs_ptr)[i] = agg;
}
}
if (!ctx.useless_p ())
{
if (!known_contexts_ptr->length ())
- known_contexts_ptr->safe_grow_cleared (count);
+ known_contexts_ptr->safe_grow_cleared (count, true);
(*known_contexts_ptr)[i]
= ipa_context_from_jfunc (caller_parms_info, e, i, jf);
}
if (cst)
{
if (!known_vals_ptr->length ())
- vec_safe_grow_cleared (known_vals_ptr, count);
+ vec_safe_grow_cleared (known_vals_ptr, count, true);
(*known_vals_ptr)[i] = cst;
}
}
struct cgraph_edge *edge, *next;
info->size_time_table = 0;
- known_vals.safe_grow_cleared (count);
+ known_vals.safe_grow_cleared (count, true);
for (i = 0; i < count; i++)
{
struct ipa_replace_map *r;
fbi.node = node;
fbi.info = IPA_NODE_REF (node);
fbi.bb_infos = vNULL;
- fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
fbi.param_count = count_formal_params (node->decl);
fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps);
nonconstant_names.safe_grow_cleared
- (SSANAMES (my_function)->length ());
+ (SSANAMES (my_function)->length (), true);
}
}
int i;
if (count)
- es->param.safe_grow_cleared (count);
+ es->param.safe_grow_cleared (count, true);
for (i = 0; i < count; i++)
{
int prob = param_change_prob (&fbi, stmt, i);
if (count)
{
- operand_map.safe_grow_cleared (count);
- offset_map.safe_grow_cleared (count);
+ operand_map.safe_grow_cleared (count, true);
+ offset_map.safe_grow_cleared (count, true);
}
for (i = 0; i < count; i++)
{
length = streamer_read_uhwi (ib);
if (length && es && e->possibly_call_in_translation_unit_p ())
{
- es->param.safe_grow_cleared (length);
+ es->param.safe_grow_cleared (length, true);
for (i = 0; i < length; i++)
es->param[i].change_prob = streamer_read_uhwi (ib);
}
target++;
if (bb_dict->length () <= (unsigned)source)
- bb_dict->safe_grow_cleared (source + 1);
+ bb_dict->safe_grow_cleared (source + 1, true);
if ((*bb_dict)[source] == 0)
{
if (!info->descriptors && param_count)
{
- vec_safe_grow_cleared (info->descriptors, param_count);
+ vec_safe_grow_cleared (info->descriptors, param_count, true);
return true;
}
else
gcc_checking_assert (fbi);
struct ipa_bb_info *bi = ipa_get_bb_info (fbi, bb);
if (bi->param_aa_statuses.is_empty ())
- bi->param_aa_statuses.safe_grow_cleared (fbi->param_count);
+ bi->param_aa_statuses.safe_grow_cleared (fbi->param_count, true);
struct ipa_param_aa_status *paa = &bi->param_aa_statuses[index];
if (!paa->valid)
{
if (arg_num == 0 || args->jump_functions)
return;
- vec_safe_grow_cleared (args->jump_functions, arg_num);
+ vec_safe_grow_cleared (args->jump_functions, arg_num, true);
if (flag_devirtualize)
- vec_safe_grow_cleared (args->polymorphic_call_contexts, arg_num);
+ vec_safe_grow_cleared (args->polymorphic_call_contexts, arg_num, true);
if (gimple_call_internal_p (call))
return;
fbi.node = node;
fbi.info = IPA_NODE_REF (node);
fbi.bb_infos = vNULL;
- fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
fbi.param_count = ipa_get_param_count (info);
fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps);
if (!dst_ctx)
{
vec_safe_grow_cleared (args->polymorphic_call_contexts,
- count);
+ count, true);
dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
}
if (!dst_ctx)
{
vec_safe_grow_cleared (args->polymorphic_call_contexts,
- count);
+ count, true);
dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
}
dst_ctx->combine_with (ctx);
if (prevails && e->possibly_call_in_translation_unit_p ())
{
class ipa_edge_args *args = IPA_EDGE_REF_GET_CREATE (e);
- vec_safe_grow_cleared (args->jump_functions, count);
+ vec_safe_grow_cleared (args->jump_functions, count, true);
if (contexts_computed)
- vec_safe_grow_cleared (args->polymorphic_call_contexts, count);
+ vec_safe_grow_cleared (args->polymorphic_call_contexts, count, true);
for (int k = 0; k < count; k++)
{
ipa_read_jump_function (ib, ipa_get_ith_jump_func (args, k), e,
{
ipcp_transformation_initialize ();
ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
- vec_safe_grow_cleared (ts->m_vr, count);
+ vec_safe_grow_cleared (ts->m_vr, count, true);
for (i = 0; i < count; i++)
{
ipa_vr *parm_vr;
{
ipcp_transformation_initialize ();
ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
- vec_safe_grow_cleared (ts->bits, count);
+ vec_safe_grow_cleared (ts->bits, count, true);
for (i = 0; i < count; i++)
{
fbi.node = node;
fbi.info = NULL;
fbi.bb_infos = vNULL;
- fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ fbi.bb_infos.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
fbi.param_count = param_count;
fbi.aa_walk_budget = opt_for_fn (node->decl, param_ipa_max_aa_steps);
- vec_safe_grow_cleared (descriptors, param_count);
+ vec_safe_grow_cleared (descriptors, param_count, true);
ipa_populate_param_decls (node, *descriptors);
calculate_dominance_info (CDI_DOMINATORS);
ipcp_modif_dom_walker (&fbi, descriptors, aggval, &something_changed,
int i;
vec_alloc (reference_vars_to_consider, ipa_reference_vars_uids);
- reference_vars_to_consider->safe_grow (ipa_reference_vars_uids);
+ reference_vars_to_consider->safe_grow (ipa_reference_vars_uids, true);
/* See what variables we are interested in. */
for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
calculate_dominance_info (CDI_DOMINATORS);
/* Compute local info about basic blocks and determine function size/time. */
- bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1);
+ bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1, true);
best_split_point.split_bbs = NULL;
basic_block return_bb = find_return_bb ();
int tsan_exit_found = -1;
first_moveable_pseudo = max_regs;
pseudo_replaced_reg.release ();
- pseudo_replaced_reg.safe_grow_cleared (max_regs);
+ pseudo_replaced_reg.safe_grow_cleared (max_regs, true);
df_analyze ();
calculate_dominance_info (CDI_DOMINATORS);
subreg_context = BITMAP_ALLOC (NULL);
reg_copy_graph.create (max);
- reg_copy_graph.safe_grow_cleared (max);
+ reg_copy_graph.safe_grow_cleared (max, true);
memset (reg_copy_graph.address (), 0, sizeof (bitmap) * max);
speed_p = optimize_function_for_speed_p (cfun);
gcc_assert (len == (int) len);
if (len > 0)
{
- vec_safe_grow_cleared (fn->eh->region_array, len);
+ vec_safe_grow_cleared (fn->eh->region_array, len, true);
for (i = 0; i < len; i++)
{
eh_region r = input_eh_region (ib, data_in, i);
gcc_assert (len == (int) len);
if (len > 0)
{
- vec_safe_grow_cleared (fn->eh->lp_array, len);
+ vec_safe_grow_cleared (fn->eh->lp_array, len, true);
for (i = 0; i < len; i++)
{
eh_landing_pad lp = input_eh_lp (ib, data_in, i);
gcc_assert (len == (int) len);
if (len > 0)
{
- vec_safe_grow_cleared (fn->eh->ttype_data, len);
+ vec_safe_grow_cleared (fn->eh->ttype_data, len, true);
for (i = 0; i < len; i++)
{
tree ttype = stream_read_tree (ib, data_in);
{
if (targetm.arm_eabi_unwinder)
{
- vec_safe_grow_cleared (fn->eh->ehspec_data.arm_eabi, len);
+ vec_safe_grow_cleared (fn->eh->ehspec_data.arm_eabi, len, true);
for (i = 0; i < len; i++)
{
tree t = stream_read_tree (ib, data_in);
}
else
{
- vec_safe_grow_cleared (fn->eh->ehspec_data.other, len);
+ vec_safe_grow_cleared (fn->eh->ehspec_data.other, len, true);
for (i = 0; i < len; i++)
{
uchar c = streamer_read_uchar (ib);
last_basic_block_for_fn (fn) = bb_count;
if (bb_count > basic_block_info_for_fn (fn)->length ())
- vec_safe_grow_cleared (basic_block_info_for_fn (fn), bb_count);
+ vec_safe_grow_cleared (basic_block_info_for_fn (fn), bb_count, true);
if (bb_count > label_to_block_map_for_fn (fn)->length ())
- vec_safe_grow_cleared (label_to_block_map_for_fn (fn), bb_count);
+ vec_safe_grow_cleared (label_to_block_map_for_fn (fn), bb_count, true);
index = streamer_read_hwi (ib);
while (index != -1)
if (len > 0)
{
int i;
- vec_safe_grow_cleared (fn->local_decls, len);
+ vec_safe_grow_cleared (fn->local_decls, len, true);
for (i = 0; i < len; i++)
{
tree t = stream_read_tree (ib, data_in);
if (n_debugargs)
{
vec<tree, va_gc> **debugargs = decl_debug_args_insert (fn_decl);
- vec_safe_grow (*debugargs, n_debugargs);
+ vec_safe_grow (*debugargs, n_debugargs, true);
for (unsigned i = 0; i < n_debugargs; ++i)
(**debugargs)[i] = stream_read_tree (ib, data_in);
}
/* Create vector for fast access of resolution. We do this lazily
to save memory. */
- resolutions.safe_grow_cleared (file_data->max_index + 1);
+ resolutions.safe_grow_cleared (file_data->max_index + 1, true);
for (i = 0; file_data->respairs.iterate (i, &rp); i++)
resolutions[rp->index] = rp->res;
file_data->respairs.release ();
set_node_sched_params (ddg_ptr g)
{
node_sched_param_vec.truncate (0);
- node_sched_param_vec.safe_grow_cleared (g->num_nodes);
+ node_sched_param_vec.safe_grow_cleared (g->num_nodes, true);
}
/* Make sure that node_sched_param_vec has an entry for every move in PS. */
extend_node_sched_params (partial_schedule_ptr ps)
{
node_sched_param_vec.safe_grow_cleared (ps->g->num_nodes
- + ps->reg_moves.length ());
+ + ps->reg_moves.length (), true);
}
/* Update the sched_params (time, row and stage) for node U using the II,
/* Create NREG_MOVES register moves. */
first_move = ps->reg_moves.length ();
- ps->reg_moves.safe_grow_cleared (first_move + nreg_moves);
+ ps->reg_moves.safe_grow_cleared (first_move + nreg_moves, true);
extend_node_sched_params (ps);
/* Record the moves associated with this node. */
}
unsigned HOST_WIDE_INT argno = tree_to_uhwi (OMP_CLAUSE_DECL (c));
if (argno >= v->length ())
- v->safe_grow_cleared (argno + 1);
+ v->safe_grow_cleared (argno + 1, true);
(*v)[argno] = c;
}
/* Here, r is used as a bitmask, 2 is set if CLAUSES1 has something
if (!flag_dump_passes)
return;
- pass_tab.safe_grow_cleared (passes_by_id_size + 1);
+ pass_tab.safe_grow_cleared (passes_by_id_size + 1, true);
m_name_to_pass_map->traverse <void *, passes_pass_traverse> (NULL);
}
tab = &disabled_pass_uid_range_tab;
if ((unsigned) pass->static_pass_number >= tab->length ())
- tab->safe_grow_cleared (pass->static_pass_number + 1);
+ tab->safe_grow_cleared (pass->static_pass_number + 1, true);
if (!range_str)
{
propagate_unlikely_bbs_forward ();
auto_vec<int, 64> nsuccs;
- nsuccs.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ nsuccs.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
FOR_ALL_BB_FN (bb, cfun)
if (!(bb->count == profile_count::zero ())
&& bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
return;
}
- bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
edge_gcov_counts = new hash_map<edge,gcov_type>;
/* Attach extra info block to each bb. */
size_t new_size = m_highest_bb_idx + 1;
if (basic_block_info_for_fn (cfun)->length () < new_size)
- vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size, true);
last_basic_block_for_fn (cfun) = new_size;
if (reuse_id != -1)
{
/* Store away for later reuse. */
- m_reuse_rtx_by_id.safe_grow_cleared (reuse_id + 1);
+ m_reuse_rtx_by_id.safe_grow_cleared (reuse_id + 1, true);
m_reuse_rtx_by_id[reuse_id] = return_rtx;
}
/* Allocate some extra size to avoid too many reallocs, but
do not grow too quickly. */
max = uid + uid / 20 + 1;
- stack_regs_mentioned_data.safe_grow_cleared (max);
+ stack_regs_mentioned_data.safe_grow_cleared (max, true);
}
test = stack_regs_mentioned_data[uid];
gcc_obstack_init (&rename_obstack);
insn_rr.create (0);
if (insn_info)
- insn_rr.safe_grow_cleared (get_max_uid ());
+ insn_rr.safe_grow_cleared (get_max_uid (), true);
}
/* Free all global data used by the register renamer. */
/* A previous iteration might also have moved from the stack to the
heap, in which case the heap array will already be big enough. */
if (vec_safe_length (array.heap) <= i)
- vec_safe_grow (array.heap, i + 1);
+ vec_safe_grow (array.heap, i + 1, true);
base = array.heap->address ();
memcpy (base, array.stack, sizeof (array.stack));
base[LOCAL_ELEMS] = x;
{
int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
if (reserve > 0 && ! h_d_i_d.space (reserve))
- h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2);
+ h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true);
}
/* If it is profitable to use them, initialize or extend (depending on
void
sel_extend_global_bb_info (void)
{
- sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
}
/* Extend region-scope data structures for basic blocks. */
static void
extend_region_bb_info (void)
{
- sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
}
/* Extend all data structures to fit for all basic blocks. */
size = 3 * sched_max_luid / 2;
- s_i_d.safe_grow_cleared (size);
+ s_i_d.safe_grow_cleared (size, true);
}
}
list = &ref_list;
old_references = vec_safe_address (list->references);
- vec_safe_grow (list->references, vec_safe_length (list->references) + 1);
+ vec_safe_grow (list->references, vec_safe_length (list->references) + 1,
+ true);
ref = &list->references->last ();
list2 = &referred_node->ref_list;
tail_duplicate (void)
{
auto_vec<fibonacci_node<long, basic_block_def>*> blocks;
- blocks.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ blocks.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
basic_block *trace = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
int *counts = XNEWVEC (int, last_basic_block_for_fn (cfun));
/* We could store this information in bb->aux, but we may get called
through get_all_tm_blocks() from another pass that may be already
using bb->aux. */
- bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun));
+ bb_regions.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
vec<tm_region *> ret;
ret.create (n);
- ret.safe_grow_cleared (n);
+ ret.safe_grow_cleared (n, true);
stuff.bb2reg = &ret;
stuff.include_uninstrumented_p = include_uninstrumented_p;
expand_regions (all_tm_regions, collect_bb2reg, &stuff, traverse_clones);
last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity);
vec_safe_grow_cleared (basic_block_info_for_fn (fn),
- initial_cfg_capacity);
+ initial_cfg_capacity, true);
/* Build a mapping of labels to their associated blocks. */
vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity);
vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
- initial_cfg_capacity);
+ initial_cfg_capacity, true);
SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));
if (basic_block_info_for_fn (cfun)->length ()
< (size_t) n_basic_blocks_for_fn (cfun))
vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
- n_basic_blocks_for_fn (cfun));
+ n_basic_blocks_for_fn (cfun), true);
/* To speed up statement iterator walks, we first purge dead labels. */
cleanup_dead_labels ();
size_t new_size =
(last_basic_block_for_fn (cfun)
+ (last_basic_block_for_fn (cfun) + 3) / 4);
- vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
+ vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size, true);
}
/* Add the newly created block to the array. */
if ((unsigned) cfg->x_last_basic_block >= old_len)
{
new_len = cfg->x_last_basic_block + (cfg->x_last_basic_block + 3) / 4;
- vec_safe_grow_cleared (cfg->x_basic_block_info, new_len);
+ vec_safe_grow_cleared (cfg->x_basic_block_info, new_len, true);
}
(*cfg->x_basic_block_info)[bb->index] = bb;
if (old_len <= (unsigned) uid)
{
new_len = 3 * uid / 2 + 1;
- vec_safe_grow_cleared (cfg->x_label_to_block_map, new_len);
+ vec_safe_grow_cleared (cfg->x_label_to_block_map, new_len, true);
}
(*cfg->x_label_to_block_map)[uid] = bb;
return 0;
complex_lattice_values.create (num_ssa_names);
- complex_lattice_values.safe_grow_cleared (num_ssa_names);
+ complex_lattice_values.safe_grow_cleared (num_ssa_names, true);
init_parameter_lattice_values ();
class complex_propagate complex_propagate;
complex_variable_components = new int_tree_htab_type (10);
complex_ssa_name_components.create (2 * num_ssa_names);
- complex_ssa_name_components.safe_grow_cleared (2 * num_ssa_names);
+ complex_ssa_name_components.safe_grow_cleared (2 * num_ssa_names, true);
update_parameter_components ();
/* Construct the arguments to the conditional internal function. */
auto_vec<tree, 8> args;
- args.safe_grow (nops + 1);
+ args.safe_grow (nops + 1, true);
args[0] = mask;
for (unsigned int i = 1; i < nops; ++i)
args[i] = gimple_op (stmt, i);
/* Create the new array of arguments. */
n = nargs + gimple_call_num_args (call_stmt);
argarray.create (n);
- argarray.safe_grow_cleared (n);
+ argarray.safe_grow_cleared (n, true);
/* Copy all the arguments before '...' */
memcpy (argarray.address (),
/* Re-allocate the vector at most once per update/into-SSA. */
if (ver >= len)
- info_for_ssa_name.safe_grow_cleared (num_ssa_names);
+ info_for_ssa_name.safe_grow_cleared (num_ssa_names, true);
/* But allocate infos lazily. */
info = info_for_ssa_name[ver];
{
n = (unsigned) last_basic_block_for_fn (cfun) + 1;
if (phis_to_rewrite.length () < n)
- phis_to_rewrite.safe_grow_cleared (n);
+ phis_to_rewrite.safe_grow_cleared (n, true);
phis = phis_to_rewrite[idx];
gcc_assert (!phis.exists ());
unsigned int i;
if (num_ssa_names > object_sizes[object_size_type].length ())
- object_sizes[object_size_type].safe_grow (num_ssa_names);
+ object_sizes[object_size_type].safe_grow (num_ssa_names, true);
if (dump_file)
{
fprintf (dump_file, "Computing %s %sobject size for ",
for (object_size_type = 0; object_size_type <= 3; object_size_type++)
{
- object_sizes[object_size_type].safe_grow (num_ssa_names);
+ object_sizes[object_size_type].safe_grow (num_ssa_names, true);
computed[object_size_type] = BITMAP_ALLOC (NULL);
}
unsigned i, n = chain->length;
chain->vars.create (n);
- chain->vars.safe_grow_cleared (n);
+ chain->vars.safe_grow_cleared (n, true);
/* Initialize root value for eliminated stores at each distance. */
for (i = 0; i < n; i++)
/* Root values are either rhs operand of stores to be eliminated, or
loaded from memory before loop. */
auto_vec<tree> vtemps;
- vtemps.safe_grow_cleared (n);
+ vtemps.safe_grow_cleared (n, true);
for (i = 0; i < n; i++)
{
init = get_init_expr (chain, i);
}
chain->inits.create (n);
- chain->inits.safe_grow_cleared (n);
+ chain->inits.safe_grow_cleared (n, true);
/* For store elimination chain like below:
elements because loop body is guaranteed to be executed at least once
after loop's preheader edge. */
auto_vec<bool> bubbles;
- bubbles.safe_grow_cleared (n + 1);
+ bubbles.safe_grow_cleared (n + 1, true);
for (i = 0; i < chain->refs.length (); i++)
bubbles[chain->refs[i]->distance] = true;
= TEMPL_IDX (as, addr->symbol, addr->base, addr->index, st, off);
if (templ_index >= vec_safe_length (mem_addr_template_list))
- vec_safe_grow_cleared (mem_addr_template_list, templ_index + 1);
+ vec_safe_grow_cleared (mem_addr_template_list, templ_index + 1, true);
/* Reuse the templates for addresses, so that we do not waste memory. */
templ = &(*mem_addr_template_list)[templ_index];
sbitmap valid_mult;
if (data_index >= valid_mult_list.length ())
- valid_mult_list.safe_grow_cleared (data_index + 1);
+ valid_mult_list.safe_grow_cleared (data_index + 1, true);
valid_mult = valid_mult_list[data_index];
if (!valid_mult)
ptr = XNEW (ssa_conflicts);
bitmap_obstack_initialize (&ptr->obstack);
ptr->conflicts.create (size);
- ptr->conflicts.safe_grow_cleared (size);
+ ptr->conflicts.safe_grow_cleared (size, true);
return ptr;
}
tree one_constant = NULL_TREE;
tree one_nonconstant = NULL_TREE;
auto_vec<tree> constants;
- constants.safe_grow_cleared (nelts);
+ constants.safe_grow_cleared (nelts, true);
auto_vec<std::pair<unsigned, unsigned>, 64> elts;
FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (op), i, elt)
{
list_index = (unsigned) as * MAX_MACHINE_MODE + (unsigned) mem_mode;
if (list_index >= vec_safe_length (addr_list))
- vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE);
+ vec_safe_grow_cleared (addr_list, list_index + MAX_MACHINE_MODE, true);
addr = (*addr_list)[list_index];
if (!addr)
unsigned nsize = ((unsigned) as + 1) * MAX_MACHINE_MODE;
gcc_assert (nsize > idx);
- ainc_cost_data_list.safe_grow_cleared (nsize);
+ ainc_cost_data_list.safe_grow_cleared (nsize, true);
}
ainc_cost_data *data = ainc_cost_data_list[idx];
/* Start walk in loop header with index set to infinite bound. */
queue_index = bounds.length ();
- queues.safe_grow_cleared (queue_index + 1);
+ queues.safe_grow_cleared (queue_index + 1, true);
queue.safe_push (loop->header);
queues[queue_index] = queue;
block_priority.put (loop->header, queue_index);
if (v >= value_expressions.length ())
{
- value_expressions.safe_grow_cleared (v + 1);
+ value_expressions.safe_grow_cleared (v + 1, true);
}
set = value_expressions[v];
else
{
new_val_id = get_next_value_id ();
- value_expressions.safe_grow_cleared (get_max_value_id () + 1);
+ value_expressions.safe_grow_cleared (get_max_value_id () + 1,
+ true);
nary = vn_nary_op_insert_pieces (newnary->length,
newnary->opcode,
newnary->type,
{
new_val_id = get_next_value_id ();
value_expressions.safe_grow_cleared
- (get_max_value_id () + 1);
+ (get_max_value_id () + 1, true);
}
else
new_val_id = ref->value_id;
int i;
exprs = sorted_array_from_bitmap_set (ANTIC_IN (block));
- avail.safe_grow (EDGE_COUNT (block->preds));
+ avail.safe_grow (EDGE_COUNT (block->preds), true);
FOR_EACH_VEC_ELT (exprs, i, expr)
{
int i;
exprs = sorted_array_from_bitmap_set (PA_IN (block));
- avail.safe_grow (EDGE_COUNT (block->preds));
+ avail.safe_grow (EDGE_COUNT (block->preds), true);
FOR_EACH_VEC_ELT (exprs, i, expr)
{
expressions.create (0);
expressions.safe_push (NULL);
value_expressions.create (get_max_value_id () + 1);
- value_expressions.safe_grow_cleared (get_max_value_id () + 1);
+ value_expressions.safe_grow_cleared (get_max_value_id () + 1, true);
name_to_id.create (0);
inserted_exprs = BITMAP_ALLOC (NULL);
FOR_EACH_EDGE (e, ei, bb->succs)
e->flags &= ~EDGE_EXECUTABLE;
}
- uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun));
+ uid_to_stmt.safe_grow (gimple_stmt_max_uid (cfun), true);
}
if (nargs > 0)
{
args.create (nargs);
- args.safe_grow_cleared (nargs);
+ args.safe_grow_cleared (nargs, true);
for (i = 0; i < nargs; i++)
args[i] = CALL_EXPR_ARG (expr, i);
b = TYPE_PRECISION (TREE_TYPE (ranges[i].exp)) * 2 + !zero_p;
if (buckets.length () <= b)
- buckets.safe_grow_cleared (b + 1);
+ buckets.safe_grow_cleared (b + 1, true);
if (chains.length () <= (unsigned) i)
- chains.safe_grow (i + 1);
+ chains.safe_grow (i + 1, true);
chains[i] = buckets[b];
buckets[b] = i + 1;
}
/* We need to pre-pend vr->operands[0..i] to rhs. */
vec<vn_reference_op_s> old = vr->operands;
if (i + 1 + rhs.length () > vr->operands.length ())
- vr->operands.safe_grow (i + 1 + rhs.length ());
+ vr->operands.safe_grow (i + 1 + rhs.length (), true);
else
vr->operands.truncate (i + 1 + rhs.length ());
FOR_EACH_VEC_ELT (rhs, j, vro)
if (vr->operands.length () < 2)
{
vec<vn_reference_op_s> old = vr->operands;
- vr->operands.safe_grow_cleared (2);
+ vr->operands.safe_grow_cleared (2, true);
if (old == shared_lookup_references)
shared_lookup_references = vr->operands;
}
vr1.vuse = vuse_ssa_val (vuse);
shared_lookup_references.truncate (0);
- shared_lookup_references.safe_grow (operands.length ());
+ shared_lookup_references.safe_grow (operands.length (), true);
memcpy (shared_lookup_references.address (),
operands.address (),
sizeof (vn_reference_op_s)
if (TREE_CODE (valnum) == SSA_NAME)
{
if (avail.length () <= SSA_NAME_VERSION (valnum))
- avail.safe_grow_cleared (SSA_NAME_VERSION (valnum) + 1);
+ avail.safe_grow_cleared (SSA_NAME_VERSION (valnum) + 1, true);
tree pushop = op;
if (avail[SSA_NAME_VERSION (valnum)])
pushop = avail[SSA_NAME_VERSION (valnum)];
if (vec_safe_length (stridx_to_strinfo) && (*stridx_to_strinfo)[0])
unshare_strinfo_vec ();
if (vec_safe_length (stridx_to_strinfo) <= (unsigned int) idx)
- vec_safe_grow_cleared (stridx_to_strinfo, idx + 1);
+ vec_safe_grow_cleared (stridx_to_strinfo, idx + 1, true);
(*stridx_to_strinfo)[idx] = si;
}
if (TREE_CODE (ptr) == SSA_NAME
&& ssa_ver_to_stridx.length () <= SSA_NAME_VERSION (ptr))
- ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names, true);
gcc_checking_assert (compare_tree_int (si->nonzero_chars, off) != -1);
for (chainsi = si; chainsi->next; chainsi = si)
strinfo *si;
int idx;
if (ssa_ver_to_stridx.length () <= SSA_NAME_VERSION (ptr))
- ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names, true);
gcc_checking_assert (TREE_CODE (ptr) == SSA_NAME
&& ssa_ver_to_stridx[SSA_NAME_VERSION (ptr)] == 0);
/* We might find an endptr created in this pass. Grow the
vector in that case. */
if (ssa_ver_to_stridx.length () <= SSA_NAME_VERSION (ptr))
- ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names, true);
if (ssa_ver_to_stridx[SSA_NAME_VERSION (ptr)] != 0)
return;
/* This has to happen after initializing the loop optimizer
and initializing SCEV as they create new SSA_NAMEs. */
- ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
+ ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names, true);
max_stridx = 1;
/* String length optimization is implemented as a walk of the dominator
set_ssa_name_value (tree name, tree value)
{
if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
- ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1);
+ ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
if (value && TREE_OVERFLOW_P (value))
value = drop_tree_overflow (value);
ssa_name_values[SSA_NAME_VERSION (name)] = value;
t = make_node (SSA_NAME);
SSA_NAME_VERSION (t) = version;
if (version >= SSANAMES (fn)->length ())
- vec_safe_grow_cleared (SSANAMES (fn), version + 1);
+ vec_safe_grow_cleared (SSANAMES (fn), version + 1, true);
gcc_assert ((*SSANAMES (fn))[version] == NULL);
(*SSANAMES (fn))[version] = t;
ssa_name_nodes_created++;
{
unsigned HOST_WIDE_INT length = bp_unpack_var_len_unsigned (&bp);
if (length > 0)
- vec_safe_grow (CONSTRUCTOR_ELTS (expr), length);
+ vec_safe_grow (CONSTRUCTOR_ELTS (expr), length, true);
}
#ifndef ACCEL_COMPILER
{
gcc_assert (nvectors != 0);
if (masks->length () < nvectors)
- masks->safe_grow_cleared (nvectors);
+ masks->safe_grow_cleared (nvectors, true);
rgroup_controls *rgm = &(*masks)[nvectors - 1];
/* The number of scalars per iteration and the number of vectors are
both compile-time constants. */
used it. */
if (rgm->controls.is_empty ())
{
- rgm->controls.safe_grow_cleared (nvectors);
+ rgm->controls.safe_grow_cleared (nvectors, true);
for (unsigned int i = 0; i < nvectors; ++i)
{
tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
{
gcc_assert (nvectors != 0);
if (lens->length () < nvectors)
- lens->safe_grow_cleared (nvectors);
+ lens->safe_grow_cleared (nvectors, true);
rgroup_controls *rgl = &(*lens)[nvectors - 1];
/* The number of scalars per iteration, scalar occupied bytes and
used it. */
if (rgl->controls.is_empty ())
{
- rgl->controls.safe_grow_cleared (nvectors);
+ rgl->controls.safe_grow_cleared (nvectors, true);
for (unsigned int i = 0; i < nvectors; ++i)
{
tree len_type = LOOP_VINFO_RGROUP_COMPARE_TYPE (loop_vinfo);
auto_vec<tree, 8> args;
unsigned int nargs = gimple_call_num_args (last_stmt);
- args.safe_grow (nargs);
+ args.safe_grow (nargs, true);
for (unsigned int i = 0; i < nargs; ++i)
args[i] = ((int) i == mask_argno
? tmp
(need to) ignore child nodes of anything that isn't vect_internal_def. */
unsigned int group_size = SLP_TREE_LANES (node);
SLP_TREE_DEF_TYPE (node) = vect_external_def;
- SLP_TREE_SCALAR_OPS (node).safe_grow (group_size);
+ SLP_TREE_SCALAR_OPS (node).safe_grow (group_size, true);
FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
{
tree lhs = gimple_get_lhs (vect_orig_stmt (stmt_info)->stmt);
confine changes in the callee to the current child/subtree. */
if (SLP_TREE_CODE (node) == VEC_PERM_EXPR)
{
- subtree_life.safe_grow_cleared (SLP_TREE_LANES (child));
+ subtree_life.safe_grow_cleared (SLP_TREE_LANES (child), true);
for (unsigned j = 0;
j < SLP_TREE_LANE_PERMUTATION (node).length (); ++j)
{
FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
auto_vec<bool, 20> life;
- life.safe_grow_cleared (SLP_TREE_LANES (SLP_INSTANCE_TREE (instance)));
+ life.safe_grow_cleared (SLP_TREE_LANES (SLP_INSTANCE_TREE (instance)),
+ true);
vect_bb_slp_scalar_cost (bb_vinfo,
SLP_INSTANCE_TREE (instance),
&life, &scalar_costs, visited);
auto_vec<std::pair<std::pair<unsigned, unsigned>, unsigned> > vperm;
auto_vec<unsigned> active_lane;
vperm.create (olanes);
- active_lane.safe_grow_cleared (SLP_TREE_CHILDREN (node).length ());
+ active_lane.safe_grow_cleared (SLP_TREE_CHILDREN (node).length (), true);
for (unsigned i = 0; i < vf; ++i)
{
for (unsigned pi = 0; pi < perm.length (); ++pi)
if (modifier == NONE || ifn != IFN_LAST)
{
tree prev_res = NULL_TREE;
- vargs.safe_grow (nargs);
- orig_vargs.safe_grow (nargs);
+ vargs.safe_grow (nargs, true);
+ orig_vargs.safe_grow (nargs, true);
auto_vec<vec<tree> > vec_defs (nargs);
for (j = 0; j < ncopies; ++j)
{
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
{
STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
- + 1);
+ + 1,
+ true);
STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
? size_type_node : TREE_TYPE (arginfo[i].op);
auto_vec<vec<tree> > vec_oprnds;
auto_vec<unsigned> vec_oprnds_i;
- vec_oprnds.safe_grow_cleared (nargs);
- vec_oprnds_i.safe_grow_cleared (nargs);
+ vec_oprnds.safe_grow_cleared (nargs, true);
+ vec_oprnds_i.safe_grow_cleared (nargs, true);
for (j = 0; j < ncopies; ++j)
{
/* Build argument list for the vectorized call. */
if (use_whole_vector)
{
if (kind != scan_store_kind_perm && use_whole_vector->is_empty ())
- use_whole_vector->safe_grow_cleared (i);
+ use_whole_vector->safe_grow_cleared (i, true);
if (kind != scan_store_kind_perm || !use_whole_vector->is_empty ())
use_whole_vector->safe_push (kind);
}
tree vec_mask = NULL;
auto_vec<tree> vec_offsets;
auto_vec<vec<tree> > gvec_oprnds;
- gvec_oprnds.safe_grow_cleared (group_size);
+ gvec_oprnds.safe_grow_cleared (group_size, true);
for (j = 0; j < ncopies; j++)
{
gimple *new_stmt;
{
auto_vec <int> v;
ASSERT_EQ (0, v.length ());
- v.safe_grow_cleared (50);
+ v.safe_grow_cleared (50, true);
ASSERT_EQ (50, v.length ());
ASSERT_EQ (0, v[0]);
ASSERT_EQ (0, v[49]);
/* Grow V to length LEN. Allocate it, if necessary. */
template<typename T, typename A>
inline void
-vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len CXX_MEM_STAT_INFO)
+vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len,
+ bool exact CXX_MEM_STAT_INFO)
{
unsigned oldlen = vec_safe_length (v);
gcc_checking_assert (len >= oldlen);
- vec_safe_reserve_exact (v, len - oldlen PASS_MEM_STAT);
+ vec_safe_reserve (v, len - oldlen, exact PASS_MEM_STAT);
v->quick_grow (len);
}
/* If V is NULL, allocate it. Call V->safe_grow_cleared(LEN). */
template<typename T, typename A>
inline void
-vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len CXX_MEM_STAT_INFO)
+vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len,
+ bool exact CXX_MEM_STAT_INFO)
{
unsigned oldlen = vec_safe_length (v);
- vec_safe_grow (v, len PASS_MEM_STAT);
+ vec_safe_grow (v, len, exact PASS_MEM_STAT);
vec_default_construct (v->address () + oldlen, len - oldlen);
}
template<typename T>
inline void
vec_safe_grow_cleared (vec<T, va_heap, vl_ptr> *&v,
- unsigned len CXX_MEM_STAT_INFO)
+ unsigned len, bool exact CXX_MEM_STAT_INFO)
{
- v->safe_grow_cleared (len PASS_MEM_STAT);
+ v->safe_grow_cleared (len, exact PASS_MEM_STAT);
}
/* If V does not have space for NELEMS elements, call
T *safe_push (const T &CXX_MEM_STAT_INFO);
T &pop (void);
void truncate (unsigned);
- void safe_grow (unsigned CXX_MEM_STAT_INFO);
- void safe_grow_cleared (unsigned CXX_MEM_STAT_INFO);
+ void safe_grow (unsigned, bool CXX_MEM_STAT_INFO);
+ void safe_grow_cleared (unsigned, bool CXX_MEM_STAT_INFO);
void quick_grow (unsigned);
void quick_grow_cleared (unsigned);
void quick_insert (unsigned, const T &);
template<typename T>
inline void
-vec<T, va_heap, vl_ptr>::safe_grow (unsigned len MEM_STAT_DECL)
+vec<T, va_heap, vl_ptr>::safe_grow (unsigned len, bool exact MEM_STAT_DECL)
{
unsigned oldlen = length ();
gcc_checking_assert (oldlen <= len);
- reserve_exact (len - oldlen PASS_MEM_STAT);
+ reserve (len - oldlen, exact PASS_MEM_STAT);
if (m_vec)
m_vec->quick_grow (len);
else
template<typename T>
inline void
-vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len MEM_STAT_DECL)
+vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len, bool exact
+ MEM_STAT_DECL)
{
unsigned oldlen = length ();
size_t growby = len - oldlen;
- safe_grow (len PASS_MEM_STAT);
+ safe_grow (len, exact PASS_MEM_STAT);
if (growby != 0)
vec_default_construct (address () + oldlen, growby);
}
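
A minimal usage sketch of the new interface above (illustrative only, not part of the patch; the vector name and sizes are made up). The added bool chooses between the old exact reservation and an ordinary amortized reservation:

  auto_vec<int> v;
  /* Grow to exactly 32 zero-initialized elements; exact = true keeps the
     previous reserve-exact behaviour.  */
  v.safe_grow_cleared (32, true);
  /* Grow by one more element, letting reserve pick an amortized allocation.  */
  v.safe_grow (v.length () + 1, false);

Every call site touched in this patch passes true, so behaviour is unchanged here; non-exact growth can presumably be adopted later at call sites that extend a vector repeatedly.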