+2008-06-25 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>
+
+ * config/i386/driver-i386.c (detect_caches_amd,
+ detect_caches_intel, host_detect_local_cpu): Fix -Wc++-compat
+ and/or -Wcast-qual warnings.
+ * ggc-common.c (ggc_mark_roots, gt_pch_note_object,
+ gt_pch_note_reorder, relocate_ptrs, write_pch_globals,
+ gt_pch_save): Likewise.
+ * ggc-page.c (push_depth, push_by_depth, alloc_anon, alloc_page,
+ gt_ggc_m_S, clear_marks, ggc_pch_read): Likewise.
+ * global.c (compute_regsets): Likewise.
+ * graph.c (print_rtl_graph_with_bb, clean_graph_dump_file,
+ finish_graph_dump_file): Likewise.
+ * haifa-sched.c (schedule_block, extend_h_i_d, extend_ready,
+ unlink_bb_notes): Likewise.
+ * integrate.c (get_hard_reg_initial_val): Likewise.
+ * ipa-prop.c (ipa_push_func_to_list): Likewise.
+ * ipa-struct-reorg.c (gen_var_name, gen_cluster_name): Likewise.
+ * local-alloc.c (update_equiv_regs): Likewise.
+ * loop-invariant.c (check_invariant_table_size,
+ hash_invariant_expr, eq_invariant_expr, find_or_insert_inv):
+ Likewise.
+ * loop-iv.c (check_iv_ref_table_size, analyzed_for_bivness_p,
+ altered_reg_used, mark_altered): Likewise.
+ * loop-unroll.c (si_info_eq, ve_info_eq, allocate_basic_variable,
+ insert_var_expansion_initialization,
+ combine_var_copies_in_loop_exit, apply_opt_in_copies,
+ release_var_copies): Likewise.
+ * matrix-reorg.c (mat_acc_phi_hash, mat_acc_phi_eq, mtt_info_eq,
+ analyze_matrix_decl, add_allocation_site, analyze_transpose,
+ analyze_accesses_for_phi_node, check_var_notmodified_p,
+ check_allocation_function, find_sites_in_func,
+ record_all_accesses_in_func, transform_access_sites,
+ transform_allocation_sites, dump_matrix_reorg_analysis): Likewise.
+ * omp-low.c (new_omp_region, create_omp_child_function_name,
+ scan_omp_1, check_omp_nesting_restrictions, check_combined_parallel,
+ lower_omp_2, diagnose_sb_1, diagnose_sb_2): Likewise.
+ * optabs.c (no_conflict_move_test, gen_libfunc, gen_fp_libfunc,
+ gen_intv_fp_libfunc, gen_interclass_conv_libfunc,
+ gen_intraclass_conv_libfunc, set_optab_libfunc, set_conv_libfunc):
+ Likewise.
+ * opts-common.c (prune_options): Likewise.
+ * opts.c (add_input_filename, print_filtered_help,
+ get_option_state): Likewise.
+ * params.c (add_params): Likewise.
+ * passes.c (set_pass_for_id, next_pass_1,
+ do_per_function_toporder, pass_fini_dump_file,
+ update_properties_after_pass): Likewise.
+ * postreload.c (reload_cse_simplify_operands): Likewise.
+ * predict.c (tree_predicted_by_p, tree_predict_edge,
+ clear_bb_predictions, combine_predictions_for_bb): Likewise.
+
2008-06-25 Kaveh R. Ghazi <ghazi@caip.rutgers.edu>

 * ra.h (add_neighbor): Fix -Wc++-compat and/or -Wcast-qual
 warnings.
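The hunks below all follow two idioms. -Wc++-compat flags C that is not
also valid C++, most commonly the implicit conversion from the void *
returned by allocators and hash-table routines to a typed pointer;
-Wcast-qual flags casts that drop a const qualifier. A minimal sketch of
both warnings and of the fixes used throughout this patch (the names
here are illustrative, not taken from the patch):

    #include <stdlib.h>
    #include <string.h>

    struct ptr_data { void *obj; size_t size; };

    static struct ptr_data *
    example (const void *obj)
    {
      /* Implicit void * conversion: valid C, ill-formed C++, and so
         flagged by -Wc++-compat.  The fix is an explicit cast, or a
         libiberty XNEW-style macro that hides the same cast.  */
      struct ptr_data *slot
        = (struct ptr_data *) malloc (sizeof (struct ptr_data));

      /* (char *) obj would drop a qualifier and trip -Wcast-qual;
         casting to a const-qualified pointer type does not.  */
      slot->size = strlen ((const char *) obj) + 1;
      return slot;
    }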
/* Returns the description of caches for an AMD processor. */
-static char *
+static const char *
detect_caches_amd (unsigned max_ext_level)
{
unsigned eax, ebx, ecx, edx;
unsigned l2_sizekb, l2_line, l2_assoc;
if (max_ext_level < 0x80000005)
- return (char *) "";
+ return "";
__cpuid (0x80000005, eax, ebx, ecx, edx);
/* Returns the description of caches for an Intel processor. */
-static char *
+static const char *
detect_caches_intel (unsigned max_level, unsigned max_ext_level)
{
unsigned eax, ebx, ecx, edx;
unsigned l2_sizekb = 0, l2_line = 0, l2_assoc = 0;
if (max_level < 2)
- return (char *) "";
+ return "";
__cpuid (2, eax, ebx, ecx, edx);
&l2_sizekb, &l2_line, &l2_assoc);
if (!l1_sizekb)
- return (char *) "";
+ return "";
/* Newer Intel CPUs are equipped with AMD style L2 cache info.  */
if (max_ext_level >= 0x80000006)
if (!arch)
{
- if (vendor == *(unsigned int*) "Auth")
+ if (vendor == *(const unsigned int*) "Auth")
cache = detect_caches_amd (ext_level);
- else if (vendor == *(unsigned int*) "Genu")
+ else if (vendor == *(const unsigned int*) "Genu")
cache = detect_caches_intel (max_level, ext_level);
}
- if (vendor == *(unsigned int*) "Auth")
+ if (vendor == *(const unsigned int*) "Auth")
{
processor = PROCESSOR_PENTIUM;
if (has_sse4a)
processor = PROCESSOR_AMDFAM10;
}
- else if (vendor == *(unsigned int*) "Geod")
+ else if (vendor == *(const unsigned int*) "Geod")
processor = PROCESSOR_GEODE;
else
{
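The vendor checks above compare the CPUID vendor string four bytes at a
time by reading a string literal through an unsigned int pointer
("AuthenticAMD" begins with "Auth", "GenuineIntel" with "Genu"). A
string literal is an array of const char, so the punning cast must keep
the const qualifier to satisfy -Wcast-qual. A sketch of the idiom,
assuming the caller passes the first four vendor bytes as returned in
EBX by CPUID leaf 0:

    static int
    vendor_is_amd (unsigned int vendor_ebx)
    {
      /* Type-pun the first four bytes of the literal; the pointer
         stays const-qualified, so -Wcast-qual is satisfied.  */
      return vendor_ebx == *(const unsigned int *) "Auth";
    }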
if (*cti->base)
{
ggc_set_mark (*cti->base);
- htab_traverse_noresize (*cti->base, ggc_htab_delete, (void *) cti);
+ htab_traverse_noresize (*cti->base, ggc_htab_delete,
+ CONST_CAST (void *, (const void *)cti));
ggc_set_mark ((*cti->base)->entries);
}
return 0;
}
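htab_traverse_noresize takes its callback argument as a plain void *,
while cti here points to const data, so a direct cast would trip
-Wcast-qual. CONST_CAST from GCC's system.h strips the qualifier
without warning; approximately (a sketch of the definition of that era,
not a verbatim copy):

    #if defined(__GNUC__)
    /* Under GCC, go through a union so that no qualifier is cast away
       and -Wcast-qual stays quiet.  */
    #define CONST_CAST(TYPE, X) \
      ((__extension__ (union { const TYPE _q; TYPE _nq; }) (X))._nq)
    #else
    #define CONST_CAST(TYPE, X) ((TYPE) (X))
    #endif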
- *slot = xcalloc (sizeof (struct ptr_data), 1);
+ *slot = XCNEW (struct ptr_data);
(*slot)->obj = obj;
(*slot)->note_ptr_fn = note_ptr_fn;
(*slot)->note_ptr_cookie = note_ptr_cookie;
if (note_ptr_fn == gt_pch_p_S)
- (*slot)->size = strlen (obj) + 1;
+ (*slot)->size = strlen ((const char *)obj) + 1;
else
(*slot)->size = ggc_get_size (obj);
(*slot)->type = type;
if (obj == NULL || obj == (void *) 1)
return;
- data = htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
+ data = (struct ptr_data *)
+ htab_find_with_hash (saving_htab, obj, POINTER_HASH (obj));
gcc_assert (data && data->note_ptr_cookie == note_ptr_cookie);
data->reorder_fn = reorder_fn;
if (*ptr == NULL || *ptr == (void *)1)
return;
- result = htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
+ result = (struct ptr_data *)
+ htab_find_with_hash (saving_htab, *ptr, POINTER_HASH (*ptr));
gcc_assert (result);
*ptr = result->new_addr;
}
}
else
{
- new_ptr = htab_find_with_hash (saving_htab, ptr,
- POINTER_HASH (ptr));
+ new_ptr = (struct ptr_data *)
+ htab_find_with_hash (saving_htab, ptr, POINTER_HASH (ptr));
if (fwrite (&new_ptr->new_addr, sizeof (void *), 1, state->f)
!= 1)
fatal_error ("can't write PCH file: %m");
if (this_object_size < state.ptrs[i]->size)
{
this_object_size = state.ptrs[i]->size;
- this_object = xrealloc (this_object, this_object_size);
+ this_object = XRESIZEVAR (char, this_object, this_object_size);
}
memcpy (this_object, state.ptrs[i]->obj, state.ptrs[i]->size);
if (state.ptrs[i]->reorder_fn != NULL)
if (G.depth_in_use >= G.depth_max)
{
G.depth_max *= 2;
- G.depth = xrealloc (G.depth, G.depth_max * sizeof (unsigned int));
+ G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
}
G.depth[G.depth_in_use++] = i;
}
if (G.by_depth_in_use >= G.by_depth_max)
{
G.by_depth_max *= 2;
- G.by_depth = xrealloc (G.by_depth,
- G.by_depth_max * sizeof (page_entry *));
- G.save_in_use = xrealloc (G.save_in_use,
- G.by_depth_max * sizeof (unsigned long *));
+ G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
+ G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
+ G.by_depth_max);
}
G.by_depth[G.by_depth_in_use] = p;
G.save_in_use[G.by_depth_in_use++] = s;
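The xcalloc and xrealloc rewrites here and below lean on the typed
allocation macros from include/libiberty.h, which bake the cast into
the macro so each call site stays warning-free under both compilers.
Their definitions are approximately:

    #define XNEW(T)             ((T *) xmalloc (sizeof (T)))
    #define XCNEW(T)            ((T *) xcalloc (1, sizeof (T)))
    #define XNEWVEC(T, N)       ((T *) xmalloc (sizeof (T) * (N)))
    #define XCNEWVEC(T, N)      ((T *) xcalloc ((N), sizeof (T)))
    #define XRESIZEVEC(T, P, N) ((T *) xrealloc ((void *) (P), sizeof (T) * (N)))
    #define XNEWVAR(T, S)       ((T *) xmalloc ((S)))
    #define XCNEWVAR(T, S)      ((T *) xcalloc (1, (S)))
    #define XRESIZEVAR(T, P, S) ((T *) xrealloc ((P), (S)))
    #define XALLOCAVEC(T, N)    ((T *) alloca (sizeof (T) * (N)))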
alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size)
{
#ifdef HAVE_MMAP_ANON
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
#endif
#ifdef HAVE_MMAP_DEV_ZERO
- char *page = mmap (pref, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE, G.dev_zero_fd, 0);
+ char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
if (page == (char *) MAP_FAILED)
memory order. */
for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
{
- e = xcalloc (1, page_entry_size);
+ e = XCNEWVAR (struct page_entry, page_entry_size);
e->order = order;
e->bytes = G.pagesize;
e->page = page + (i << G.lg_pagesize);
#endif
if (entry == NULL)
- entry = xcalloc (1, page_entry_size);
+ entry = XCNEWVAR (struct page_entry, page_entry_size);
entry->bytes = entry_size;
entry->page = page;
a STRING_CST. */
gcc_assert (offset == offsetof (struct tree_string, str));
p = ((const char *) p) - offset;
- gt_ggc_mx_lang_tree_node ((void *) p);
+ gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
return;
}
if (p->context_depth < G.context_depth)
{
if (! save_in_use_p (p))
- save_in_use_p (p) = xmalloc (bitmap_size);
+ save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
}
{
struct ggc_pch_ondisk d;
unsigned i;
- char *offs = addr;
+ char *offs = (char *) addr;
unsigned long count_old_page_tables;
unsigned long count_new_page_tables;
bytes = ROUND_UP (d.totals[i] * OBJECT_SIZE (i), G.pagesize);
num_objs = bytes / OBJECT_SIZE (i);
- entry = xcalloc (1, (sizeof (struct page_entry)
- - sizeof (long)
- + BITMAP_SIZE (num_objs + 1)));
+ entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
+ - sizeof (long)
+ + BITMAP_SIZE (num_objs + 1)));
entry->bytes = bytes;
entry->page = offs;
entry->context_depth = 0;
/* Like regs_ever_live, but 1 if a reg is set or clobbered from an asm.
Unlike regs_ever_live, elements of this array corresponding to
eliminable regs like the frame pointer are set if an asm sets them. */
- char *regs_asm_clobbered = alloca (FIRST_PSEUDO_REGISTER * sizeof (char));
+ char *regs_asm_clobbered = XALLOCAVEC (char, FIRST_PSEUDO_REGISTER);
#ifdef ELIMINABLE_REGS
static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
rtx tmp_rtx;
size_t namelen = strlen (base);
size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
- char *buf = alloca (namelen + extlen);
+ char *buf = XALLOCAVEC (char, namelen + extlen);
FILE *fp;
if (basic_block_info == NULL)
{
size_t namelen = strlen (base);
size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
- char *buf = alloca (namelen + extlen);
+ char *buf = XALLOCAVEC (char, namelen + extlen);
FILE *fp;
memcpy (buf, base, namelen);
{
size_t namelen = strlen (base);
size_t extlen = strlen (graph_ext[graph_dump_format]) + 1;
- char *buf = alloca (namelen + extlen);
+ char *buf = XALLOCAVEC (char, namelen + extlen);
FILE *fp;
memcpy (buf, base, namelen);
q_ptr = 0;
q_size = 0;
- insn_queue = alloca ((max_insn_queue_index + 1) * sizeof (rtx));
+ insn_queue = XALLOCAVEC (rtx, max_insn_queue_index + 1);
memset (insn_queue, 0, (max_insn_queue_index + 1) * sizeof (rtx));
/* Start just before the beginning of time. */
pseudos which do not cross calls. */
int new_max_uid = get_max_uid () + 1;
- h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
+ h_i_d = (struct haifa_insn_data *)
+ xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
old_max_uid = new_max_uid;
if (targetm.sched.h_i_d_extended)
readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
- ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
- rgn_n_insns + 1, sizeof (char));
+ ready_try = (char *) xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
+ rgn_n_insns + 1, sizeof (char));
rgn_n_insns += n_new_insns;
if (first == last)
return;
- bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
+ bb_header = XNEWVEC (rtx, last_basic_block);
/* Make a sentinel. */
if (last->next_bb != EXIT_BLOCK_PTR)
ivs = crtl->hard_reg_initial_vals;
if (ivs == 0)
{
- ivs = ggc_alloc (sizeof (initial_value_struct));
+ ivs = GGC_NEW (initial_value_struct);
ivs->num_entries = 0;
ivs->max_entries = 5;
- ivs->entries = ggc_alloc (5 * sizeof (initial_value_pair));
+ ivs->entries = GGC_NEWVEC (initial_value_pair, 5);
crtl->hard_reg_initial_vals = ivs;
}
if (ivs->num_entries >= ivs->max_entries)
{
ivs->max_entries += 5;
- ivs->entries = ggc_realloc (ivs->entries,
- ivs->max_entries
- * sizeof (initial_value_pair));
+ ivs->entries = GGC_RESIZEVEC (initial_value_pair, ivs->entries,
+ ivs->max_entries);
}
ivs->entries[ivs->num_entries].hard_reg = gen_rtx_REG (mode, regno);
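For garbage-collected storage the same pattern appears as GGC_* macros
wrapping ggc_alloc and friends in ggc.h; roughly (a sketch, not a
verbatim copy):

    #define GGC_NEW(T)             ((T *) ggc_alloc (sizeof (T)))
    #define GGC_CNEW(T)            ((T *) ggc_alloc_cleared (sizeof (T)))
    #define GGC_NEWVEC(T, N)       ((T *) ggc_alloc (sizeof (T) * (N)))
    #define GGC_CNEWVEC(T, N)      ((T *) ggc_alloc_cleared (sizeof (T) * (N)))
    #define GGC_RESIZEVEC(T, P, N) ((T *) ggc_realloc ((P), sizeof (T) * (N)))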
{
struct ipa_func_list *temp;
- temp = xcalloc (1, sizeof (struct ipa_func_list));
+ temp = XCNEW (struct ipa_func_list);
temp->node = mt;
temp->next = *wl;
*wl = temp;
appropriate new name for the new variable. */
old_name = IDENTIFIER_POINTER (DECL_NAME (orig_decl));
- prefix = alloca (strlen (old_name) + 1);
+ prefix = XALLOCAVEC (char, strlen (old_name) + 1);
strcpy (prefix, old_name);
ASM_FORMAT_PRIVATE_NAME (new_name, prefix, i);
return get_identifier (new_name);
ASM_FORMAT_PRIVATE_NAME(tmp_name, "struct", str_num);
len = strlen (tmp_name ? tmp_name : orig_name) + strlen ("_sub");
- prefix = alloca (len + 1);
+ prefix = XALLOCAVEC (char, len + 1);
memcpy (prefix, tmp_name ? tmp_name : orig_name,
strlen (tmp_name ? tmp_name : orig_name));
strcpy (prefix + strlen (tmp_name ? tmp_name : orig_name), "_sub");
bitmap cleared_regs;
reg_equiv = XCNEWVEC (struct equivalence, max_regno);
- reg_equiv_init = ggc_alloc_cleared (max_regno * sizeof (rtx));
+ reg_equiv_init = GGC_CNEWVEC (rtx, max_regno);
reg_equiv_init_size = max_regno;
init_alias_analysis ();
if (invariant_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- invariant_table = xrealloc (invariant_table,
- sizeof (struct rtx_iv *) * new_size);
+ invariant_table = XRESIZEVEC (struct invariant *, invariant_table, new_size);
memset (&invariant_table[invariant_table_size], 0,
(new_size - invariant_table_size) * sizeof (struct rtx_iv *));
invariant_table_size = new_size;
static hashval_t
hash_invariant_expr (const void *e)
{
- const struct invariant_expr_entry *entry = e;
+ const struct invariant_expr_entry *const entry =
+ (const struct invariant_expr_entry *) e;
return entry->hash;
}
static int
eq_invariant_expr (const void *e1, const void *e2)
{
- const struct invariant_expr_entry *entry1 = e1;
- const struct invariant_expr_entry *entry2 = e2;
+ const struct invariant_expr_entry *const entry1 =
+ (const struct invariant_expr_entry *) e1;
+ const struct invariant_expr_entry *const entry2 =
+ (const struct invariant_expr_entry *) e2;
if (entry1->mode != entry2->mode)
return 0;
pentry.inv = inv;
pentry.mode = mode;
slot = htab_find_slot_with_hash (eq, &pentry, hash, INSERT);
- entry = *slot;
+ entry = (struct invariant_expr_entry *) *slot;
if (entry)
return entry->inv;
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- iv_ref_table = xrealloc (iv_ref_table,
- sizeof (struct rtx_iv *) * new_size);
+ iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
memset (&iv_ref_table[iv_ref_table_size], 0,
(new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
iv_ref_table_size = new_size;
static bool
analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
{
- struct biv_entry *biv = htab_find_with_hash (bivs, def, REGNO (def));
+ struct biv_entry *biv =
+ (struct biv_entry *) htab_find_with_hash (bivs, def, REGNO (def));
if (!biv)
return false;
if (!REG_P (*reg))
return 0;
- return REGNO_REG_SET_P (alt, REGNO (*reg));
+ return REGNO_REG_SET_P ((bitmap) alt, REGNO (*reg));
}
/* Marks registers altered by EXPR in set ALT. */
if (!REG_P (expr))
return;
- SET_REGNO_REG_SET (alt, REGNO (expr));
+ SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr));
}
/* Checks whether RHS is simple enough to process. */
static int
si_info_eq (const void *ivts1, const void *ivts2)
{
- const struct iv_to_split *i1 = ivts1;
- const struct iv_to_split *i2 = ivts2;
+ const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1;
+ const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2;
return i1->insn == i2->insn;
}
static int
ve_info_eq (const void *ivts1, const void *ivts2)
{
- const struct var_to_expand *i1 = ivts1;
- const struct var_to_expand *i2 = ivts2;
+ const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1;
+ const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2;
return i1->insn == i2->insn;
}
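The hash and equality callbacks above have the fixed signatures required
by libiberty's hashtab (hashval_t (*) (const void *) and
int (*) (const void *, const void *)), so each one must recover its
element type with an explicit cast; the patch consistently binds the
result to a const-qualified local. The shape of the pattern, with a
hypothetical element type:

    #include "hashtab.h"

    /* Hypothetical element type, for illustration only.  */
    struct demo_entry { const void *key; hashval_t hash; };

    static hashval_t
    demo_hash (const void *p)
    {
      const struct demo_entry *const e = (const struct demo_entry *) p;
      return e->hash;
    }

    static int
    demo_eq (const void *p1, const void *p2)
    {
      const struct demo_entry *const e1 = (const struct demo_entry *) p1;
      const struct demo_entry *const e2 = (const struct demo_entry *) p2;
      return e1->key == e2->key;
    }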
static int
allocate_basic_variable (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct iv_to_split *ivts = *slot;
+ struct iv_to_split *ivts = (struct iv_to_split *) *slot;
rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);
ivts->base_var = gen_reg_rtx (GET_MODE (expr));
static int
insert_var_expansion_initialization (void **slot, void *place_p)
{
- struct var_to_expand *ve = *slot;
+ struct var_to_expand *ve = (struct var_to_expand *) *slot;
basic_block place = (basic_block)place_p;
rtx seq, var, zero_init, insn;
unsigned i;
static int
combine_var_copies_in_loop_exit (void **slot, void *place_p)
{
- struct var_to_expand *ve = *slot;
+ struct var_to_expand *ve = (struct var_to_expand *) *slot;
basic_block place = (basic_block)place_p;
rtx sum = ve->reg;
rtx expr, seq, var, insn;
/* Apply splitting iv optimization. */
if (opt_info->insns_to_split)
{
- ivts = htab_find (opt_info->insns_to_split, &ivts_templ);
+ ivts = (struct iv_to_split *)
+ htab_find (opt_info->insns_to_split, &ivts_templ);
if (ivts)
{
/* Apply variable expansion optimization. */
if (unrolling && opt_info->insns_with_var_to_expand)
{
- ves = htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
+ ves = (struct var_to_expand *)
+ htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
if (ves)
{
gcc_assert (GET_CODE (PATTERN (insn))
ivts_templ.insn = orig_insn;
if (opt_info->insns_to_split)
{
- ivts = htab_find (opt_info->insns_to_split, &ivts_templ);
+ ivts = (struct iv_to_split *)
+ htab_find (opt_info->insns_to_split, &ivts_templ);
if (ivts)
{
if (!delta)
static int
release_var_copies (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct var_to_expand *ve = *slot;
+ struct var_to_expand *ve = (struct var_to_expand *) *slot;
VEC_free (rtx, heap, ve->var_expansions);
static hashval_t
mat_acc_phi_hash (const void *p)
{
- const struct matrix_access_phi_node *ma_phi = p;
+ const struct matrix_access_phi_node *const ma_phi =
+ (const struct matrix_access_phi_node *) p;
return htab_hash_pointer (ma_phi->phi);
}
static int
mat_acc_phi_eq (const void *p1, const void *p2)
{
- const struct matrix_access_phi_node *phi1 = p1;
- const struct matrix_access_phi_node *phi2 = p2;
+ const struct matrix_access_phi_node *const phi1 =
+ (const struct matrix_access_phi_node *) p1;
+ const struct matrix_access_phi_node *const phi2 =
+ (const struct matrix_access_phi_node *) p2;
if (phi1->phi == phi2->phi)
return 1;
static int
mtt_info_eq (const void *mtt1, const void *mtt2)
{
- const struct matrix_info *i1 = mtt1;
- const struct matrix_info *i2 = mtt2;
+ const struct matrix_info *const i1 = (const struct matrix_info *) mtt1;
+ const struct matrix_info *const i2 = (const struct matrix_info *) mtt2;
if (i1->decl == i2->decl)
return true;
/* Check to see if this pointer is already in there. */
tmpmi.decl = var_decl;
- mi = htab_find (matrices_to_reorg, &tmpmi);
+ mi = (struct matrix_info *) htab_find (matrices_to_reorg, &tmpmi);
if (mi)
return NULL;
calls like calloc and realloc. */
if (!mi->malloc_for_level)
{
- mi->malloc_for_level = xcalloc (level + 1, sizeof (tree));
+ mi->malloc_for_level = XCNEWVEC (tree, level + 1);
mi->max_malloced_level = level + 1;
}
else if (mi->max_malloced_level <= level)
{
mi->malloc_for_level
- = xrealloc (mi->malloc_for_level, (level + 1) * sizeof (tree));
+ = XRESIZEVEC (tree, mi->malloc_for_level, level + 1);
/* Zero the newly allocated items. */
memset (&(mi->malloc_for_level[mi->max_malloced_level + 1]),
static int
analyze_transpose (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
int min_escape_l = mi->min_indirect_level_escape;
struct loop *loop;
affine_iv iv;
struct matrix_access_phi_node tmp_maphi, *maphi, **pmaphi;
tmp_maphi.phi = use_stmt;
- if ((maphi = htab_find (htab_mat_acc_phi_nodes, &tmp_maphi)))
+ if ((maphi = (struct matrix_access_phi_node *)
+ htab_find (htab_mat_acc_phi_nodes, &tmp_maphi)))
{
if (maphi->indirection_level == current_indirect_level)
return;
{
basic_block bb;
tree t = *tp;
- tree fn = data;
+ tree fn = (tree) data;
block_stmt_iterator bsi;
tree stmt;
int level;
block_stmt_iterator bsi;
basic_block bb_level_0;
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
sbitmap visited;
if (!mi->malloc_for_level)
&& TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 0)) == VAR_DECL)
{
tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 0);
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg,
+ &tmpmi)))
{
sbitmap_zero (visited_stmts_1);
analyze_matrix_allocation_site (mi, stmt, 0, visited_stmts_1);
&& TREE_CODE (GIMPLE_STMT_OPERAND (stmt, 1)) == VAR_DECL)
{
tmpmi.decl = GIMPLE_STMT_OPERAND (stmt, 1);
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg,
+ &tmpmi)))
{
sbitmap_zero (visited_stmts_1);
analyze_matrix_accesses (mi,
chain for this SSA_VAR and check for escapes or apply the
flattening. */
tmpmi.decl = rhs;
- if ((mi = htab_find (matrices_to_reorg, &tmpmi)))
+ if ((mi = (struct matrix_info *) htab_find (matrices_to_reorg, &tmpmi)))
{
/* This variable will track the visited PHI nodes, so we can limit
its size to the maximum number of SSA names. */
transform_access_sites (void **slot, void *data ATTRIBUTE_UNUSED)
{
block_stmt_iterator bsi;
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
int min_escape_l = mi->min_indirect_level_escape;
struct access_site_info *acc_info;
int i;
int min_escape_l;
int id;
- mi = *slot;
+ mi = (struct matrix_info *) *slot;
min_escape_l = mi->min_indirect_level_escape;
static int
dump_matrix_reorg_analysis (void **slot, void *data ATTRIBUTE_UNUSED)
{
- struct matrix_info *mi = *slot;
+ struct matrix_info *mi = (struct matrix_info *) *slot;
if (!dump_file)
return 1;
struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
- struct omp_region *region = xcalloc (1, sizeof (*region));
+ struct omp_region *region = XCNEW (struct omp_region);
region->outer = parent;
region->entry = bb;
const char *suffix;
suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
- prefix = alloca (len + strlen (suffix) + 1);
+ prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
memcpy (prefix, IDENTIFIER_POINTER (name), len);
strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
- struct walk_stmt_info *wi = data;
- omp_context *ctx = wi->info;
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ omp_context *ctx = (omp_context *) wi->info;
tree t = *tp;
if (EXPR_HAS_LOCATION (t))
static tree
check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
{
- struct walk_stmt_info *wi = data;
- int *info = wi->info;
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ int *info = (int *) wi->info;
*walk_subtrees = 0;
switch (TREE_CODE (*tp))
lower_omp_2 (tree *tp, int *walk_subtrees, void *data)
{
tree t = *tp;
- omp_context *ctx = data;
+ omp_context *ctx = (omp_context *) data;
/* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
if (TREE_CODE (t) == VAR_DECL
static tree
diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
{
- struct walk_stmt_info *wi = data;
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
tree context = (tree) wi->info;
tree inner_context;
tree t = *tp;
static tree
diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
{
- struct walk_stmt_info *wi = data;
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
tree context = (tree) wi->info;
splay_tree_node n;
tree t = *tp;
static void
no_conflict_move_test (rtx dest, const_rtx set, void *p0)
{
- struct no_conflict_data *p= p0;
+ struct no_conflict_data *p= (struct no_conflict_data *) p0;
/* If this insn directly contributes to setting the target, it must stay. */
if (reg_overlap_mentioned_p (p->target, dest))
unsigned opname_len = strlen (opname);
const char *mname = GET_MODE_NAME (mode);
unsigned mname_len = strlen (mname);
- char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
char *p;
const char *q;
gen_libfunc (optable, opname, suffix, mode);
if (DECIMAL_FLOAT_MODE_P (mode))
{
- dec_opname = alloca (sizeof (DECIMAL_PREFIX) + strlen (opname));
+ dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
/* For BID support, change the name to have either a bid_ or dpd_ prefix
depending on the low level floating format used. */
memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
if (GET_MODE_CLASS (mode) == MODE_INT)
{
int len = strlen (name);
- char *v_name = alloca (len + 2);
+ char *v_name = XALLOCAVEC (char, len + 2);
strcpy (v_name, name);
v_name[len] = 'v';
v_name[len + 1] = 0;
mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
- nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
nondec_name[0] = '_';
nondec_name[1] = '_';
memcpy (&nondec_name[2], opname, opname_len);
nondec_suffix = nondec_name + opname_len + 2;
- dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
+ dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
dec_name[0] = '_';
dec_name[1] = '_';
memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
- nondec_name = alloca (2 + opname_len + mname_len + 1 + 1);
+ nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
nondec_name[0] = '_';
nondec_name[1] = '_';
memcpy (&nondec_name[2], opname, opname_len);
nondec_suffix = nondec_name + opname_len + 2;
- dec_name = alloca (2 + dec_len + opname_len + mname_len + 1 + 1);
+ dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
dec_name[0] = '_';
dec_name[1] = '_';
memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
val = 0;
slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
if (*slot == NULL)
- *slot = ggc_alloc (sizeof (struct libfunc_entry));
+ *slot = GGC_NEW (struct libfunc_entry);
(*slot)->optab = (size_t) (optable - &optab_table[0]);
(*slot)->mode1 = mode;
(*slot)->mode2 = VOIDmode;
val = 0;
slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
if (*slot == NULL)
- *slot = ggc_alloc (sizeof (struct libfunc_entry));
+ *slot = GGC_NEW (struct libfunc_entry);
(*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
(*slot)->mode1 = tmode;
(*slot)->mode2 = fmode;
prune_options (int *argcp, char ***argvp)
{
int argc = *argcp;
- int *options = xmalloc (argc * sizeof (*options));
- char **argv = xmalloc (argc * sizeof (char *));
+ int *options = XNEWVEC (int, argc);
+ char **argv = XNEWVEC (char *, argc);
int i, arg_count, need_prune = 0;
const struct cl_option *option;
size_t opt_index;
add_input_filename (const char *filename)
{
num_in_fnames++;
- in_fnames = xrealloc (in_fnames, num_in_fnames * sizeof (in_fnames[0]));
+ in_fnames = XRESIZEVEC (const char *, in_fnames, num_in_fnames);
in_fnames[num_in_fnames - 1] = filename;
}
}
if (!printed)
- printed = xcalloc (1, cl_options_count);
+ printed = XCNEWVAR (char, cl_options_count);
for (i = 0; i < cl_options_count; i++)
{
state->data = *(const char **) cl_options[option].flag_var;
if (state->data == 0)
state->data = "";
- state->size = strlen (state->data) + 1;
+ state->size = strlen ((const char *) state->data) + 1;
break;
}
return true;
add_params (const param_info params[], size_t n)
{
/* Allocate enough space for the new parameters. */
- compiler_params = xrealloc (compiler_params,
- (num_compiler_params + n) * sizeof (param_info));
+ compiler_params = XRESIZEVEC (param_info, compiler_params,
+ num_compiler_params + n);
/* Copy them into the table. */
memcpy (compiler_params + num_compiler_params,
params,
pass->static_pass_number = id;
if (passes_by_id_size <= id)
{
- passes_by_id = xrealloc (passes_by_id, (id + 1) * sizeof (void *));
+ passes_by_id = XRESIZEVEC (struct opt_pass *, passes_by_id, id + 1);
memset (passes_by_id + passes_by_id_size, 0,
(id + 1 - passes_by_id_size) * sizeof (void *));
passes_by_id_size = id + 1;
{
struct opt_pass *new;
- new = xmalloc (sizeof (*new));
+ new = XNEW (struct opt_pass);
memcpy (new, pass, sizeof (*new));
new->next = NULL;
else
{
gcc_assert (!order);
- order = ggc_alloc (sizeof (*order) * cgraph_n_nodes);
+ order = GGC_NEWVEC (struct cgraph_node *, cgraph_n_nodes);
nnodes = cgraph_postorder (order);
for (i = nnodes - 1; i >= 0; i--)
{
static void
update_properties_after_pass (void *data)
{
- struct opt_pass *pass = data;
+ struct opt_pass *pass = (struct opt_pass *) data;
cfun->curr_properties = (cfun->curr_properties | pass->properties_provided)
& ~pass->properties_destroyed;
}
if (! constrain_operands (1))
fatal_insn_not_found (insn);
- alternative_reject = alloca (recog_data.n_alternatives * sizeof (int));
- alternative_nregs = alloca (recog_data.n_alternatives * sizeof (int));
- alternative_order = alloca (recog_data.n_alternatives * sizeof (int));
+ alternative_reject = XALLOCAVEC (int, recog_data.n_alternatives);
+ alternative_nregs = XALLOCAVEC (int, recog_data.n_alternatives);
+ alternative_order = XALLOCAVEC (int, recog_data.n_alternatives);
memset (alternative_reject, 0, recog_data.n_alternatives * sizeof (int));
memset (alternative_nregs, 0, recog_data.n_alternatives * sizeof (int));
int regno;
const char *p;
- op_alt_regno[i] = alloca (recog_data.n_alternatives * sizeof (int));
+ op_alt_regno[i] = XALLOCAVEC (int, recog_data.n_alternatives);
for (j = 0; j < recog_data.n_alternatives; j++)
op_alt_regno[i][j] = -1;
if (!preds)
return false;
- for (i = *preds; i; i = i->ep_next)
+ for (i = (struct edge_prediction *) *preds; i; i = i->ep_next)
if (i->ep_predictor == predictor)
return true;
return false;
struct edge_prediction *i = XNEW (struct edge_prediction);
void **preds = pointer_map_insert (bb_predictions, e->src);
- i->ep_next = *preds;
+ i->ep_next = (struct edge_prediction *) *preds;
*preds = i;
i->ep_probability = probability;
i->ep_predictor = predictor;
if (!preds)
return;
- for (pred = *preds; pred; pred = next)
+ for (pred = (struct edge_prediction *) *preds; pred; pred = next)
{
next = pred->ep_next;
free (pred);
{
/* We implement "first match" heuristics and use probability guessed
by predictor with smallest index. */
- for (pred = *preds; pred; pred = pred->ep_next)
+ for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
{
int predictor = pred->ep_predictor;
int probability = pred->ep_probability;
if (preds)
{
- for (pred = *preds; pred; pred = pred->ep_next)
+ for (pred = (struct edge_prediction *) *preds; pred; pred = pred->ep_next)
{
int predictor = pred->ep_predictor;
int probability = pred->ep_probability;