+2015-10-01 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * cfganal.c, compare-elim.c, coverage.c, cprop.c, df-scan.c,
+ function.c, read-rtl.c, statistics.c, trans-mem.c, tree-if-conv.c,
+ tree-into-ssa.c, tree-loop-distribution.c, tree-ssa-coalesce.c,
+ tree-ssa-loop-ivopts.c, tree-ssa-reassoc.c, tree-ssa-strlen.c,
+ tree-ssa-tail-merge.c, tree-vrp.c, var-tracking.c: Remove unused
+ typedefs.
+
2015-10-01 Marek Polacek <polacek@redhat.com>
PR c/65345
#include "timevar.h"
/* Store the data structures necessary for depth-first search. */
-struct depth_first_search_dsS {
+struct depth_first_search_ds {
/* stack for backtracking during the algorithm */
basic_block *stack;
/* record of basic blocks already seen by depth-first search */
sbitmap visited_blocks;
};
-typedef struct depth_first_search_dsS *depth_first_search_ds;
-static void flow_dfs_compute_reverse_init (depth_first_search_ds);
-static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds,
+static void flow_dfs_compute_reverse_init (depth_first_search_ds *);
+static void flow_dfs_compute_reverse_add_bb (depth_first_search_ds *,
basic_block);
-static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds,
+static basic_block flow_dfs_compute_reverse_execute (depth_first_search_ds *,
basic_block);
-static void flow_dfs_compute_reverse_finish (depth_first_search_ds);
+static void flow_dfs_compute_reverse_finish (depth_first_search_ds *);
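The hunk above is the pattern the whole patch applies: drop the struct tag suffix and the pointer-hiding typedef, and spell the pointer out at every use. A minimal self-contained C++ sketch of the before/after shapes (illustrative names only, not GCC code):

/* Before: "search_ds_old" hides a pointer behind a typedef.  */
struct search_dsS { int *stack; };
typedef struct search_dsS *search_ds_old;
static void init_old (search_ds_old data) { data->stack = nullptr; }

/* After: one plain struct name, and the pointer is explicit.  */
struct search_ds { int *stack; };
static void init_new (search_ds *data) { data->stack = nullptr; }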
\f
/* Mark the back edges in DFS traversal.
Return nonzero if a loop (natural or otherwise) is present.
{
basic_block unvisited_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
basic_block deadend_block;
- struct depth_first_search_dsS dfs_ds;
+ depth_first_search_ds dfs_ds;
/* Perform depth-first search in the reverse graph to find nodes
reachable from the exit block. */
element on the stack. */
static void
-flow_dfs_compute_reverse_init (depth_first_search_ds data)
+flow_dfs_compute_reverse_init (depth_first_search_ds *data)
{
/* Allocate stack for back-tracking up CFG. */
data->stack = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
block. */
static void
-flow_dfs_compute_reverse_add_bb (depth_first_search_ds data, basic_block bb)
+flow_dfs_compute_reverse_add_bb (depth_first_search_ds *data, basic_block bb)
{
data->stack[data->sp++] = bb;
bitmap_set_bit (data->visited_blocks, bb->index);
available. */
static basic_block
-flow_dfs_compute_reverse_execute (depth_first_search_ds data,
+flow_dfs_compute_reverse_execute (depth_first_search_ds *data,
basic_block last_unvisited)
{
basic_block bb;
reverse graph. */
static void
-flow_dfs_compute_reverse_finish (depth_first_search_ds data)
+flow_dfs_compute_reverse_finish (depth_first_search_ds *data)
{
free (data->stack);
sbitmap_free (data->visited_blocks);
bool inputs_valid;
};
-typedef struct comparison *comparison_struct_p;
-
-static vec<comparison_struct_p> all_compares;
+static vec<comparison *> all_compares;
/* Look for a "conforming" comparison, as defined above. If valid, return
the rtx for the COMPARE itself. */
};
/* Counts information for a function. */
-typedef struct counts_entry : pointer_hash <counts_entry>
+struct counts_entry : pointer_hash <counts_entry>
{
 /* We hash by ident. */
unsigned ident;
static inline hashval_t hash (const counts_entry *);
static int equal (const counts_entry *, const counts_entry *);
static void remove (counts_entry *);
-} counts_entry_t;
+};
static GTY(()) struct coverage_data *functions_head = 0;
static struct coverage_data **functions_tail = &functions_head;
}
else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident)
{
- counts_entry_t **slot, *entry, elt;
+ counts_entry **slot, *entry, elt;
unsigned n_counts = GCOV_TAG_COUNTER_NUM (length);
unsigned ix;
entry = *slot;
if (!entry)
{
- *slot = entry = XCNEW (counts_entry_t);
+ *slot = entry = XCNEW (counts_entry);
entry->ident = fn_ident;
entry->ctr = elt.ctr;
entry->lineno_checksum = lineno_checksum;
unsigned cfg_checksum, unsigned lineno_checksum,
const struct gcov_ctr_summary **summary)
{
- counts_entry_t *entry, elt;
+ counts_entry *entry, elt;
/* No hash table, no counts. */
if (!counts_hash)
rtx_insn *insn;
};
-typedef struct cprop_occr *occr_t;
-
/* Hash table entry for assignment expressions. */
struct cprop_expr
#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
-typedef struct df_mw_hardreg *df_mw_hardreg_ptr;
-
-
/* The set of hard registers in eliminables[i].from. */
static HARD_REG_SET elim_reg_set;
auto_vec<df_ref, 128> def_vec;
auto_vec<df_ref, 32> use_vec;
auto_vec<df_ref, 32> eq_use_vec;
- auto_vec<df_mw_hardreg_ptr, 32> mw_vec;
+ auto_vec<df_mw_hardreg *, 32> mw_vec;
};
static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
bitmap_obstack insn_bitmaps;
};
-typedef struct df_scan_bb_info *df_scan_bb_info_t;
-
-
/* Internal function to shut down the scanning problem. */
static void
df_scan_free_internal (void)
/* Sort and compress a set of refs. */
static void
-df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
+df_sort_and_compress_mws (vec<df_mw_hardreg *, va_heap> *mw_vec)
{
unsigned int count;
struct df_scan_problem_data *problem_data
insn. */
static struct df_mw_hardreg *
-df_install_mws (const vec<df_mw_hardreg_ptr, va_heap> *old_vec)
+df_install_mws (const vec<df_mw_hardreg *, va_heap> *old_vec)
{
unsigned int count = old_vec->length ();
if (count)
/* Verify that NEW_REC and OLD_REC have exactly the same members. */
static bool
-df_mws_verify (const vec<df_mw_hardreg_ptr, va_heap> *new_rec,
+df_mws_verify (const vec<df_mw_hardreg *, va_heap> *new_rec,
struct df_mw_hardreg *old_rec,
bool abort_if_fail)
{
/* Stack of nested functions. */
/* Keep track of the cfun stack. */
-typedef struct function *function_p;
-
-static vec<function_p> function_context_stack;
+static vec<function *> function_context_stack;
/* Save the current context for compilation of a nested function.
This is called from language-specific code. */
/* Initialized with NOGC, making this poisonous to the garbage collector. */
-static vec<function_p> cfun_stack;
+static vec<function *> cfun_stack;
/* Push the current cfun onto the stack, and set cfun to new_cfun. Also set
current_function_decl accordingly. */
struct map_value *current_value;
};
-/* Vector definitions for the above. */
-typedef struct mapping *mapping_ptr;
-
/* A structure for abstracting the common parts of iterators. */
struct iterator_group {
/* Tables of "mapping" structures, one for attributes and one for
static struct iterator_group modes, codes, ints, substs;
/* All iterators used in the current rtx. */
-static vec<mapping_ptr> current_iterators;
+static vec<mapping *> current_iterators;
/* The list of all iterator uses in the current rtx. */
static vec<iterator_use> iterator_uses;
 /* Statistics entry. An integer counter associated with a string ID
and value. */
-typedef struct statistics_counter_s {
+struct statistics_counter {
const char *id;
int val;
bool histogram_p;
unsigned HOST_WIDE_INT count;
unsigned HOST_WIDE_INT prev_dumped_count;
-} statistics_counter_t;
+};
/* Hashtable helpers. */
-struct stats_counter_hasher : pointer_hash <statistics_counter_t>
+struct stats_counter_hasher : pointer_hash <statistics_counter>
{
- static inline hashval_t hash (const statistics_counter_t *);
- static inline bool equal (const statistics_counter_t *,
- const statistics_counter_t *);
- static inline void remove (statistics_counter_t *);
+ static inline hashval_t hash (const statistics_counter *);
+ static inline bool equal (const statistics_counter *,
+ const statistics_counter *);
+ static inline void remove (statistics_counter *);
};
/* Hash a statistic counter by its string ID. */
inline hashval_t
-stats_counter_hasher::hash (const statistics_counter_t *c)
+stats_counter_hasher::hash (const statistics_counter *c)
{
return htab_hash_string (c->id) + c->val;
}
/* Compare two statistic counters by their string IDs. */
inline bool
-stats_counter_hasher::equal (const statistics_counter_t *c1,
- const statistics_counter_t *c2)
+stats_counter_hasher::equal (const statistics_counter *c1,
+ const statistics_counter *c2)
{
return c1->val == c2->val && strcmp (c1->id, c2->id) == 0;
}
/* Free a statistics entry. */
inline void
-stats_counter_hasher::remove (statistics_counter_t *v)
+stats_counter_hasher::remove (statistics_counter *v)
{
free (CONST_CAST (char *, v->id));
free (v);
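The hasher above keys each counter on its string ID plus its value. The same keying scheme, sketched free-standing with the standard library instead of GCC's hash_table (names here are hypothetical):

#include <functional>
#include <string>
#include <unordered_set>

struct counter { std::string id; int val; };

struct counter_hash {
  size_t operator() (const counter *c) const
  { return std::hash<std::string> () (c->id) + c->val; }
};

struct counter_eq {
  bool operator() (const counter *a, const counter *b) const
  { return a->val == b->val && a->id == b->id; }
};

/* Usage: std::unordered_set<counter *, counter_hash, counter_eq> tbl;  */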
since the last dump for the pass dump files. */
int
-statistics_fini_pass_1 (statistics_counter_t **slot,
+statistics_fini_pass_1 (statistics_counter **slot,
void *data ATTRIBUTE_UNUSED)
{
- statistics_counter_t *counter = *slot;
+ statistics_counter *counter = *slot;
unsigned HOST_WIDE_INT count = counter->count - counter->prev_dumped_count;
if (count == 0)
return 1;
since the last dump for the statistics dump. */
int
-statistics_fini_pass_2 (statistics_counter_t **slot,
+statistics_fini_pass_2 (statistics_counter **slot,
void *data ATTRIBUTE_UNUSED)
{
- statistics_counter_t *counter = *slot;
+ statistics_counter *counter = *slot;
unsigned HOST_WIDE_INT count = counter->count - counter->prev_dumped_count;
if (count == 0)
return 1;
/* Helper for statistics_fini_pass, reset the counters. */
int
-statistics_fini_pass_3 (statistics_counter_t **slot,
+statistics_fini_pass_3 (statistics_counter **slot,
void *data ATTRIBUTE_UNUSED)
{
- statistics_counter_t *counter = *slot;
+ statistics_counter *counter = *slot;
counter->prev_dumped_count = counter->count;
return 1;
}
/* Helper for printing summary information. */
int
-statistics_fini_1 (statistics_counter_t **slot, opt_pass *pass)
+statistics_fini_1 (statistics_counter **slot, opt_pass *pass)
{
- statistics_counter_t *counter = *slot;
+ statistics_counter *counter = *slot;
if (counter->count == 0)
return 1;
if (counter->histogram_p)
/* Lookup or add a statistics counter in the hashtable HASH with ID, VAL
and HISTOGRAM_P. */
-static statistics_counter_t *
+static statistics_counter *
lookup_or_add_counter (stats_counter_table_type *hash, const char *id, int val,
bool histogram_p)
{
- statistics_counter_t **counter;
- statistics_counter_t c;
+ statistics_counter **counter;
+ statistics_counter c;
c.id = id;
c.val = val;
counter = hash->find_slot (&c, INSERT);
if (!*counter)
{
- *counter = XNEW (struct statistics_counter_s);
+ *counter = XNEW (statistics_counter);
(*counter)->id = xstrdup (id);
(*counter)->val = val;
(*counter)->histogram_p = histogram_p;
void
statistics_counter_event (struct function *fn, const char *id, int incr)
{
- statistics_counter_t *counter;
+ statistics_counter *counter;
if ((!(dump_flags & TDF_STATS)
&& !statistics_dump_file)
void
statistics_histogram_event (struct function *fn, const char *id, int val)
{
- statistics_counter_t *counter;
+ statistics_counter *counter;
if (!(dump_flags & TDF_STATS)
&& !statistics_dump_file)
/* One individual log entry. We may have multiple statements for the
 same location if neither dominates the other (on different
execution paths). */
-typedef struct tm_log_entry
+struct tm_log_entry
{
/* Address to save. */
tree addr;
save/restore sequence. Later, when generating the save sequence
we place the SSA temp generated here. */
tree save_var;
-} *tm_log_entry_t;
+};
/* Log entry hashtable helpers. */
mem_max
};
-typedef struct tm_new_mem_map
+struct tm_new_mem_map
{
/* SSA_NAME being dereferenced. */
tree val;
enum thread_memory_type local_new_memory;
-} tm_new_mem_map_t;
+};
/* Hashtable helpers. */
-struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map_t>
+struct tm_mem_map_hasher : free_ptr_hash <tm_new_mem_map>
{
- static inline hashval_t hash (const tm_new_mem_map_t *);
- static inline bool equal (const tm_new_mem_map_t *, const tm_new_mem_map_t *);
+ static inline hashval_t hash (const tm_new_mem_map *);
+ static inline bool equal (const tm_new_mem_map *, const tm_new_mem_map *);
};
inline hashval_t
-tm_mem_map_hasher::hash (const tm_new_mem_map_t *v)
+tm_mem_map_hasher::hash (const tm_new_mem_map *v)
{
return (intptr_t)v->val >> 4;
}
inline bool
-tm_mem_map_hasher::equal (const tm_new_mem_map_t *v, const tm_new_mem_map_t *c)
+tm_mem_map_hasher::equal (const tm_new_mem_map *v, const tm_new_mem_map *c)
{
return v->val == c->val;
}
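tm_mem_map_hasher hashes the SSA_NAME pointer itself; shifting by 4 discards low bits that are zero for suitably aligned objects. A tiny self-contained illustration of the idea (not GCC's hashing API):

#include <cstddef>
#include <cstdint>

static inline size_t
ptr_hash (const void *p)
{
  /* Objects of interest are at least 16-byte aligned, so the low four
     bits carry no information; shift them away.  */
  return (size_t) ((uintptr_t) p >> 4);
}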
{
gimple *stmt = NULL;
enum tree_code code;
- tm_new_mem_map_t **slot;
- tm_new_mem_map_t elt, *elt_p;
+ tm_new_mem_map **slot;
+ tm_new_mem_map elt, *elt_p;
tree val = x;
enum thread_memory_type retval = mem_transaction_local;
/* Optimistically assume the memory is transaction local during
processing. This catches recursion into this variable. */
- *slot = elt_p = XNEW (tm_new_mem_map_t);
+ *slot = elt_p = XNEW (tm_new_mem_map);
elt_p->val = val;
elt_p->local_new_memory = mem_transaction_local;
bitmap irr_blocks;
};
-typedef struct tm_region *tm_region_p;
-
/* True if there are pending edge statements to be committed for the
current function being scanned in the tmmark pass. */
bool pending_edge_inserts_p;
auto_vec<basic_block> queue;
bitmap visited_blocks = BITMAP_ALLOC (NULL);
struct tm_region *old_region;
- auto_vec<tm_region_p> bb_regions;
+ auto_vec<tm_region *> bb_regions;
all_tm_regions = region;
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
// Callback data for collect_bb2reg.
struct bb2reg_stuff
{
- vec<tm_region_p> *bb2reg;
+ vec<tm_region *> *bb2reg;
bool include_uninstrumented_p;
};
collect_bb2reg (struct tm_region *region, void *data)
{
struct bb2reg_stuff *stuff = (struct bb2reg_stuff *)data;
- vec<tm_region_p> *bb2reg = stuff->bb2reg;
+ vec<tm_region *> *bb2reg = stuff->bb2reg;
vec<basic_block> queue;
unsigned int i;
basic_block bb;
// ??? There is currently a hack inside tree-ssa-pre.c to work around the
// only known instance of this block sharing.
-static vec<tm_region_p>
+static vec<tm_region *>
get_bb_regions_instrumented (bool traverse_clones,
bool include_uninstrumented_p)
{
unsigned n = last_basic_block_for_fn (cfun);
struct bb2reg_stuff stuff;
- vec<tm_region_p> ret;
+ vec<tm_region *> ret;
ret.create (n);
ret.safe_grow_cleared (n);
tm_log_init ();
- vec<tm_region_p> bb_regions
+ vec<tm_region *> bb_regions
= get_bb_regions_instrumented (/*traverse_clones=*/true,
/*include_uninstrumented_p=*/false);
struct tm_region *r;
unsigned int
pass_tm_edges::execute (function *fun)
{
- vec<tm_region_p> bb_regions
+ vec<tm_region *> bb_regions
= get_bb_regions_instrumented (/*traverse_clones=*/false,
/*include_uninstrumented_p=*/true);
struct tm_region *r;
\f
/* A unique TM memory operation. */
-typedef struct tm_memop
+struct tm_memop
{
/* Unique ID that all memory operations to the same location have. */
unsigned int value_id;
/* Address of load/store. */
tree addr;
-} *tm_memop_t;
+};
/* TM memory operation hashtable helpers. */
/* Structure used to predicate basic blocks. This is attached to the
->aux field of the BBs in the loop to be if-converted. */
-typedef struct bb_predicate_s {
+struct bb_predicate {
/* The condition under which this basic block is executed. */
tree predicate;
recorded here, in order to avoid the duplication of computations
that occur in previous conditions. See PR44483. */
gimple_seq predicate_gimplified_stmts;
-} *bb_predicate_p;
+};
/* Returns true when the basic block BB has a predicate. */
static inline tree
bb_predicate (basic_block bb)
{
- return ((bb_predicate_p) bb->aux)->predicate;
+ return ((struct bb_predicate *) bb->aux)->predicate;
}
/* Sets the gimplified predicate COND for basic block BB. */
gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
&& is_gimple_condexpr (TREE_OPERAND (cond, 0)))
|| is_gimple_condexpr (cond));
- ((bb_predicate_p) bb->aux)->predicate = cond;
+ ((struct bb_predicate *) bb->aux)->predicate = cond;
}
/* Returns the sequence of statements of the gimplification of the
static inline gimple_seq
bb_predicate_gimplified_stmts (basic_block bb)
{
- return ((bb_predicate_p) bb->aux)->predicate_gimplified_stmts;
+ return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
}
/* Sets the sequence of statements STMTS of the gimplification of the
static inline void
set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
- ((bb_predicate_p) bb->aux)->predicate_gimplified_stmts = stmts;
+ ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
}
/* Adds the sequence of statements STMTS to the sequence of statements
add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
{
gimple_seq_add_seq
- (&(((bb_predicate_p) bb->aux)->predicate_gimplified_stmts), stmts);
+ (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
}
/* Initializes to TRUE the predicate of basic block BB. */
static inline void
init_bb_predicate (basic_block bb)
{
- bb->aux = XNEW (struct bb_predicate_s);
+ bb->aux = XNEW (struct bb_predicate);
set_bb_predicate_gimplified_stmts (bb, NULL);
set_bb_predicate (bb, boolean_true_node);
}
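The accessors above all funnel through the same ->aux idiom: pass-local data hangs off a generic void * slot on each basic block, behind casts wrapped in inline helpers. The shape in miniature, with stand-in types (nothing here is GCC API):

struct toy_bb { void *aux; };              /* stand-in basic block */

struct toy_predicate { bool value; };

static inline bool
toy_bb_predicate (toy_bb *bb)
{ return ((toy_predicate *) bb->aux)->value; }

static inline void
toy_init_bb_predicate (toy_bb *bb)
{
  bb->aux = new toy_predicate;             /* freed when the pass ends */
  ((toy_predicate *) bb->aux)->value = true;
}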
/* Structure to map a variable VAR to the set of blocks that contain
definitions for VAR. */
-struct def_blocks_d
+struct def_blocks
{
/* Blocks that contain definitions of VAR. Bit I will be set if the
Ith block contains a definition of VAR. */
bitmap livein_blocks;
};
-typedef struct def_blocks_d *def_blocks_p;
-
-
/* Stack of trees used to restore the global currdefs to its original
state after completing rewriting of a block and its dominator
children. Its elements have the following properties:
};
/* Information stored for both SSA names and decls. */
-struct common_info_d
+struct common_info
{
/* This field indicates whether or not the variable may need PHI nodes.
See the enum's definition for more detailed information about the
tree current_def;
/* Definitions for this var. */
- struct def_blocks_d def_blocks;
+ struct def_blocks def_blocks;
};
-/* The information associated with decls and SSA names. */
-typedef struct common_info_d *common_info_p;
-
/* Information stored for decls. */
-struct var_info_d
+struct var_info
{
/* The variable. */
tree var;
/* Information stored for both SSA names and decls. */
- struct common_info_d info;
+ common_info info;
};
-/* The information associated with decls. */
-typedef struct var_info_d *var_info_p;
-
/* VAR_INFOS hashtable helpers. */
-struct var_info_hasher : free_ptr_hash <var_info_d>
+struct var_info_hasher : free_ptr_hash <var_info>
{
static inline hashval_t hash (const value_type &);
static inline bool equal (const value_type &, const compare_type &);
bitmap repl_set;
/* Information stored for both SSA names and decls. */
- struct common_info_d info;
+ common_info info;
};
-/* The information associated with names. */
-typedef struct ssa_name_info *ssa_name_info_p;
-
-static vec<ssa_name_info_p> info_for_ssa_name;
+static vec<ssa_name_info *> info_for_ssa_name;
static unsigned current_info_for_ssa_name_age;
static bitmap_obstack update_ssa_obstack;
/* Get the information associated with NAME. */
-static inline ssa_name_info_p
+static inline ssa_name_info *
get_ssa_name_ann (tree name)
{
unsigned ver = SSA_NAME_VERSION (name);
 /* Return the auxiliary information for DECL, allocating it if necessary. */
-static inline var_info_p
+static inline var_info *
get_var_info (tree decl)
{
- struct var_info_d vi;
- var_info_d **slot;
+ var_info vi;
+ var_info **slot;
vi.var = decl;
slot = var_infos->find_slot_with_hash (&vi, DECL_UID (decl), INSERT);
if (*slot == NULL)
{
- var_info_p v = XCNEW (struct var_info_d);
+ var_info *v = XCNEW (var_info);
v->var = decl;
*slot = v;
return v;
 /* Get access to the auxiliary information stored per SSA name or decl. */
-static inline common_info_p
+static inline common_info *
get_common_info (tree var)
{
if (TREE_CODE (var) == SSA_NAME)
where VAR is live on entry (livein). If no entry is found in
DEF_BLOCKS, a new one is created and returned. */
-static inline struct def_blocks_d *
-get_def_blocks_for (common_info_p info)
+static inline def_blocks *
+get_def_blocks_for (common_info *info)
{
- struct def_blocks_d *db_p = &info->def_blocks;
+ def_blocks *db_p = &info->def_blocks;
if (!db_p->def_blocks)
{
db_p->def_blocks = BITMAP_ALLOC (&update_ssa_obstack);
static void
set_def_block (tree var, basic_block bb, bool phi_p)
{
- struct def_blocks_d *db_p;
- common_info_p info;
+ def_blocks *db_p;
+ common_info *info;
info = get_common_info (var);
db_p = get_def_blocks_for (info);
static void
set_livein_block (tree var, basic_block bb)
{
- common_info_p info;
- struct def_blocks_d *db_p;
+ common_info *info;
+ def_blocks *db_p;
info = get_common_info (var);
db_p = get_def_blocks_for (info);
where VAR is live on entry (livein). Return NULL, if no entry is
found in DEF_BLOCKS. */
-static inline struct def_blocks_d *
+static inline def_blocks *
find_def_blocks_for (tree var)
{
- def_blocks_p p = &get_common_info (var)->def_blocks;
+ def_blocks *p = &get_common_info (var)->def_blocks;
if (!p->def_blocks)
return NULL;
return p;
gphi *phi;
basic_block bb;
bitmap_iterator bi;
- struct def_blocks_d *def_map = find_def_blocks_for (var);
+ def_blocks *def_map = find_def_blocks_for (var);
/* Remove the blocks where we already have PHI nodes for VAR. */
bitmap_and_compl_into (phi_insertion_points, def_map->phi_blocks);
static int
insert_phi_nodes_compare_var_infos (const void *a, const void *b)
{
- const struct var_info_d *defa = *(struct var_info_d * const *)a;
- const struct var_info_d *defb = *(struct var_info_d * const *)b;
+ const var_info *defa = *(var_info * const *)a;
+ const var_info *defb = *(var_info * const *)b;
if (DECL_UID (defa->var) < DECL_UID (defb->var))
return -1;
else
{
hash_table<var_info_hasher>::iterator hi;
unsigned i;
- var_info_p info;
+ var_info *info;
timevar_push (TV_TREE_INSERT_PHI_NODES);
- auto_vec<var_info_p> vars (var_infos->elements ());
+ auto_vec<var_info *> vars (var_infos->elements ());
FOR_EACH_HASH_TABLE_ELEMENT (*var_infos, info, var_info_p, hi)
if (info->info.need_phi_state != NEED_PHI_STATE_NO)
vars.quick_push (info);
static void
register_new_def (tree def, tree sym)
{
- common_info_p info = get_common_info (sym);
+ common_info *info = get_common_info (sym);
tree currdef;
/* If this variable is set in a single basic block and all uses are
static tree
get_reaching_def (tree var)
{
- common_info_p info = get_common_info (var);
+ common_info *info = get_common_info (var);
tree currdef;
/* Lookup the current reaching definition for VAR. */
FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
{
tree var = USE_FROM_PTR (use_p), def;
- common_info_p info = get_common_info (var);
+ common_info *info = get_common_info (var);
gcc_checking_assert (DECL_P (var));
def = info->current_def;
if (!def)
;
else
{
- struct def_blocks_d *db_p = get_def_blocks_for (info);
+ def_blocks *db_p = get_def_blocks_for (info);
/* If there are some non-debug uses in the current bb,
it is fine. */
fprintf (file, "\n\nCurrent reaching definitions\n\n");
FOR_EACH_VEC_ELT (symbols_to_rename, i, var)
{
- common_info_p info = get_common_info (var);
+ common_info *info = get_common_info (var);
fprintf (file, "CURRDEF (");
print_generic_expr (file, var, 0);
fprintf (file, ") = ");
/* Callback for htab_traverse to dump the VAR_INFOS hash table. */
int
-debug_var_infos_r (var_info_d **slot, FILE *file)
+debug_var_infos_r (var_info **slot, FILE *file)
{
- struct var_info_d *info = *slot;
+ var_info *info = *slot;
fprintf (file, "VAR: ");
print_generic_expr (file, info->var, dump_flags);
static inline void
register_new_update_single (tree new_name, tree old_name)
{
- common_info_p info = get_common_info (old_name);
+ common_info *info = get_common_info (old_name);
tree currdef = info->current_def;
/* Push the current reaching definition into BLOCK_DEFS_STACK.
replace it). */
if (insert_phi_p)
{
- struct def_blocks_d *db_p = get_def_blocks_for (get_common_info (var));
+ def_blocks *db_p = get_def_blocks_for (get_common_info (var));
if (!bitmap_bit_p (db_p->def_blocks, bb->index))
set_livein_block (var, bb);
}
unsigned update_flags)
{
basic_block entry;
- struct def_blocks_d *db;
+ def_blocks *db;
bitmap idf, pruned_idf;
bitmap_iterator bi;
unsigned i;
/* A Reduced Dependence Graph (RDG) vertex representing a statement. */
-typedef struct rdg_vertex
+struct rdg_vertex
{
/* The statement represented by this vertex. */
gimple *stmt;
/* True when the statement contains a read from memory. */
bool has_mem_reads;
-} *rdg_vertex_p;
+};
#define RDGV_STMT(V) ((struct rdg_vertex *) ((V)->data))->stmt
#define RDGV_DATAREFS(V) ((struct rdg_vertex *) ((V)->data))->datarefs
/* Dependence information attached to an edge of the RDG. */
-typedef struct rdg_edge
+struct rdg_edge
{
/* Type of the dependence. */
enum rdg_dep_type type;
-} *rdg_edge_p;
+};
#define RDGE_TYPE(E) ((struct rdg_edge *) ((E)->data))->type
PKIND_NORMAL, PKIND_MEMSET, PKIND_MEMCPY
};
-typedef struct partition_s
+struct partition
{
bitmap stmts;
bitmap loops;
data_reference_p secondary_dr;
tree niter;
bool plus_one;
-} *partition_t;
+};
/* Allocate and initialize a partition from BITMAP. */
-static partition_t
+static partition *
partition_alloc (bitmap stmts, bitmap loops)
{
- partition_t partition = XCNEW (struct partition_s);
+ partition *partition = XCNEW (struct partition);
partition->stmts = stmts ? stmts : BITMAP_ALLOC (NULL);
partition->loops = loops ? loops : BITMAP_ALLOC (NULL);
partition->reduction_p = false;
/* Free PARTITION. */
static void
-partition_free (partition_t partition)
+partition_free (partition *partition)
{
BITMAP_FREE (partition->stmts);
BITMAP_FREE (partition->loops);
/* Returns true if the partition can be generated as a builtin. */
static bool
-partition_builtin_p (partition_t partition)
+partition_builtin_p (partition *partition)
{
return partition->kind != PKIND_NORMAL;
}
/* Returns true if the partition contains a reduction. */
static bool
-partition_reduction_p (partition_t partition)
+partition_reduction_p (partition *partition)
{
return partition->reduction_p;
}
/* Merge PARTITION into the partition DEST. */
static void
-partition_merge_into (partition_t dest, partition_t partition)
+partition_merge_into (partition *dest, partition *partition)
{
dest->kind = PKIND_NORMAL;
bitmap_ior_into (dest->stmts, partition->stmts);
basic blocks of a loop are taken in dom order. */
static void
-generate_loops_for_partition (struct loop *loop, partition_t partition,
+generate_loops_for_partition (struct loop *loop, partition *partition,
bool copy_p)
{
unsigned i;
/* Generate a call to memset for PARTITION in LOOP. */
static void
-generate_memset_builtin (struct loop *loop, partition_t partition)
+generate_memset_builtin (struct loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
gimple *stmt, *fn_call;
/* Generate a call to memcpy for PARTITION in LOOP. */
static void
-generate_memcpy_builtin (struct loop *loop, partition_t partition)
+generate_memcpy_builtin (struct loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
gimple *stmt, *fn_call;
static void
generate_code_for_partition (struct loop *loop,
- partition_t partition, bool copy_p)
+ partition *partition, bool copy_p)
{
switch (partition->kind)
{
/* Returns a partition with all the statements needed for computing
the vertex V of the RDG, also including the loop exit conditions. */
-static partition_t
+static partition *
build_rdg_partition_for_vertex (struct graph *rdg, int v)
{
- partition_t partition = partition_alloc (NULL, NULL);
+ partition *partition = partition_alloc (NULL, NULL);
auto_vec<int, 3> nodes;
unsigned i;
int x;
For the moment we detect only the memset zero pattern. */
static void
-classify_partition (loop_p loop, struct graph *rdg, partition_t partition)
+classify_partition (loop_p loop, struct graph *rdg, partition *partition)
{
bitmap_iterator bi;
unsigned i;
accesses in RDG. */
static bool
-similar_memory_accesses (struct graph *rdg, partition_t partition1,
- partition_t partition2)
+similar_memory_accesses (struct graph *rdg, partition *partition1,
+ partition *partition2)
{
unsigned i, j, k, l;
bitmap_iterator bi, bj;
static void
rdg_build_partitions (struct graph *rdg,
vec<gimple *> starting_stmts,
- vec<partition_t> *partitions)
+ vec<partition *> *partitions)
{
bitmap processed = BITMAP_ALLOC (NULL);
int i;
if (bitmap_bit_p (processed, v))
continue;
- partition_t partition = build_rdg_partition_for_vertex (rdg, v);
+ partition *partition = build_rdg_partition_for_vertex (rdg, v);
bitmap_ior_into (processed, partition->stmts);
if (dump_file && (dump_flags & TDF_DETAILS))
/* Dump to FILE the PARTITIONS. */
static void
-dump_rdg_partitions (FILE *file, vec<partition_t> partitions)
+dump_rdg_partitions (FILE *file, vec<partition *> partitions)
{
int i;
- partition_t partition;
+ partition *partition;
FOR_EACH_VEC_ELT (partitions, i, partition)
debug_bitmap_file (file, partition->stmts);
}
/* Debug PARTITIONS. */
-extern void debug_rdg_partitions (vec<partition_t> );
+extern void debug_rdg_partitions (vec<partition *> );
DEBUG_FUNCTION void
-debug_rdg_partitions (vec<partition_t> partitions)
+debug_rdg_partitions (vec<partition *> partitions)
{
dump_rdg_partitions (stderr, partitions);
}
the RDG. */
static int
-number_of_rw_in_partition (struct graph *rdg, partition_t partition)
+number_of_rw_in_partition (struct graph *rdg, partition *partition)
{
int res = 0;
unsigned i;
static bool
partition_contains_all_rw (struct graph *rdg,
- vec<partition_t> partitions)
+ vec<partition *> partitions)
{
int i;
- partition_t partition;
+ partition *partition;
int nrw = number_of_rw_in_rdg (rdg);
FOR_EACH_VEC_ELT (partitions, i, partition)
control_dependences *cd, int *nb_calls)
{
struct graph *rdg;
- partition_t partition;
+ partition *partition;
bool any_builtin;
int i, nbp;
graph *pg = NULL;
if (dump_file && (dump_flags & TDF_DETAILS))
dump_rdg (dump_file, rdg);
- auto_vec<partition_t, 3> partitions;
+ auto_vec<struct partition *, 3> partitions;
rdg_build_partitions (rdg, stmts, &partitions);
any_builtin = false;
were not classified as builtins. This also avoids chopping
a loop into pieces, separated by builtin calls. That is, we
 only want either no loop body or a single one remaining. */
- partition_t into;
+ struct partition *into;
if (!flag_tree_loop_distribution)
{
for (i = 0; partitions.iterate (i, &into); ++i)
{
pg = new_graph (partitions.length ());
struct pgdata {
- partition_t partition;
+ struct partition *partition;
vec<data_reference_p> writes;
vec<data_reference_p> reads;
};
else
data->writes.safe_push (dr);
}
- partition_t partition1, partition2;
+ struct partition *partition1, *partition2;
for (i = 0; partitions.iterate (i, &partition1); ++i)
for (int j = i + 1; partitions.iterate (j, &partition2); ++j)
{
num_sccs = graphds_scc (pg, NULL);
for (i = 0; i < num_sccs; ++i)
{
- partition_t first;
+ struct partition *first;
int j;
for (j = 0; partitions.iterate (j, &first); ++j)
if (pg->vertices[j].component == i)
/* This structure defines a pair entry. */
-typedef struct coalesce_pair
+struct coalesce_pair
{
int first_element;
int second_element;
int cost;
-} * coalesce_pair_p;
-typedef const struct coalesce_pair *const_coalesce_pair_p;
+};
/* Coalesce pair hashtable helpers. */
typedef coalesce_table_type::iterator coalesce_iterator_type;
-typedef struct cost_one_pair_d
+struct cost_one_pair
{
int first_element;
int second_element;
- struct cost_one_pair_d *next;
-} * cost_one_pair_p;
+ cost_one_pair *next;
+};
/* This structure maintains the list of coalesce pairs. */
-typedef struct coalesce_list_d
+struct coalesce_list
{
coalesce_table_type *list; /* Hash table. */
- coalesce_pair_p *sorted; /* List when sorted. */
+ coalesce_pair **sorted; /* List when sorted. */
int num_sorted; /* Number in the sorted list. */
- cost_one_pair_p cost_one_list;/* Single use coalesces with cost 1. */
-} *coalesce_list_p;
+ cost_one_pair *cost_one_list;/* Single use coalesces with cost 1. */
+};
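coalesce_list keeps the common cost-one pairs on a cheap singly linked list and everything else in a hash table that is sorted once at the end. A toy sketch of the cost-one side, simplified from the structures above:

struct toy_pair { int a, b; toy_pair *next; };

/* Pop one pair, returning -1 (cf. NO_BEST_COALESCE) when empty.  */
static int
toy_pop_pair (toy_pair *&head, int *p1, int *p2)
{
  if (!head)
    return -1;
  toy_pair *n = head;
  head = n->next;
  *p1 = n->a;
  *p2 = n->b;
  delete n;
  return 1;
}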
#define NO_BEST_COALESCE -1
#define MUST_COALESCE_COST INT_MAX
NO_BEST_COALESCE is returned if there aren't any. */
static inline int
-pop_cost_one_pair (coalesce_list_p cl, int *p1, int *p2)
+pop_cost_one_pair (coalesce_list *cl, int *p1, int *p2)
{
- cost_one_pair_p ptr;
+ cost_one_pair *ptr;
ptr = cl->cost_one_list;
if (!ptr)
NO_BEST_COALESCE is returned if the coalesce list is empty. */
static inline int
-pop_best_coalesce (coalesce_list_p cl, int *p1, int *p2)
+pop_best_coalesce (coalesce_list *cl, int *p1, int *p2)
{
- coalesce_pair_p node;
+ coalesce_pair *node;
int ret;
if (cl->sorted == NULL)
/* Create a new empty coalesce list object and return it. */
-static inline coalesce_list_p
+static inline coalesce_list *
create_coalesce_list (void)
{
- coalesce_list_p list;
+ coalesce_list *list;
unsigned size = num_ssa_names * 3;
if (size < 40)
size = 40;
- list = (coalesce_list_p) xmalloc (sizeof (struct coalesce_list_d));
+ list = (coalesce_list *) xmalloc (sizeof (struct coalesce_list));
list->list = new coalesce_table_type (size);
list->sorted = NULL;
list->num_sorted = 0;
/* Delete coalesce list CL. */
static inline void
-delete_coalesce_list (coalesce_list_p cl)
+delete_coalesce_list (coalesce_list *cl)
{
gcc_assert (cl->cost_one_list == NULL);
delete cl->list;
one isn't found, return NULL if CREATE is false, otherwise create a new
coalesce pair object and return it. */
-static coalesce_pair_p
-find_coalesce_pair (coalesce_list_p cl, int p1, int p2, bool create)
+static coalesce_pair *
+find_coalesce_pair (coalesce_list *cl, int p1, int p2, bool create)
{
struct coalesce_pair p;
coalesce_pair **slot;
}
static inline void
-add_cost_one_coalesce (coalesce_list_p cl, int p1, int p2)
+add_cost_one_coalesce (coalesce_list *cl, int p1, int p2)
{
- cost_one_pair_p pair;
+ cost_one_pair *pair;
- pair = XNEW (struct cost_one_pair_d);
+ pair = XNEW (cost_one_pair);
pair->first_element = p1;
pair->second_element = p2;
pair->next = cl->cost_one_list;
/* Add a coalesce between P1 and P2 in list CL with a cost of VALUE. */
static inline void
-add_coalesce (coalesce_list_p cl, int p1, int p2, int value)
+add_coalesce (coalesce_list *cl, int p1, int p2, int value)
{
- coalesce_pair_p node;
+ coalesce_pair *node;
gcc_assert (cl->sorted == NULL);
if (p1 == p2)
static int
compare_pairs (const void *p1, const void *p2)
{
- const_coalesce_pair_p const *const pp1 = (const_coalesce_pair_p const *) p1;
- const_coalesce_pair_p const *const pp2 = (const_coalesce_pair_p const *) p2;
+ const coalesce_pair *const *const pp1 = (const coalesce_pair *const *) p1;
+ const coalesce_pair *const *const pp2 = (const coalesce_pair *const *) p2;
int result;
result = (* pp1)->cost - (* pp2)->cost;
/* Return the number of unique coalesce pairs in CL. */
static inline int
-num_coalesce_pairs (coalesce_list_p cl)
+num_coalesce_pairs (coalesce_list *cl)
{
return cl->list->elements ();
}
in order from most important coalesce to least important. */
static void
-sort_coalesce_list (coalesce_list_p cl)
+sort_coalesce_list (coalesce_list *cl)
{
unsigned x, num;
- coalesce_pair_p p;
+ coalesce_pair *p;
coalesce_iterator_type ppi;
gcc_assert (cl->sorted == NULL);
return;
/* Allocate a vector for the pair pointers. */
- cl->sorted = XNEWVEC (coalesce_pair_p, num);
+ cl->sorted = XNEWVEC (coalesce_pair *, num);
/* Populate the vector with pointers to the pairs. */
x = 0;
??? Maybe std::sort will do better, provided that compare_pairs
can be inlined. */
if (num > 2)
- qsort (cl->sorted, num, sizeof (coalesce_pair_p), compare_pairs);
+ qsort (cl->sorted, num, sizeof (coalesce_pair *), compare_pairs);
}
/* Send debug info for coalesce list CL to file F. */
static void
-dump_coalesce_list (FILE *f, coalesce_list_p cl)
+dump_coalesce_list (FILE *f, coalesce_list *cl)
{
- coalesce_pair_p node;
+ coalesce_pair *node;
coalesce_iterator_type ppi;
int x;
A full matrix is used for conflicts rather than just upper triangular form.
 This makes it much simpler and faster to perform conflict merges. */
-typedef struct ssa_conflicts_d
+struct ssa_conflicts
{
bitmap_obstack obstack; /* A place to allocate our bitmaps. */
vec<bitmap> conflicts;
-} * ssa_conflicts_p;
+};
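The comment above motivates the full (symmetric) matrix: merging node Y into X is then just folding Y's row into X's and re-pointing Y's neighbors. A self-contained sketch with std::set standing in for the obstack bitmaps (an assumption, not GCC's representation):

#include <set>
#include <vector>

struct toy_conflicts
{
  std::vector<std::set<unsigned> > rows;   /* one row per element */

  void add (unsigned x, unsigned y)        /* symmetric insertion */
  { rows[x].insert (y); rows[y].insert (x); }

  void merge (unsigned x, unsigned y)      /* fold Y into X */
  {
    for (unsigned z : rows[y])
      {
        if (z == x)
          continue;
        rows[z].erase (y);                 /* re-point Z's edge at X */
        add (x, z);
      }
    rows[x].erase (y);
    rows[y].clear ();
  }
};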
/* Return an empty new conflict graph for SIZE elements. */
-static inline ssa_conflicts_p
+static inline ssa_conflicts *
ssa_conflicts_new (unsigned size)
{
- ssa_conflicts_p ptr;
+ ssa_conflicts *ptr;
- ptr = XNEW (struct ssa_conflicts_d);
+ ptr = XNEW (ssa_conflicts);
bitmap_obstack_initialize (&ptr->obstack);
ptr->conflicts.create (size);
ptr->conflicts.safe_grow_cleared (size);
/* Free storage for conflict graph PTR. */
static inline void
-ssa_conflicts_delete (ssa_conflicts_p ptr)
+ssa_conflicts_delete (ssa_conflicts *ptr)
{
bitmap_obstack_release (&ptr->obstack);
ptr->conflicts.release ();
/* Test if elements X and Y conflict in graph PTR. */
static inline bool
-ssa_conflicts_test_p (ssa_conflicts_p ptr, unsigned x, unsigned y)
+ssa_conflicts_test_p (ssa_conflicts *ptr, unsigned x, unsigned y)
{
bitmap bx = ptr->conflicts[x];
bitmap by = ptr->conflicts[y];
/* Add a conflict with Y to the bitmap for X in graph PTR. */
static inline void
-ssa_conflicts_add_one (ssa_conflicts_p ptr, unsigned x, unsigned y)
+ssa_conflicts_add_one (ssa_conflicts *ptr, unsigned x, unsigned y)
{
bitmap bx = ptr->conflicts[x];
/* If there are no conflicts yet, allocate the bitmap and set bit. */
/* Add conflicts between X and Y in graph PTR. */
static inline void
-ssa_conflicts_add (ssa_conflicts_p ptr, unsigned x, unsigned y)
+ssa_conflicts_add (ssa_conflicts *ptr, unsigned x, unsigned y)
{
gcc_checking_assert (x != y);
ssa_conflicts_add_one (ptr, x, y);
/* Merge all Y's conflict into X in graph PTR. */
static inline void
-ssa_conflicts_merge (ssa_conflicts_p ptr, unsigned x, unsigned y)
+ssa_conflicts_merge (ssa_conflicts *ptr, unsigned x, unsigned y)
{
unsigned z;
bitmap_iterator bi;
/* Dump a conflicts graph. */
static void
-ssa_conflicts_dump (FILE *file, ssa_conflicts_p ptr)
+ssa_conflicts_dump (FILE *file, ssa_conflicts *ptr)
{
unsigned x;
bitmap b;
marked as being live. This delays clearing of these bitmaps until
they are actually needed again. */
-typedef struct live_track_d
+struct live_track
{
bitmap_obstack obstack; /* A place to allocate our bitmaps. */
bitmap live_base_var; /* Indicates if a basevar is live. */
bitmap *live_base_partitions; /* Live partitions for each basevar. */
var_map map; /* Var_map being used for partition mapping. */
-} * live_track_p;
+};
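As the comment above says, only the base-variable bitmap is cleared eagerly; a per-base partition bitmap is reset lazily the next time its base comes live. The trick in miniature (toy containers, not GCC bitmaps):

#include <set>
#include <vector>

struct toy_live
{
  std::set<int> live_bases;                /* cleared eagerly */
  std::vector<std::set<int> > parts;       /* cleared lazily per base */

  void clear_all () { live_bases.clear (); }   /* O(live bases) */

  void add (int base, int part)
  {
    if (live_bases.insert (base).second)   /* base just came live: */
      parts[base].clear ();                /* reset its stale set now */
    parts[base].insert (part);
  }
};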
/* This routine will create a new live track structure based on the partitions
in MAP. */
-static live_track_p
+static live_track *
new_live_track (var_map map)
{
- live_track_p ptr;
+ live_track *ptr;
int lim, x;
/* Make sure there is a partition view in place. */
gcc_assert (map->partition_to_base_index != NULL);
- ptr = (live_track_p) xmalloc (sizeof (struct live_track_d));
+ ptr = (live_track *) xmalloc (sizeof (live_track));
ptr->map = map;
lim = num_basevars (map);
bitmap_obstack_initialize (&ptr->obstack);
/* This routine will free the memory associated with PTR. */
static void
-delete_live_track (live_track_p ptr)
+delete_live_track (live_track *ptr)
{
bitmap_obstack_release (&ptr->obstack);
free (ptr->live_base_partitions);
/* This function will remove PARTITION from the live list in PTR. */
static inline void
-live_track_remove_partition (live_track_p ptr, int partition)
+live_track_remove_partition (live_track *ptr, int partition)
{
int root;
 /* This function will add PARTITION to the live list in PTR. */
static inline void
-live_track_add_partition (live_track_p ptr, int partition)
+live_track_add_partition (live_track *ptr, int partition)
{
int root;
/* Clear the live bit for VAR in PTR. */
static inline void
-live_track_clear_var (live_track_p ptr, tree var)
+live_track_clear_var (live_track *ptr, tree var)
{
int p;
/* Return TRUE if VAR is live in PTR. */
static inline bool
-live_track_live_p (live_track_p ptr, tree var)
+live_track_live_p (live_track *ptr, tree var)
{
int p, root;
ssa live map and the live bitmap for the root of USE. */
static inline void
-live_track_process_use (live_track_p ptr, tree use)
+live_track_process_use (live_track *ptr, tree use)
{
int p;
variable, conflicts will be added to GRAPH. */
static inline void
-live_track_process_def (live_track_p ptr, tree def, ssa_conflicts_p graph)
+live_track_process_def (live_track *ptr, tree def, ssa_conflicts *graph)
{
int p, root;
bitmap b;
/* Initialize PTR with the partitions set in INIT. */
static inline void
-live_track_init (live_track_p ptr, bitmap init)
+live_track_init (live_track *ptr, bitmap init)
{
unsigned p;
bitmap_iterator bi;
/* This routine will clear all live partitions in PTR. */
static inline void
-live_track_clear_base_vars (live_track_p ptr)
+live_track_clear_base_vars (live_track *ptr)
{
/* Simply clear the live base list. Anything marked as live in the element
lists will be cleared later if/when the base variable ever comes alive
conflict graph. Only conflicts between ssa_name partitions with the same
base variable are added. */
-static ssa_conflicts_p
+static ssa_conflicts *
build_ssa_conflict_graph (tree_live_info_p liveinfo)
{
- ssa_conflicts_p graph;
+ ssa_conflicts *graph;
var_map map;
basic_block bb;
ssa_op_iter iter;
- live_track_p live;
+ live_track *live;
basic_block entry;
/* If inter-variable coalescing is enabled, we may attempt to
coalescing. */
static void
-coalesce_with_default (tree var, coalesce_list_p cl, bitmap used_in_copy)
+coalesce_with_default (tree var, coalesce_list *cl, bitmap used_in_copy)
{
if (SSA_NAME_IS_DEFAULT_DEF (var)
|| !SSA_NAME_VAR (var)
a coalesce list for use later in the out of ssa process. */
static var_map
-create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy)
+create_outofssa_var_map (coalesce_list *cl, bitmap used_in_copy)
{
gimple_stmt_iterator gsi;
basic_block bb;
 DEBUG, if it is non-NULL. */
static inline bool
-attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y,
+attempt_coalesce (var_map map, ssa_conflicts *graph, int x, int y,
FILE *debug)
{
int z;
GRAPH. Debug output is sent to DEBUG if it is non-NULL. */
static void
-coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl,
+coalesce_partitions (var_map map, ssa_conflicts *graph, coalesce_list *cl,
FILE *debug)
{
int x = 0, y = 0;
static void
compute_optimized_partition_bases (var_map map, bitmap used_in_copies,
- coalesce_list_p cl)
+ coalesce_list *cl)
{
int parts = num_var_partitions (map);
partition tentative = partition_new (parts);
pair, both of its members are in the same partition in
TENTATIVE. */
gcc_assert (!cl->sorted);
- coalesce_pair_p node;
+ coalesce_pair *node;
coalesce_iterator_type ppi;
FOR_EACH_PARTITION_PAIR (node, ppi, cl)
{
}
/* We have to deal with cost one pairs too. */
- for (cost_one_pair_d *co = cl->cost_one_list; co; co = co->next)
+ for (cost_one_pair *co = cl->cost_one_list; co; co = co->next)
{
tree v1 = ssa_name (co->first_element);
int p1 = partition_find (tentative, var_to_partition (map, v1));
coalesce_ssa_name (void)
{
tree_live_info_p liveinfo;
- ssa_conflicts_p graph;
- coalesce_list_p cl;
+ ssa_conflicts *graph;
+ coalesce_list *cl;
bitmap used_in_copies = BITMAP_ALLOC (NULL);
var_map map;
unsigned int i;
/* The data used by the induction variable optimizations. */
-typedef struct iv_use *iv_use_p;
-
-typedef struct iv_cand *iv_cand_p;
-
/* Hashtable helpers. */
struct iv_inv_expr_hasher : free_ptr_hash <iv_inv_expr_ent>
bitmap relevant;
/* The uses of induction variables. */
- vec<iv_use_p> iv_uses;
+ vec<iv_use *> iv_uses;
/* The candidates. */
- vec<iv_cand_p> iv_candidates;
+ vec<iv_cand *> iv_candidates;
/* A bitmap of important candidates. */
bitmap important_candidates;
AINC_NONE /* Also the number of auto increment types. */
};
-typedef struct address_cost_data_s
+struct address_cost_data
{
HOST_WIDE_INT min_offset, max_offset;
unsigned costs[2][2][2][2];
unsigned ainc_costs[AINC_NONE];
-} *address_cost_data;
+};
static comp_cost
bool stmt_after_inc, bool *may_autoinc)
{
machine_mode address_mode = targetm.addr_space.address_mode (as);
- static vec<address_cost_data> address_cost_data_list;
+ static vec<address_cost_data *> address_cost_data_list;
unsigned int data_index = (int) as * MAX_MACHINE_MODE + (int) mem_mode;
- address_cost_data data;
+ address_cost_data *data;
static bool has_preinc[MAX_MACHINE_MODE], has_postinc[MAX_MACHINE_MODE];
static bool has_predec[MAX_MACHINE_MODE], has_postdec[MAX_MACHINE_MODE];
unsigned cost, acost, complexity;
rtx addr, base;
rtx reg0, reg1;
- data = (address_cost_data) xcalloc (1, sizeof (*data));
+ data = (address_cost_data *) xcalloc (1, sizeof (*data));
reg1 = gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1);
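The surrounding function computes address costs once per (address space, mode) pair and memoizes them in a static vector indexed by that pair. The memoization shape, sketched stand-alone with toy types:

#include <vector>

struct toy_cost_data { long min_offset, max_offset; };

static toy_cost_data *
toy_get_cost_data (unsigned key)
{
  static std::vector<toy_cost_data *> cache;
  if (cache.size () <= key)
    cache.resize (key + 1, nullptr);
  if (!cache[key])
    cache[key] = new toy_cost_data ();     /* computed once, reused */
  return cache[key];
}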
2. Left linearization of the expression trees, so that (A+B)+(C+D)
becomes (((A+B)+C)+D), which is easier for us to rewrite later.
During linearization, we place the operands of the binary
- expressions into a vector of operand_entry_t
+ expressions into a vector of operand_entry_*
3. Optimization of the operand lists, eliminating things like a +
-a, a & a, etc.
} reassociate_stats;
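Step 2 of the header comment above, worked on a toy tree type rather than GIMPLE: flattening a '+' chain collects the operand vector, and rebuilding that vector left to right yields the left-linearized form.

#include <string>
#include <vector>

struct toy_node { std::string op; toy_node *lhs, *rhs; };

/* Collect the leaves of a chain of '+' nodes into OPS, so that
   (A+B)+(C+D) flattens to [A,B,C,D]; rebuilding the vector left to
   right gives (((A+B)+C)+D).  */
static void
toy_flatten (toy_node *n, std::vector<toy_node *> &ops)
{
  if (n->op == "+")
    {
      toy_flatten (n->lhs, ops);
      toy_flatten (n->rhs, ops);
    }
  else
    ops.push_back (n);
}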
/* Operator, rank pair. */
-typedef struct operand_entry
+struct operand_entry
{
unsigned int rank;
int id;
tree op;
unsigned int count;
-} *operand_entry_t;
+};
static object_allocator<operand_entry> operand_entry_pool
("operand entry pool");
static int
sort_by_operand_rank (const void *pa, const void *pb)
{
- const operand_entry_t oea = *(const operand_entry_t *)pa;
- const operand_entry_t oeb = *(const operand_entry_t *)pb;
+ const operand_entry *oea = *(const operand_entry *const *)pa;
+ const operand_entry *oeb = *(const operand_entry *const *)pb;
/* It's nicer for optimize_expression if constants that are likely
 to fold when added/multiplied/whatever are put next to each
/* Add an operand entry to *OPS for the tree operand OP. */
static void
-add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
+add_to_ops_vec (vec<operand_entry *> *ops, tree op)
{
- operand_entry_t oe = operand_entry_pool.allocate ();
+ operand_entry *oe = operand_entry_pool.allocate ();
oe->op = op;
oe->rank = get_rank (op);
count REPEAT. */
static void
-add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
+add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op,
HOST_WIDE_INT repeat)
{
- operand_entry_t oe = operand_entry_pool.allocate ();
+ operand_entry *oe = operand_entry_pool.allocate ();
oe->op = op;
oe->rank = get_rank (op);
static bool
eliminate_duplicate_pair (enum tree_code opcode,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
bool *all_done,
unsigned int i,
- operand_entry_t curr,
- operand_entry_t last)
+ operand_entry *curr,
+ operand_entry *last)
{
/* If we have two of the same op, and the opcode is & |, min, or max,
static bool
eliminate_plus_minus_pair (enum tree_code opcode,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
unsigned int currindex,
- operand_entry_t curr)
+ operand_entry *curr)
{
tree negateop;
tree notop;
unsigned int i;
- operand_entry_t oe;
+ operand_entry *oe;
if (opcode != PLUS_EXPR || TREE_CODE (curr->op) != SSA_NAME)
return false;
static bool
eliminate_not_pairs (enum tree_code opcode,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
unsigned int currindex,
- operand_entry_t curr)
+ operand_entry *curr)
{
tree notop;
unsigned int i;
- operand_entry_t oe;
+ operand_entry *oe;
if ((opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
|| TREE_CODE (curr->op) != SSA_NAME)
static void
eliminate_using_constants (enum tree_code opcode,
- vec<operand_entry_t> *ops)
+ vec<operand_entry *> *ops)
{
- operand_entry_t oelast = ops->last ();
+ operand_entry *oelast = ops->last ();
tree type = TREE_TYPE (oelast->op);
if (oelast->rank == 0
}
-static void linearize_expr_tree (vec<operand_entry_t> *, gimple *,
+static void linearize_expr_tree (vec<operand_entry *> *, gimple *,
bool, bool);
/* Structure for tracking and counting operands. */
static bool
undistribute_ops_list (enum tree_code opcode,
- vec<operand_entry_t> *ops, struct loop *loop)
+ vec<operand_entry *> *ops, struct loop *loop)
{
unsigned int length = ops->length ();
- operand_entry_t oe1;
+ operand_entry *oe1;
unsigned i, j;
sbitmap candidates, candidates2;
unsigned nr_candidates, nr_candidates2;
sbitmap_iterator sbi0;
- vec<operand_entry_t> *subops;
+ vec<operand_entry *> *subops;
bool changed = false;
int next_oecount_id = 0;
/* ??? Macro arguments cannot have multi-argument template types in
 them. This typedef is needed to work around that limitation. */
- typedef vec<operand_entry_t> vec_operand_entry_t_heap;
+ typedef vec<operand_entry *> vec_operand_entry_t_heap;
subops = XCNEWVEC (vec_operand_entry_t_heap, ops->length ());
EXECUTE_IF_SET_IN_BITMAP (candidates, 0, i, sbi0)
{
if (nr_candidates2 >= 2)
{
- operand_entry_t oe1, oe2;
+ operand_entry *oe1, *oe2;
gimple *prod;
int first = bitmap_first_set_bit (candidates2);
static bool
eliminate_redundant_comparison (enum tree_code opcode,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
unsigned int currindex,
- operand_entry_t curr)
+ operand_entry *curr)
{
tree op1, op2;
enum tree_code lcode, rcode;
gimple *def1, *def2;
int i;
- operand_entry_t oe;
+ operand_entry *oe;
if (opcode != BIT_IOR_EXPR && opcode != BIT_AND_EXPR)
return false;
static void
optimize_ops_list (enum tree_code opcode,
- vec<operand_entry_t> *ops)
+ vec<operand_entry *> *ops)
{
unsigned int length = ops->length ();
unsigned int i;
- operand_entry_t oe;
- operand_entry_t oelast = NULL;
+ operand_entry *oe;
+ operand_entry *oelast = NULL;
bool iterate = false;
if (length == 1)
and try the next two. */
if (oelast->rank == 0 && is_gimple_min_invariant (oelast->op))
{
- operand_entry_t oelm1 = (*ops)[length - 2];
+ operand_entry *oelm1 = (*ops)[length - 2];
if (oelm1->rank == 0
&& is_gimple_min_invariant (oelm1->op)
update_range_test (struct range_entry *range, struct range_entry *otherrange,
struct range_entry **otherrangep,
unsigned int count, enum tree_code opcode,
- vec<operand_entry_t> *ops, tree exp, gimple_seq seq,
+ vec<operand_entry *> *ops, tree exp, gimple_seq seq,
bool in_p, tree low, tree high, bool strict_overflow_p)
{
- operand_entry_t oe = (*ops)[range->idx];
+ operand_entry *oe = (*ops)[range->idx];
tree op = oe->op;
gimple *stmt = op ? SSA_NAME_DEF_STMT (op) :
last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
static bool
optimize_range_tests_xor (enum tree_code opcode, tree type,
tree lowi, tree lowj, tree highi, tree highj,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
struct range_entry *rangei,
struct range_entry *rangej)
{
static bool
optimize_range_tests_diff (enum tree_code opcode, tree type,
tree lowi, tree lowj, tree highi, tree highj,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
struct range_entry *rangei,
struct range_entry *rangej)
{
static bool
optimize_range_tests_1 (enum tree_code opcode, int first, int length,
- bool optimize_xor, vec<operand_entry_t> *ops,
+ bool optimize_xor, vec<operand_entry *> *ops,
struct range_entry *ranges)
{
int i, j;
static bool
optimize_range_tests_to_bit_test (enum tree_code opcode, int first, int length,
- vec<operand_entry_t> *ops,
+ vec<operand_entry *> *ops,
struct range_entry *ranges)
{
int i, j;
tree high = wide_int_to_tree (TREE_TYPE (lowi),
wi::to_widest (lowi)
+ prec - 1 - wi::clz (mask));
- operand_entry_t oe = (*ops)[ranges[i].idx];
+ operand_entry *oe = (*ops)[ranges[i].idx];
tree op = oe->op;
gimple *stmt = op ? SSA_NAME_DEF_STMT (op)
: last_stmt (BASIC_BLOCK_FOR_FN (cfun, oe->id));
static bool
optimize_range_tests (enum tree_code opcode,
- vec<operand_entry_t> *ops)
+ vec<operand_entry *> *ops)
{
unsigned int length = ops->length (), i, j, first;
- operand_entry_t oe;
+ operand_entry *oe;
struct range_entry *ranges;
bool any_changes = false;
return true and fill in *OPS recursively. */
static bool
-get_ops (tree var, enum tree_code code, vec<operand_entry_t> *ops,
+get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
struct loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
&& !get_ops (rhs[i], code, ops, loop)
&& has_single_use (rhs[i]))
{
- operand_entry_t oe = operand_entry_pool.allocate ();
+ operand_entry *oe = operand_entry_pool.allocate ();
oe->op = rhs[i];
oe->rank = code;
stmts. */
static tree
-update_ops (tree var, enum tree_code code, vec<operand_entry_t> ops,
+update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
unsigned int *pidx, struct loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
basic_block bb;
edge_iterator ei;
edge e;
- auto_vec<operand_entry_t> ops;
+ auto_vec<operand_entry *> ops;
auto_vec<inter_bb_range_test_entry> bbinfo;
bool any_changes = false;
&& has_single_use (rhs))
{
/* Otherwise, push the _234 range test itself. */
- operand_entry_t oe = operand_entry_pool.allocate ();
+ operand_entry *oe = operand_entry_pool.allocate ();
oe->op = rhs;
oe->rank = code;
loop_containing_stmt (stmt))))
{
/* Or push the GIMPLE_COND stmt itself. */
- operand_entry_t oe = operand_entry_pool.allocate ();
+ operand_entry *oe = operand_entry_pool.allocate ();
oe->op = NULL;
oe->rank = (e->flags & EDGE_TRUE_VALUE)
cases, but it is unlikely to be worth it. */
static void
-swap_ops_for_binary_stmt (vec<operand_entry_t> ops,
+swap_ops_for_binary_stmt (vec<operand_entry *> ops,
unsigned int opindex, gimple *stmt)
{
- operand_entry_t oe1, oe2, oe3;
+ operand_entry *oe1, *oe2, *oe3;
oe1 = ops[opindex];
oe2 = ops[opindex + 1];
&& !is_phi_for_stmt (stmt, oe1->op)
&& !is_phi_for_stmt (stmt, oe2->op)))
{
- struct operand_entry temp = *oe3;
+ operand_entry temp = *oe3;
oe3->op = oe1->op;
oe3->rank = oe1->rank;
oe1->op = temp.op;
&& !is_phi_for_stmt (stmt, oe1->op)
&& !is_phi_for_stmt (stmt, oe3->op)))
{
- struct operand_entry temp = *oe2;
+ operand_entry temp = *oe2;
oe2->op = oe1->op;
oe2->rank = oe1->rank;
oe1->op = temp.op;
static tree
rewrite_expr_tree (gimple *stmt, unsigned int opindex,
- vec<operand_entry_t> ops, bool changed)
+ vec<operand_entry *> ops, bool changed)
{
tree rhs1 = gimple_assign_rhs1 (stmt);
tree rhs2 = gimple_assign_rhs2 (stmt);
tree lhs = gimple_assign_lhs (stmt);
- operand_entry_t oe;
+ operand_entry *oe;
/* The final recursion case for this function is that you have
exactly two operations left.
rewrites them one at a time. */
if (opindex + 2 == ops.length ())
{
- operand_entry_t oe1, oe2;
+ operand_entry *oe1, *oe2;
oe1 = ops[opindex];
oe2 = ops[opindex + 1];
static void
rewrite_expr_tree_parallel (gassign *stmt, int width,
- vec<operand_entry_t> ops)
+ vec<operand_entry *> ops)
{
enum tree_code opcode = gimple_assign_rhs_code (stmt);
int op_num = ops.length ();
Place the operands of the expression tree in the vector named OPS. */
static void
-linearize_expr_tree (vec<operand_entry_t> *ops, gimple *stmt,
+linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
bool is_associative, bool set_visited)
{
tree binlhs = gimple_assign_rhs1 (stmt);
}
/* Used for repeated factor analysis. */
-struct repeat_factor_d
+struct repeat_factor
{
/* An SSA name that occurs in a multiply chain. */
tree factor;
tree repr;
};
-typedef struct repeat_factor_d repeat_factor, *repeat_factor_t;
-typedef const struct repeat_factor_d *const_repeat_factor_t;
-
static vec<repeat_factor> repeat_factor_vec;
static int
compare_repeat_factors (const void *x1, const void *x2)
{
- const_repeat_factor_t rf1 = (const_repeat_factor_t) x1;
- const_repeat_factor_t rf2 = (const_repeat_factor_t) x2;
+ const repeat_factor *rf1 = (const repeat_factor *) x1;
+ const repeat_factor *rf2 = (const repeat_factor *) x2;
if (rf1->count != rf2->count)
return rf1->count - rf2->count;
SSA name representing the value of the replacement sequence. */
static tree
-attempt_builtin_powi (gimple *stmt, vec<operand_entry_t> *ops)
+attempt_builtin_powi (gimple *stmt, vec<operand_entry *> *ops)
{
unsigned i, j, vec_len;
int ii;
- operand_entry_t oe;
- repeat_factor_t rf1, rf2;
+ operand_entry *oe;
+ repeat_factor *rf1, *rf2;
repeat_factor rfnew;
tree result = NULL_TREE;
tree target_ssa, iter_result;
if (dump_file && (dump_flags & TDF_DETAILS))
{
unsigned elt;
- repeat_factor_t rf;
+ repeat_factor *rf;
fputs ("Multiplying by cached product ", dump_file);
for (elt = j; elt < vec_len; elt++)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
unsigned elt;
- repeat_factor_t rf;
+ repeat_factor *rf;
fputs ("Building __builtin_pow call for cached product (",
dump_file);
for (elt = j; elt < vec_len; elt++)
if (dump_file && (dump_flags & TDF_DETAILS))
{
unsigned elt;
- repeat_factor_t rf;
+ repeat_factor *rf;
fputs ("Building __builtin_pow call for (", dump_file);
for (elt = j; elt < vec_len; elt++)
{
if (associative_tree_code (rhs_code))
{
- auto_vec<operand_entry_t> ops;
+ auto_vec<operand_entry *> ops;
tree powi_result = NULL_TREE;
/* There may be no immediate uses left by the time we
reassoc_branch_fixups.release ();
}
-void dump_ops_vector (FILE *file, vec<operand_entry_t> ops);
-void debug_ops_vector (vec<operand_entry_t> ops);
+void dump_ops_vector (FILE *file, vec<operand_entry *> ops);
+void debug_ops_vector (vec<operand_entry *> ops);
/* Dump the operand entry vector OPS to FILE. */
void
-dump_ops_vector (FILE *file, vec<operand_entry_t> ops)
+dump_ops_vector (FILE *file, vec<operand_entry *> ops)
{
- operand_entry_t oe;
+ operand_entry *oe;
unsigned int i;
FOR_EACH_VEC_ELT (ops, i, oe)
/* Dump the operand entry vector OPS to STDERR. */
DEBUG_FUNCTION void
-debug_ops_vector (vec<operand_entry_t> ops)
+debug_ops_vector (vec<operand_entry *> ops)
{
dump_ops_vector (stderr, ops);
}
static int max_stridx;
/* String information record. */
-typedef struct strinfo_struct
+struct strinfo
{
/* String length of this string. */
tree length;
/* A flag for the next maybe_invalidate that this strinfo shouldn't
be invalidated. Always cleared by maybe_invalidate. */
bool dont_invalidate;
-} *strinfo;
+};
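Every hunk in this patch follows the same shape: drop the typedef that made a value-looking name secretly a pointer, keep the struct under its own name, and spell the pointer explicitly at each use. A before/after sketch with illustrative names (strinfo_old/strinfo_new are hypothetical, not in the tree):

/* Before: the typedef makes the alias secretly a pointer.  */
typedef struct strinfo_struct_old { int refcount; } *strinfo_old;

/* After: the struct keeps its own name and every use says '*'.  */
struct strinfo_new { int refcount; };

static int
use (strinfo_new *si)   /* pointer-ness is now visible at the call site */
{
  return si ? si->refcount : 0;
}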
/* Pool for allocating strinfo entries. */
-static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool");
+static object_allocator<strinfo> strinfo_pool ("strinfo pool");
/* Vector mapping positive string indexes to strinfo, for the
current basic block. The first pointer in the vector is special,
a basic block pointer to the owner basic_block if shared.
If some other bb wants to modify the vector, the vector needs
to be unshared first, and only the owner bb is supposed to free it. */
-static vec<strinfo, va_heap, vl_embed> *stridx_to_strinfo;
+static vec<strinfo *, va_heap, vl_embed> *stridx_to_strinfo;
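Slot 0 of stridx_to_strinfo does double duty: when the vector is shared across blocks it holds the owning basic_block, cast to the element type (see the (strinfo *) bb stores further down). A minimal sketch of that tagging idea, using std::vector and mock types rather than GCC's vec:

#include <vector>

struct owner { int id; };   /* stands in for basic_block */
struct item { int len; };   /* stands in for strinfo */

/* Slot 0 stores the owner pointer disguised as an element pointer;
   real elements start at index 1.  */
static bool
owned_by (std::vector<item *> &v, owner *bb)
{
  return !v.empty () && v[0] == (item *) bb;
}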
/* One OFFSET->IDX mapping. */
struct stridxlist
int stridx;
} laststmt;
-static int get_stridx_plus_constant (strinfo, HOST_WIDE_INT, tree);
+static int get_stridx_plus_constant (strinfo *, HOST_WIDE_INT, tree);
/* Return strinfo vector entry IDX. */
-static inline strinfo
+static inline strinfo *
get_strinfo (int idx)
{
if (vec_safe_length (stridx_to_strinfo) <= (unsigned int) idx)
return 0;
if (ssa_ver_to_stridx[SSA_NAME_VERSION (rhs1)])
{
- strinfo si
+ strinfo *si
= get_strinfo (ssa_ver_to_stridx[SSA_NAME_VERSION (rhs1)]);
if (si
&& si->length
static void
unshare_strinfo_vec (void)
{
- strinfo si;
+ strinfo *si;
unsigned int i = 0;
gcc_assert (strinfo_shared ());
/* Create a new strinfo. */
-static strinfo
+static strinfo *
new_strinfo (tree ptr, int idx, tree length)
{
- strinfo si = strinfo_pool.allocate ();
+ strinfo *si = strinfo_pool.allocate ();
si->length = length;
si->ptr = ptr;
si->stmt = NULL;
/* Decrease strinfo refcount and free it if not referenced anymore. */
static inline void
-free_strinfo (strinfo si)
+free_strinfo (strinfo *si)
{
if (si && --si->refcount == 0)
strinfo_pool.remove (si);
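free_strinfo shows the refcount idiom used for shared strinfos: decrement, and only release the object when the last reference goes away. A generic sketch of the same discipline (plain delete instead of GCC's object_allocator):

struct node { int refcount; };

/* Drop one reference; free only when nobody else holds the node.  */
static void
node_unref (node *n)
{
  if (n && --n->refcount == 0)
    delete n;
}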
/* Set strinfo in the vector entry IDX to SI. */
static inline void
-set_strinfo (int idx, strinfo si)
+set_strinfo (int idx, strinfo *si)
{
if (vec_safe_length (stridx_to_strinfo) && (*stridx_to_strinfo)[0])
unshare_strinfo_vec ();
/* Return string length, or NULL if it can't be computed. */
static tree
-get_string_length (strinfo si)
+get_string_length (strinfo *si)
{
if (si->length)
return si->length;
static bool
maybe_invalidate (gimple *stmt)
{
- strinfo si;
+ strinfo *si;
unsigned int i;
bool nonempty = false;
if stridx_to_strinfo vector is shared with some other
bbs. */
-static strinfo
-unshare_strinfo (strinfo si)
+static strinfo *
+unshare_strinfo (strinfo *si)
{
- strinfo nsi;
+ strinfo *nsi;
if (si->refcount == 1 && !strinfo_shared ())
return si;
if all strinfos in between belong to the chain, otherwise
NULL. */
-static strinfo
-verify_related_strinfos (strinfo origsi)
+static strinfo *
+verify_related_strinfos (strinfo *origsi)
{
- strinfo si = origsi, psi;
+ strinfo *si = origsi, *psi;
if (origsi->first == 0)
return NULL;
been created. */
static int
-get_stridx_plus_constant (strinfo basesi, HOST_WIDE_INT off, tree ptr)
+get_stridx_plus_constant (strinfo *basesi, HOST_WIDE_INT off, tree ptr)
{
gcc_checking_assert (TREE_CODE (ptr) == SSA_NAME);
return 0;
HOST_WIDE_INT len = tree_to_shwi (basesi->length) - off;
- strinfo si = basesi, chainsi;
+ strinfo *si = basesi, *chainsi;
if (si->first || si->prev || si->next)
si = verify_related_strinfos (basesi);
if (si == NULL
set_strinfo (idx, si);
if (chainsi->next)
{
- strinfo nextsi = unshare_strinfo (get_strinfo (chainsi->next));
+ strinfo *nextsi = unshare_strinfo (get_strinfo (chainsi->next));
si->next = nextsi->idx;
nextsi->prev = idx;
}
to a zero-length string and if possible chain it to a related strinfo
chain whose part is or might be CHAINSI. */
-static strinfo
-zero_length_string (tree ptr, strinfo chainsi)
+static strinfo *
+zero_length_string (tree ptr, strinfo *chainsi)
{
- strinfo si;
+ strinfo *si;
int idx;
if (ssa_ver_to_stridx.length () <= SSA_NAME_VERSION (ptr))
ssa_ver_to_stridx.safe_grow_cleared (num_ssa_names);
but don't adjust ORIGSI). */
static void
-adjust_related_strinfos (location_t loc, strinfo origsi, tree adj)
+adjust_related_strinfos (location_t loc, strinfo *origsi, tree adj)
{
- strinfo si = verify_related_strinfos (origsi);
+ strinfo *si = verify_related_strinfos (origsi);
if (si == NULL)
return;
while (1)
{
- strinfo nsi;
+ strinfo *nsi;
if (si != origsi)
{
strinfo. */
static void
-adjust_last_stmt (strinfo si, gimple *stmt, bool is_strcat)
+adjust_last_stmt (strinfo *si, gimple *stmt, bool is_strcat)
{
tree vuse, callee, len;
struct laststmt_struct last = laststmt;
- strinfo lastsi, firstsi;
+ strinfo *lastsi, *firstsi;
unsigned len_arg_no = 2;
laststmt.stmt = NULL;
return;
while (firstsi != lastsi)
{
- strinfo nextsi;
+ strinfo *nextsi;
if (firstsi->next == 0)
return;
nextsi = get_strinfo (firstsi->next);
idx = get_stridx (src);
if (idx)
{
- strinfo si = NULL;
+ strinfo *si = NULL;
tree rhs;
if (idx < 0)
return;
if (idx)
{
- strinfo si = new_strinfo (src, idx, lhs);
+ strinfo *si = new_strinfo (src, idx, lhs);
set_strinfo (idx, si);
find_equal_ptrs (src, idx);
}
idx = get_stridx (src);
if (idx)
{
- strinfo si = NULL;
+ strinfo *si = NULL;
tree rhs;
if (idx < 0)
tree srcu = fold_convert_loc (loc, size_type_node, src);
tree length = fold_build2_loc (loc, MINUS_EXPR,
size_type_node, lhsu, srcu);
- strinfo si = new_strinfo (src, idx, length);
+ strinfo *si = new_strinfo (src, idx, length);
si->endptr = lhs;
set_strinfo (idx, si);
find_equal_ptrs (src, idx);
tree src, dst, srclen, len, lhs, args, type, fn, oldlen;
bool success;
gimple *stmt = gsi_stmt (*gsi);
- strinfo si, dsi, olddsi, zsi;
+ strinfo *si, *dsi, *olddsi, *zsi;
location_t loc;
bool with_bounds = gimple_call_with_bounds_p (stmt);
if (dsi->length == NULL_TREE)
{
- strinfo chainsi;
+ strinfo *chainsi;
/* If string length of src is unknown, use delayed length
computation. If string length of dst will be needed, it
int idx, didx;
tree src, dst, len, lhs, oldlen, newlen;
gimple *stmt = gsi_stmt (*gsi);
- strinfo si, dsi, olddsi;
+ strinfo *si, *dsi, *olddsi;
bool with_bounds = gimple_call_with_bounds_p (stmt);
len = gimple_call_arg (stmt, with_bounds ? 4 : 2);
tree src, dst, srclen, dstlen, len, lhs, args, type, fn, objsz, endptr;
bool success;
gimple *stmt = gsi_stmt (*gsi);
- strinfo si, dsi;
+ strinfo *si, *dsi;
location_t loc;
bool with_bounds = gimple_call_with_bounds_p (stmt);
tree length = NULL_TREE;
if (bcode == BUILT_IN_CALLOC)
length = build_int_cst (size_type_node, 0);
- strinfo si = new_strinfo (lhs, idx, length);
+ strinfo *si = new_strinfo (lhs, idx, length);
if (bcode == BUILT_IN_CALLOC)
si->endptr = lhs;
set_strinfo (idx, si);
int idx1 = get_stridx (ptr);
if (idx1 <= 0)
return true;
- strinfo si1 = get_strinfo (idx1);
+ strinfo *si1 = get_strinfo (idx1);
if (!si1)
return true;
gimple *stmt1 = si1->stmt;
gimple *stmt = gsi_stmt (*gsi);
tree lhs = gimple_assign_lhs (stmt), off;
int idx = get_stridx (gimple_assign_rhs1 (stmt));
- strinfo si, zsi;
+ strinfo *si, *zsi;
if (idx == 0)
return;
handle_char_store (gimple_stmt_iterator *gsi)
{
int idx = -1;
- strinfo si = NULL;
+ strinfo *si = NULL;
gimple *stmt = gsi_stmt (*gsi);
tree ssaname = NULL_TREE, lhs = gimple_assign_lhs (stmt);
stridx_to_strinfo = NULL;
else
{
- stridx_to_strinfo = ((vec<strinfo, va_heap, vl_embed> *) dombb->aux);
+ stridx_to_strinfo = ((vec<strinfo *, va_heap, vl_embed> *) dombb->aux);
if (stridx_to_strinfo)
{
for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
if (!strinfo_shared ())
{
unsigned int i;
- strinfo si;
+ strinfo *si;
for (i = 1;
vec_safe_iterate (stridx_to_strinfo, i, &si);
bb->aux = stridx_to_strinfo;
if (vec_safe_length (stridx_to_strinfo) && !strinfo_shared ())
- (*stridx_to_strinfo)[0] = (strinfo) bb;
+ (*stridx_to_strinfo)[0] = (strinfo *) bb;
}
/* Callback for walk_dominator_tree. Free strinfo vector if it is
{
if (bb->aux)
{
- stridx_to_strinfo = ((vec<strinfo, va_heap, vl_embed> *) bb->aux);
+ stridx_to_strinfo = ((vec<strinfo *, va_heap, vl_embed> *) bb->aux);
if (vec_safe_length (stridx_to_strinfo)
- && (*stridx_to_strinfo)[0] == (strinfo) bb)
+ && (*stridx_to_strinfo)[0] == (strinfo *) bb)
{
unsigned int i;
- strinfo si;
+ strinfo *si;
for (i = 1; vec_safe_iterate (stridx_to_strinfo, i, &si); ++i)
free_strinfo (si);
Additionally, the hash value for the struct is cached in hashval, and
in_worklist indicates whether it's currently part of the worklist. */
-struct same_succ_def : pointer_hash <same_succ_def>
+struct same_succ : pointer_hash <same_succ>
{
/* The bbs that have the same successor bbs. */
bitmap bbs;
hashval_t hashval;
/* hash_table support. */
- static inline hashval_t hash (const same_succ_def *);
- static int equal (const same_succ_def *, const same_succ_def *);
- static void remove (same_succ_def *);
+ static inline hashval_t hash (const same_succ *);
+ static int equal (const same_succ *, const same_succ *);
+ static void remove (same_succ *);
};
-typedef struct same_succ_def *same_succ;
-typedef const struct same_succ_def *const_same_succ;
/* hash routine for hash_table support, returns hashval of E. */
inline hashval_t
-same_succ_def::hash (const same_succ_def *e)
+same_succ::hash (const same_succ *e)
{
return e->hashval;
}
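With the *_def type and its pointer typedefs gone, the descriptor functions (hash/equal/remove) live directly on struct same_succ, which hash_table<same_succ> calls as statics. A compressed sketch of the descriptor shape, against a generic table rather than GCC's hash_table:

struct entry
{
  unsigned key;
  unsigned hashval;   /* cached, as same_succ does */

  /* hash_table-style descriptor functions on the type itself.  */
  static unsigned hash (const entry *e) { return e->hashval; }
  static bool equal (const entry *a, const entry *b) { return a->key == b->key; }
  static void remove (entry *e) { delete e; }
};

/* Any table templated on the descriptor calls D::hash (e), etc.  */
template <typename D>
unsigned descriptor_hash (const D *e) { return D::hash (e); }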
/* A group of bbs where 1 bb from bbs can replace the other bbs. */
-struct bb_cluster_def
+struct bb_cluster
{
/* The bbs in the cluster. */
bitmap bbs;
/* The bb to replace the cluster with. */
basic_block rep_bb;
};
-typedef struct bb_cluster_def *bb_cluster;
-typedef const struct bb_cluster_def *const_bb_cluster;
/* Per bb-info. */
/* The number of non-debug statements in the bb. */
int size;
/* The same_succ that this bb is a member of. */
- same_succ bb_same_succ;
+ same_succ *bb_same_succ;
/* The cluster that this bb is a member of. */
- bb_cluster cluster;
+ bb_cluster *cluster;
/* The vop state at the exit of a bb. This is short-lived data, used to
communicate data between update_block_by and update_vuses. */
tree vop_at_exit;
/* Prints E to FILE. */
static void
-same_succ_print (FILE *file, const same_succ e)
+same_succ_print (FILE *file, const same_succ *e)
{
unsigned int i;
bitmap_print (file, e->bbs, "bbs:", "\n");
/* Prints same_succ VE to VFILE. */
inline int
-ssa_same_succ_print_traverse (same_succ *pe, FILE *file)
+ssa_same_succ_print_traverse (same_succ **pe, FILE *file)
{
- const same_succ e = *pe;
+ const same_succ *e = *pe;
same_succ_print (file, e);
return 1;
}
/* Calculates hash value for same_succ VE. */
static hashval_t
-same_succ_hash (const_same_succ e)
+same_succ_hash (const same_succ *e)
{
inchash::hash hstate (bitmap_hash (e->succs));
int flags;
the other edge flags. */
static bool
-inverse_flags (const_same_succ e1, const_same_succ e2)
+inverse_flags (const same_succ *e1, const same_succ *e2)
{
int f1a, f1b, f2a, f2b;
int mask = ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
/* Compares SAME_SUCCs E1 and E2. */
int
-same_succ_def::equal (const same_succ_def *e1, const same_succ_def *e2)
+same_succ::equal (const same_succ *e1, const same_succ *e2)
{
unsigned int i, first1, first2;
gimple_stmt_iterator gsi1, gsi2;
/* Alloc and init a new SAME_SUCC. */
-static same_succ
+static same_succ *
same_succ_alloc (void)
{
- same_succ same = XNEW (struct same_succ_def);
+ same_succ *same = XNEW (struct same_succ);
same->bbs = BITMAP_ALLOC (NULL);
same->succs = BITMAP_ALLOC (NULL);
/* Delete same_succ E. */
void
-same_succ_def::remove (same_succ e)
+same_succ::remove (same_succ *e)
{
BITMAP_FREE (e->bbs);
BITMAP_FREE (e->succs);
/* Reset same_succ SAME. */
static void
-same_succ_reset (same_succ same)
+same_succ_reset (same_succ *same)
{
bitmap_clear (same->bbs);
bitmap_clear (same->succs);
same->succ_flags.truncate (0);
}
-static hash_table<same_succ_def> *same_succ_htab;
+static hash_table<same_succ> *same_succ_htab;
/* Array that is used to store the edge flags for a successor. */
/* Vector of bbs to process. */
-static vec<same_succ> worklist;
+static vec<same_succ *> worklist;
/* Prints worklist to FILE. */
/* Adds SAME to worklist. */
static void
-add_to_worklist (same_succ same)
+add_to_worklist (same_succ *same)
{
if (same->in_worklist)
return;
/* Add BB to same_succ_htab. */
static void
-find_same_succ_bb (basic_block bb, same_succ *same_p)
+find_same_succ_bb (basic_block bb, same_succ **same_p)
{
unsigned int j;
bitmap_iterator bj;
- same_succ same = *same_p;
- same_succ *slot;
+ same_succ *same = *same_p;
+ same_succ **slot;
edge_iterator ei;
edge e;
static void
find_same_succ (void)
{
- same_succ same = same_succ_alloc ();
+ same_succ *same = same_succ_alloc ();
basic_block bb;
FOR_EACH_BB_FN (bb, cfun)
same = same_succ_alloc ();
}
- same_succ_def::remove (same);
+ same_succ::remove (same);
}
/* Initializes worklist administration. */
init_worklist (void)
{
alloc_aux_for_blocks (sizeof (struct aux_bb_info));
- same_succ_htab = new hash_table<same_succ_def> (n_basic_blocks_for_fn (cfun));
+ same_succ_htab = new hash_table<same_succ> (n_basic_blocks_for_fn (cfun));
same_succ_edge_flags = XCNEWVEC (int, last_basic_block_for_fn (cfun));
deleted_bbs = BITMAP_ALLOC (NULL);
deleted_bb_preds = BITMAP_ALLOC (NULL);
static void
same_succ_flush_bb (basic_block bb)
{
- same_succ same = BB_SAME_SUCC (bb);
+ same_succ *same = BB_SAME_SUCC (bb);
BB_SAME_SUCC (bb) = NULL;
if (bitmap_single_bit_set_p (same->bbs))
same_succ_htab->remove_elt_with_hash (same, same->hashval);
unsigned int i;
bitmap_iterator bi;
basic_block bb;
- same_succ same;
+ same_succ *same;
bitmap_and_compl_into (deleted_bb_preds, deleted_bbs);
bitmap_clear (deleted_bbs);
if (same == NULL)
same = same_succ_alloc ();
}
- same_succ_def::remove (same);
+ same_succ::remove (same);
bitmap_clear (deleted_bb_preds);
}
/* Prints cluster C to FILE. */
static void
-print_cluster (FILE *file, bb_cluster c)
+print_cluster (FILE *file, bb_cluster *c)
{
if (c == NULL)
return;
/* Prints cluster C to stderr. */
-extern void debug_cluster (bb_cluster);
+extern void debug_cluster (bb_cluster *);
DEBUG_FUNCTION void
-debug_cluster (bb_cluster c)
+debug_cluster (bb_cluster *c)
{
print_cluster (stderr, c);
}
/* Update C->rep_bb, given that BB is added to the cluster. */
static void
-update_rep_bb (bb_cluster c, basic_block bb)
+update_rep_bb (bb_cluster *c, basic_block bb)
{
/* Initial. */
if (c->rep_bb == NULL)
/* Add BB to cluster C. Sets BB in C->bbs, and preds of BB in C->preds. */
static void
-add_bb_to_cluster (bb_cluster c, basic_block bb)
+add_bb_to_cluster (bb_cluster *c, basic_block bb)
{
edge e;
edge_iterator ei;
/* Allocate and init new cluster. */
-static bb_cluster
+static bb_cluster *
new_cluster (void)
{
- bb_cluster c;
- c = XCNEW (struct bb_cluster_def);
+ bb_cluster *c;
+ c = XCNEW (bb_cluster);
c->bbs = BITMAP_ALLOC (NULL);
c->preds = BITMAP_ALLOC (NULL);
c->rep_bb = NULL;
/* Delete clusters. */
static void
-delete_cluster (bb_cluster c)
+delete_cluster (bb_cluster *c)
{
if (c == NULL)
return;
/* Array that contains all clusters. */
-static vec<bb_cluster> all_clusters;
+static vec<bb_cluster *> all_clusters;
/* Allocate all cluster vectors. */
/* Merge cluster C2 into C1. */
static void
-merge_clusters (bb_cluster c1, bb_cluster c2)
+merge_clusters (bb_cluster *c1, bb_cluster *c2)
{
bitmap_ior_into (c1->bbs, c2->bbs);
bitmap_ior_into (c1->preds, c2->preds);
set_cluster (basic_block bb1, basic_block bb2)
{
basic_block merge_bb, other_bb;
- bb_cluster merge, old, c;
+ bb_cluster *merge, *old, *c;
if (BB_CLUSTER (bb1) == NULL && BB_CLUSTER (bb2) == NULL)
{
gimple_bb (s2) are members of SAME_SUCC. */
static bool
-gimple_equal_p (same_succ same_succ, gimple *s1, gimple *s2)
+gimple_equal_p (same_succ *same_succ, gimple *s1, gimple *s2)
{
unsigned int i;
tree lhs1, lhs2;
clusters them. */
static void
-find_duplicate (same_succ same_succ, basic_block bb1, basic_block bb2)
+find_duplicate (same_succ *same_succ, basic_block bb1, basic_block bb2)
{
gimple_stmt_iterator gsi1 = gsi_last_nondebug_bb (bb1);
gimple_stmt_iterator gsi2 = gsi_last_nondebug_bb (bb2);
phi alternatives for BB1 and BB2 are equal. */
static bool
-same_phi_alternatives (same_succ same_succ, basic_block bb1, basic_block bb2)
+same_phi_alternatives (same_succ *same_succ, basic_block bb1, basic_block bb2)
{
unsigned int s;
bitmap_iterator bs;
/* Within SAME_SUCC->bbs, find clusters of bbs which can be merged. */
static void
-find_clusters_1 (same_succ same_succ)
+find_clusters_1 (same_succ *same_succ)
{
basic_block bb1, bb2;
unsigned int i, j;
static void
find_clusters (void)
{
- same_succ same;
+ same_succ *same;
while (!worklist.is_empty ())
{
apply_clusters (void)
{
basic_block bb1, bb2;
- bb_cluster c;
+ bb_cluster *c;
unsigned int i, j;
bitmap_iterator bj;
int nr_bbs_removed = 0;
/* Range of values that can be associated with an SSA_NAME after VRP
has executed. */
-struct value_range_d
+struct value_range
{
/* Lattice value represented by this range. */
enum value_range_type type;
bitmap equiv;
};
-typedef struct value_range_d value_range_t;
-
#define VR_INITIALIZER { VR_UNDEFINED, NULL_TREE, NULL_TREE, NULL }
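With value_range_t gone, the struct name itself is used everywhere, and stack temporaries keep the aggregate initializer, as in the later value_range vrres = VR_INITIALIZER; sites. A tiny sketch of the usage with a mock enum and fields:

enum mock_range_type { MOCK_UNDEFINED, MOCK_RANGE };

struct mock_range
{
  mock_range_type type;
  int min, max;
};

#define MOCK_INITIALIZER { MOCK_UNDEFINED, 0, 0 }

static int
f ()
{
  mock_range vr = MOCK_INITIALIZER;   /* like: value_range vr = VR_INITIALIZER; */
  return vr.type == MOCK_UNDEFINED;
}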
/* Set of SSA names found live during the RPO traversal of the function
/* Local functions. */
static int compare_values (tree val1, tree val2);
static int compare_values_warnv (tree val1, tree val2, bool *);
-static void vrp_meet (value_range_t *, value_range_t *);
-static void vrp_intersect_ranges (value_range_t *, value_range_t *);
+static void vrp_meet (value_range *, value_range *);
+static void vrp_intersect_ranges (value_range *, value_range *);
static tree vrp_evaluate_conditional_warnv_with_ops (enum tree_code,
tree, tree, bool, bool *,
bool *);
/* Value range array. After propagation, VR_VALUE[I] holds the range
of values that SSA name N_I may take. */
static unsigned num_vr_values;
-static value_range_t **vr_value;
+static value_range **vr_value;
static bool values_propagated;
/* For a PHI node which sets SSA name N_I, VR_COUNTS[I] holds the
/* Set value range VR to VR_UNDEFINED. */
static inline void
-set_value_range_to_undefined (value_range_t *vr)
+set_value_range_to_undefined (value_range *vr)
{
vr->type = VR_UNDEFINED;
vr->min = vr->max = NULL_TREE;
/* Set value range VR to VR_VARYING. */
static inline void
-set_value_range_to_varying (value_range_t *vr)
+set_value_range_to_varying (value_range *vr)
{
vr->type = VR_VARYING;
vr->min = vr->max = NULL_TREE;
/* Set value range VR to {T, MIN, MAX, EQUIV}. */
static void
-set_value_range (value_range_t *vr, enum value_range_type t, tree min,
+set_value_range (value_range *vr, enum value_range_type t, tree min,
tree max, bitmap equiv)
{
#if defined ENABLE_CHECKING
extract ranges from var + CST op limit. */
static void
-set_and_canonicalize_value_range (value_range_t *vr, enum value_range_type t,
+set_and_canonicalize_value_range (value_range *vr, enum value_range_type t,
tree min, tree max, bitmap equiv)
{
/* Use the canonical setters for VR_UNDEFINED and VR_VARYING. */
/* Copy value range FROM into value range TO. */
static inline void
-copy_value_range (value_range_t *to, value_range_t *from)
+copy_value_range (value_range *to, value_range *from)
{
set_value_range (to, from->type, from->min, from->max, from->equiv);
}
infinity when we shouldn't. */
static inline void
-set_value_range_to_value (value_range_t *vr, tree val, bitmap equiv)
+set_value_range_to_value (value_range *vr, tree val, bitmap equiv)
{
gcc_assert (is_gimple_min_invariant (val));
if (TREE_OVERFLOW_P (val))
overflow does not occur. */
static inline void
-set_value_range_to_nonnegative (value_range_t *vr, tree type,
+set_value_range_to_nonnegative (value_range *vr, tree type,
bool overflow_infinity)
{
tree zero;
/* Set value range VR to a non-NULL range of type TYPE. */
static inline void
-set_value_range_to_nonnull (value_range_t *vr, tree type)
+set_value_range_to_nonnull (value_range *vr, tree type)
{
tree zero = build_int_cst (type, 0);
set_value_range (vr, VR_ANTI_RANGE, zero, zero, vr->equiv);
/* Set value range VR to a NULL range of type TYPE. */
static inline void
-set_value_range_to_null (value_range_t *vr, tree type)
+set_value_range_to_null (value_range *vr, tree type)
{
set_value_range_to_value (vr, build_int_cst (type, 0), vr->equiv);
}
/* Set value range VR to a range of a truthvalue of type TYPE. */
static inline void
-set_value_range_to_truthvalue (value_range_t *vr, tree type)
+set_value_range_to_truthvalue (value_range *vr, tree type)
{
if (TYPE_PRECISION (type) == 1)
set_value_range_to_varying (vr);
abs (min) >= abs (max), set VR to [-min, min]. */
static void
-abs_extent_range (value_range_t *vr, tree min, tree max)
+abs_extent_range (value_range *vr, tree min, tree max)
{
int cmp;
If we have no value ranges recorded (i.e., VRP is not running), then
return NULL. Otherwise create an empty range if none existed for VAR. */
-static value_range_t *
+static value_range *
get_value_range (const_tree var)
{
- static const struct value_range_d vr_const_varying
+ static const value_range vr_const_varying
= { VR_VARYING, NULL_TREE, NULL_TREE, NULL };
- value_range_t *vr;
+ value_range *vr;
tree sym;
unsigned ver = SSA_NAME_VERSION (var);
We should get here at most from the substitute-and-fold stage which
will never try to change values. */
if (ver >= num_vr_values)
- return CONST_CAST (value_range_t *, &vr_const_varying);
+ return CONST_CAST (value_range *, &vr_const_varying);
vr = vr_value[ver];
if (vr)
/* After propagation finished do not allocate new value-ranges. */
if (values_propagated)
- return CONST_CAST (value_range_t *, &vr_const_varying);
+ return CONST_CAST (value_range *, &vr_const_varying);
/* Create a default value range. */
- vr_value[ver] = vr = XCNEW (value_range_t);
+ vr_value[ver] = vr = XCNEW (value_range);
/* Defer allocating the equivalence set. */
vr->equiv = NULL;
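get_value_range allocates ranges lazily and, once propagation has finished, hands back a shared constant "varying" record instead of allocating. A self-contained sketch of that pattern, with const_cast standing in for GCC's CONST_CAST:

#include <cstdlib>

struct range { int type; };   /* 0 = varying, for the sketch */

static range **table;
static unsigned table_size;
static bool frozen;           /* like values_propagated */

static range *
get_range (unsigned i)
{
  static const range shared_varying = { 0 };
  if (i >= table_size || frozen)
    return const_cast<range *> (&shared_varying);  /* callers must not write */
  if (!table[i])
    table[i] = (range *) calloc (1, sizeof (range));  /* lazy default */
  return table[i];
}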
is the range object associated with another SSA name. */
static inline bool
-update_value_range (const_tree var, value_range_t *new_vr)
+update_value_range (const_tree var, value_range *new_vr)
{
- value_range_t *old_vr;
+ value_range *old_vr;
bool is_new;
/* If there is a value-range on the SSA name from earlier analysis
value_range_type rtype = get_range_info (var, &min, &max);
if (rtype == VR_RANGE || rtype == VR_ANTI_RANGE)
{
- value_range_d nr;
+ value_range nr;
nr.type = rtype;
nr.min = wide_int_to_tree (TREE_TYPE (var), min);
nr.max = wide_int_to_tree (TREE_TYPE (var), max);
add_equivalence (bitmap *equiv, const_tree var)
{
unsigned ver = SSA_NAME_VERSION (var);
- value_range_t *vr = vr_value[ver];
+ value_range *vr = vr_value[ver];
if (*equiv == NULL)
*equiv = BITMAP_ALLOC (NULL);
/* Return true if VR is ~[0, 0]. */
static inline bool
-range_is_nonnull (value_range_t *vr)
+range_is_nonnull (value_range *vr)
{
return vr->type == VR_ANTI_RANGE
&& integer_zerop (vr->min)
/* Return true if VR is [0, 0]. */
static inline bool
-range_is_null (value_range_t *vr)
+range_is_null (value_range *vr)
{
return vr->type == VR_RANGE
&& integer_zerop (vr->min)
a singleton. */
static inline bool
-range_int_cst_p (value_range_t *vr)
+range_int_cst_p (value_range *vr)
{
return (vr->type == VR_RANGE
&& TREE_CODE (vr->max) == INTEGER_CST
/* Return true if VR is an INTEGER_CST singleton. */
static inline bool
-range_int_cst_singleton_p (value_range_t *vr)
+range_int_cst_singleton_p (value_range *vr)
{
return (range_int_cst_p (vr)
&& !is_overflow_infinity (vr->min)
/* Return true if value range VR involves at least one symbol. */
static inline bool
-symbolic_range_p (value_range_t *vr)
+symbolic_range_p (value_range *vr)
{
return (!is_gimple_min_invariant (vr->min)
|| !is_gimple_min_invariant (vr->max));
/* Return true if value range VR involves exactly one symbol SYM. */
static bool
-symbolic_range_based_on_p (value_range_t *vr, const_tree sym)
+symbolic_range_based_on_p (value_range *vr, const_tree sym)
{
bool neg, min_has_symbol, max_has_symbol;
tree inv;
/* Return true if value range VR uses an overflow infinity. */
static inline bool
-overflow_infinity_range_p (value_range_t *vr)
+overflow_infinity_range_p (value_range *vr)
{
return (vr->type == VR_RANGE
&& (is_overflow_infinity (vr->min)
uses an overflow infinity. */
static bool
-usable_range_p (value_range_t *vr, bool *strict_overflow_p)
+usable_range_p (value_range *vr, bool *strict_overflow_p)
{
gcc_assert (vr->type == VR_RANGE);
if (is_overflow_infinity (vr->min))
&& TREE_CODE (base) == MEM_REF
&& TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
{
- value_range_t *vr = get_value_range (TREE_OPERAND (base, 0));
+ value_range *vr = get_value_range (TREE_OPERAND (base, 0));
if (range_is_nonnull (vr))
return true;
}
*/
static inline bool
-value_ranges_intersect_p (value_range_t *vr0, value_range_t *vr1)
+value_ranges_intersect_p (value_range *vr0, value_range *vr1)
{
/* The value ranges do not intersect if the maximum of the first range is
less than the minimum of the second range or vice versa.
/* Return true if *VR is known to only contain nonnegative values. */
static inline bool
-value_range_nonnegative_p (value_range_t *vr)
+value_range_nonnegative_p (value_range *vr)
{
/* Testing for VR_ANTI_RANGE is not useful here as any anti-range
which would return a useful value should be encoded as a
otherwise return NULL_TREE. */
static tree
-value_range_constant_singleton (value_range_t *vr)
+value_range_constant_singleton (value_range *vr)
{
if (vr->type == VR_RANGE
&& operand_equal_p (vr->min, vr->max, 0)
static bool
op_with_boolean_value_range_p (tree op)
{
- value_range_t *vr;
+ value_range *vr;
if (TYPE_PRECISION (TREE_TYPE (op)) == 1)
return true;
it in *VR_P. */
static void
-extract_range_from_assert (value_range_t *vr_p, tree expr)
+extract_range_from_assert (value_range *vr_p, tree expr)
{
tree var, cond, limit, min, max, type;
- value_range_t *limit_vr;
+ value_range *limit_vr;
enum tree_code cond_code;
var = ASSERT_EXPR_VAR (expr);
always false. */
static void
-extract_range_from_ssa_name (value_range_t *vr, tree var)
+extract_range_from_ssa_name (value_range *vr, tree var)
{
- value_range_t *var_vr = get_value_range (var);
+ value_range *var_vr = get_value_range (var);
if (var_vr->type != VR_VARYING)
copy_value_range (vr, var_vr);
static bool
zero_nonzero_bits_from_vr (const tree expr_type,
- value_range_t *vr,
+ value_range *vr,
wide_int *may_be_nonzero,
wide_int *must_be_nonzero)
{
*VR1 will be VR_UNDEFINED. */
static bool
-ranges_from_anti_range (value_range_t *ar,
- value_range_t *vr0, value_range_t *vr1)
+ranges_from_anti_range (value_range *ar,
+ value_range *vr0, value_range *vr1)
{
tree type = TREE_TYPE (ar->min);
*VR0 CODE *VR1. */
static void
-extract_range_from_multiplicative_op_1 (value_range_t *vr,
+extract_range_from_multiplicative_op_1 (value_range *vr,
enum tree_code code,
- value_range_t *vr0, value_range_t *vr1)
+ value_range *vr0, value_range *vr1)
{
enum value_range_type type;
tree val[4];
type EXPR_TYPE. The resulting range is stored in *VR. */
static void
-extract_range_from_binary_expr_1 (value_range_t *vr,
+extract_range_from_binary_expr_1 (value_range *vr,
enum tree_code code, tree expr_type,
- value_range_t *vr0_, value_range_t *vr1_)
+ value_range *vr0_, value_range *vr1_)
{
- value_range_t vr0 = *vr0_, vr1 = *vr1_;
- value_range_t vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
+ value_range vr0 = *vr0_, vr1 = *vr1_;
+ value_range vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
enum value_range_type type;
tree min = NULL_TREE, max = NULL_TREE;
int cmp;
extract_range_from_binary_expr_1 (vr, code, expr_type, &vrtem0, vr1_);
if (vrtem1.type != VR_UNDEFINED)
{
- value_range_t vrres = VR_INITIALIZER;
+ value_range vrres = VR_INITIALIZER;
extract_range_from_binary_expr_1 (&vrres, code, expr_type,
&vrtem1, vr1_);
vrp_meet (vr, &vrres);
extract_range_from_binary_expr_1 (vr, code, expr_type, vr0_, &vrtem0);
if (vrtem1.type != VR_UNDEFINED)
{
- value_range_t vrres = VR_INITIALIZER;
+ value_range vrres = VR_INITIALIZER;
extract_range_from_binary_expr_1 (&vrres, code, expr_type,
vr0_, &vrtem1);
vrp_meet (vr, &vrres);
&& range_int_cst_singleton_p (&vr1))
{
bool saved_flag_wrapv;
- value_range_t vr1p = VR_INITIALIZER;
+ value_range vr1p = VR_INITIALIZER;
vr1p.type = VR_RANGE;
vr1p.min = (wide_int_to_tree
(expr_type,
The resulting range is stored in *VR. */
static void
-extract_range_from_binary_expr (value_range_t *vr,
+extract_range_from_binary_expr (value_range *vr,
enum tree_code code,
tree expr_type, tree op0, tree op1)
{
- value_range_t vr0 = VR_INITIALIZER;
- value_range_t vr1 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
/* Get value ranges for each operand. For constant operands, create
a new value range with the operand to simplify processing. */
&& symbolic_range_based_on_p (&vr0, op1))
{
const bool minus_p = (code == MINUS_EXPR);
- value_range_t n_vr1 = VR_INITIALIZER;
+ value_range n_vr1 = VR_INITIALIZER;
/* Try with VR0 and [-INF, OP1]. */
if (is_gimple_min_invariant (minus_p ? vr0.max : vr0.min))
&& symbolic_range_based_on_p (&vr1, op0))
{
const bool minus_p = (code == MINUS_EXPR);
- value_range_t n_vr0 = VR_INITIALIZER;
+ value_range n_vr0 = VR_INITIALIZER;
/* Try with [-INF, OP0] and VR1. */
if (is_gimple_min_invariant (minus_p ? vr1.max : vr1.min))
The resulting range is stored in *VR. */
static void
-extract_range_from_unary_expr_1 (value_range_t *vr,
+extract_range_from_unary_expr_1 (value_range *vr,
enum tree_code code, tree type,
- value_range_t *vr0_, tree op0_type)
+ value_range *vr0_, tree op0_type)
{
- value_range_t vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
+ value_range vr0 = *vr0_, vrtem0 = VR_INITIALIZER, vrtem1 = VR_INITIALIZER;
/* VRP only operates on integral and pointer types. */
if (!(INTEGRAL_TYPE_P (op0_type)
{
/* -X is simply 0 - X, so re-use existing code that also handles
anti-ranges fine. */
- value_range_t zero = VR_INITIALIZER;
+ value_range zero = VR_INITIALIZER;
set_value_range_to_value (&zero, build_int_cst (type, 0), NULL);
extract_range_from_binary_expr_1 (vr, MINUS_EXPR, type, &zero, &vr0);
return;
{
/* ~X is simply -1 - X, so re-use existing code that also handles
anti-ranges fine. */
- value_range_t minusone = VR_INITIALIZER;
+ value_range minusone = VR_INITIALIZER;
set_value_range_to_value (&minusone, build_int_cst (type, -1), NULL);
extract_range_from_binary_expr_1 (vr, MINUS_EXPR,
type, &minusone, &vr0);
extract_range_from_unary_expr_1 (vr, code, type, &vrtem0, op0_type);
if (vrtem1.type != VR_UNDEFINED)
{
- value_range_t vrres = VR_INITIALIZER;
+ value_range vrres = VR_INITIALIZER;
extract_range_from_unary_expr_1 (&vrres, code, type,
&vrtem1, op0_type);
vrp_meet (vr, &vrres);
The resulting range is stored in *VR. */
static void
-extract_range_from_unary_expr (value_range_t *vr, enum tree_code code,
+extract_range_from_unary_expr (value_range *vr, enum tree_code code,
tree type, tree op0)
{
- value_range_t vr0 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
/* Get value ranges for the operand. For constant operands, create
a new value range with the operand to simplify processing. */
the ranges of each of its operands and the expression code. */
static void
-extract_range_from_cond_expr (value_range_t *vr, gassign *stmt)
+extract_range_from_cond_expr (value_range *vr, gassign *stmt)
{
tree op0, op1;
- value_range_t vr0 = VR_INITIALIZER;
- value_range_t vr1 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
/* Get value ranges for each operand. For constant operands, create
a new value range with the operand to simplify processing. */
on the range of its operand and the expression code. */
static void
-extract_range_from_comparison (value_range_t *vr, enum tree_code code,
+extract_range_from_comparison (value_range *vr, enum tree_code code,
tree type, tree op0, tree op1)
{
bool sop = false;
check_for_binary_op_overflow (enum tree_code subcode, tree type,
tree op0, tree op1, bool *ovf)
{
- value_range_t vr0 = VR_INITIALIZER;
- value_range_t vr1 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
if (TREE_CODE (op0) == SSA_NAME)
vr0 = *get_value_range (op0);
else if (TREE_CODE (op0) == INTEGER_CST)
Store the result in *VR */
static void
-extract_range_basic (value_range_t *vr, gimple *stmt)
+extract_range_basic (value_range *vr, gimple *stmt)
{
bool sop = false;
tree type = gimple_expr_type (stmt);
maxi = prec;
if (TREE_CODE (arg) == SSA_NAME)
{
- value_range_t *vr0 = get_value_range (arg);
+ value_range *vr0 = get_value_range (arg);
/* If arg is non-zero, then ffs or popcount
are non-zero. */
if (((vr0->type == VR_RANGE
mini = -2;
if (TREE_CODE (arg) == SSA_NAME)
{
- value_range_t *vr0 = get_value_range (arg);
+ value_range *vr0 = get_value_range (arg);
/* From clz of VR_RANGE minimum we can compute
result maximum. */
if (vr0->type == VR_RANGE
}
if (TREE_CODE (arg) == SSA_NAME)
{
- value_range_t *vr0 = get_value_range (arg);
+ value_range *vr0 = get_value_range (arg);
/* If arg is non-zero, then use [0, prec - 1]. */
if (((vr0->type == VR_RANGE
&& integer_nonzerop (vr0->min))
}
else
{
- value_range_t vr0 = VR_INITIALIZER;
- value_range_t vr1 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
bool saved_flag_wrapv = flag_wrapv;
/* Pretend the arithmetic is wrapping. If there is
any overflow, IMAGPART_EXPR will be set. */
in *VR. */
static void
-extract_range_from_assignment (value_range_t *vr, gassign *stmt)
+extract_range_from_assignment (value_range *vr, gassign *stmt)
{
enum tree_code code = gimple_assign_rhs_code (stmt);
for VAR. If so, update VR with the new limits. */
static void
-adjust_range_with_scev (value_range_t *vr, struct loop *loop,
+adjust_range_with_scev (value_range *vr, struct loop *loop,
gimple *stmt, tree var)
{
tree init, step, chrec, tmin, tmax, min, max, type, tem;
the number of latch executions is the correct thing to use. */
if (max_loop_iterations (loop, &nit))
{
- value_range_t maxvr = VR_INITIALIZER;
+ value_range maxvr = VR_INITIALIZER;
signop sgn = TYPE_SIGN (TREE_TYPE (step));
bool overflow;
static tree
-compare_ranges (enum tree_code comp, value_range_t *vr0, value_range_t *vr1,
+compare_ranges (enum tree_code comp, value_range *vr0, value_range *vr1,
bool *strict_overflow_p)
{
/* VARYING or UNDEFINED ranges cannot be compared. */
if (vr0->type == VR_RANGE)
{
/* To simplify processing, make VR0 the anti-range. */
- value_range_t *tmp = vr0;
+ value_range *tmp = vr0;
vr0 = vr1;
vr1 = tmp;
}
infinity was used in the test. */
static tree
-compare_range_with_value (enum tree_code comp, value_range_t *vr, tree val,
+compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
bool *strict_overflow_p)
{
if (vr->type == VR_VARYING || vr->type == VR_UNDEFINED)
/* Debugging dumps. */
-void dump_value_range (FILE *, value_range_t *);
-void debug_value_range (value_range_t *);
+void dump_value_range (FILE *, value_range *);
+void debug_value_range (value_range *);
void dump_all_value_ranges (FILE *);
void debug_all_value_ranges (void);
void dump_vr_equiv (FILE *, bitmap);
/* Dump value range VR to FILE. */
void
-dump_value_range (FILE *file, value_range_t *vr)
+dump_value_range (FILE *file, value_range *vr)
{
if (vr == NULL)
fprintf (file, "[]");
/* Dump value range VR to stderr. */
DEBUG_FUNCTION void
-debug_value_range (value_range_t *vr)
+debug_value_range (value_range *vr)
{
dump_value_range (stderr, vr);
fprintf (stderr, "\n");
static void
check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
{
- value_range_t* vr = NULL;
+ value_range *vr = NULL;
tree low_sub, up_sub;
tree low_bound, up_bound, up_bound_p1;
tree base;
values_propagated = false;
num_vr_values = num_ssa_names;
- vr_value = XCNEWVEC (value_range_t *, num_vr_values);
+ vr_value = XCNEWVEC (value_range *, num_vr_values);
vr_phi_edge_counts = XCNEWVEC (int, num_ssa_names);
FOR_EACH_BB_FN (bb, cfun)
{
if (TREE_CODE (name) == SSA_NAME)
{
- value_range_t *vr = get_value_range (name);
+ value_range *vr = get_value_range (name);
if (vr->type == VR_RANGE
&& (vr->min == vr->max
|| operand_equal_p (vr->min, vr->max, 0)))
if (!gimple_nop_p (def_stmt)
&& prop_simulate_again_p (def_stmt))
return NULL_TREE;
- value_range_t *vr = get_value_range (name);
+ value_range *vr = get_value_range (name);
if (range_int_cst_singleton_p (vr))
return vr->min;
}
&& TYPE_MAX_VALUE (TREE_TYPE (lhs)))
|| POINTER_TYPE_P (TREE_TYPE (lhs))))
{
- value_range_t new_vr = VR_INITIALIZER;
+ value_range new_vr = VR_INITIALIZER;
/* Try folding the statement to a constant first. */
tree tem = gimple_fold_stmt_to_constant_1 (stmt, vrp_valueize,
SSA_PROP_NOT_INTERESTING. If there are no
{REAL,IMAG}PART_EXPR uses at all,
return SSA_PROP_VARYING. */
- value_range_t new_vr = VR_INITIALIZER;
+ value_range new_vr = VR_INITIALIZER;
extract_range_basic (&new_vr, use_stmt);
- value_range_t *old_vr = get_value_range (use_lhs);
+ value_range *old_vr = get_value_range (use_lhs);
if (old_vr->type != new_vr.type
|| !vrp_operand_equal_p (old_vr->min, new_vr.min)
|| !vrp_operand_equal_p (old_vr->max, new_vr.max)
or a symbolic range containing the SSA_NAME only if the value range
is varying or undefined. */
-static inline value_range_t
+static inline value_range
get_vr_for_comparison (int i)
{
- value_range_t vr = *get_value_range (ssa_name (i));
+ value_range vr = *get_value_range (ssa_name (i));
/* If name N_i does not have a valid range, use N_i as its own
range. This allows us to compare against names that may
tree retval, t;
int used_strict_overflow;
bool sop;
- value_range_t equiv_vr;
+ value_range equiv_vr;
/* Get the set of equivalences for VAR. */
e = get_value_range (var)->equiv;
of the loop just to check N1 and N2 ranges. */
EXECUTE_IF_SET_IN_BITMAP (e1, 0, i1, bi1)
{
- value_range_t vr1 = get_vr_for_comparison (i1);
+ value_range vr1 = get_vr_for_comparison (i1);
t = retval = NULL_TREE;
EXECUTE_IF_SET_IN_BITMAP (e2, 0, i2, bi2)
{
bool sop = false;
- value_range_t vr2 = get_vr_for_comparison (i2);
+ value_range vr2 = get_vr_for_comparison (i2);
t = compare_ranges (comp, &vr1, &vr2, &sop);
if (t)
tree op0, tree op1,
bool * strict_overflow_p)
{
- value_range_t *vr0, *vr1;
+ value_range *vr0, *vr1;
vr0 = (TREE_CODE (op0) == SSA_NAME) ? get_value_range (op0) : NULL;
vr1 = (TREE_CODE (op1) == SSA_NAME) ? get_value_range (op1) : NULL;
always fold regardless of the value of OP0. If -Wtype-limits
was specified, emit a warning. */
tree type = TREE_TYPE (op0);
- value_range_t *vr0 = get_value_range (op0);
+ value_range *vr0 = get_value_range (op0);
if (vr0->type == VR_RANGE
&& INTEGRAL_TYPE_P (type)
Returns true if the default label is not needed. */
static bool
-find_case_label_ranges (gswitch *stmt, value_range_t *vr, size_t *min_idx1,
+find_case_label_ranges (gswitch *stmt, value_range *vr, size_t *min_idx1,
size_t *max_idx1, size_t *min_idx2,
size_t *max_idx2)
{
vrp_visit_switch_stmt (gswitch *stmt, edge *taken_edge_p)
{
tree op, val;
- value_range_t *vr;
+ value_range *vr;
size_t i = 0, j = 0, k, l;
bool take_default;
in *VR0. This may not be the smallest possible such range. */
static void
-vrp_intersect_ranges_1 (value_range_t *vr0, value_range_t *vr1)
+vrp_intersect_ranges_1 (value_range *vr0, value_range *vr1)
{
- value_range_t saved;
+ value_range saved;
/* If either range is VR_VARYING the other one wins. */
if (vr1->type == VR_VARYING)
}
static void
-vrp_intersect_ranges (value_range_t *vr0, value_range_t *vr1)
+vrp_intersect_ranges (value_range *vr0, value_range *vr1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
may not be the smallest possible such range. */
static void
-vrp_meet_1 (value_range_t *vr0, value_range_t *vr1)
+vrp_meet_1 (value_range *vr0, value_range *vr1)
{
- value_range_t saved;
+ value_range saved;
if (vr0->type == VR_UNDEFINED)
{
}
static void
-vrp_meet (value_range_t *vr0, value_range_t *vr1)
+vrp_meet (value_range *vr0, value_range *vr1)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
{
size_t i;
tree lhs = PHI_RESULT (phi);
- value_range_t *lhs_vr = get_value_range (lhs);
- value_range_t vr_result = VR_INITIALIZER;
+ value_range *lhs_vr = get_value_range (lhs);
+ value_range vr_result = VR_INITIALIZER;
bool first = true;
int edges, old_edges;
struct loop *l;
if (e->flags & EDGE_EXECUTABLE)
{
tree arg = PHI_ARG_DEF (phi, i);
- value_range_t vr_arg;
+ value_range vr_arg;
++edges;
tree val = NULL;
tree op0 = gimple_assign_rhs1 (stmt);
tree op1 = gimple_assign_rhs2 (stmt);
- value_range_t *vr = get_value_range (op0);
+ value_range *vr = get_value_range (op0);
if (rhs_code == TRUNC_MOD_EXPR
&& TREE_CODE (op1) == INTEGER_CST
simplify_abs_using_ranges (gimple *stmt)
{
tree op = gimple_assign_rhs1 (stmt);
- value_range_t *vr = get_value_range (op);
+ value_range *vr = get_value_range (op);
if (vr)
{
tree op0 = gimple_assign_rhs1 (stmt);
tree op1 = gimple_assign_rhs2 (stmt);
tree op = NULL_TREE;
- value_range_t vr0 = VR_INITIALIZER;
- value_range_t vr1 = VR_INITIALIZER;
+ value_range vr0 = VR_INITIALIZER;
+ value_range vr1 = VR_INITIALIZER;
wide_int may_be_nonzero0, may_be_nonzero1;
wide_int must_be_nonzero0, must_be_nonzero1;
wide_int mask;
static tree
test_for_singularity (enum tree_code cond_code, tree op0,
- tree op1, value_range_t *vr,
+ tree op1, value_range *vr,
bool *strict_overflow_p)
{
tree min = NULL;
by PRECISION and UNSIGNED_P. */
static bool
-range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
+range_fits_type_p (value_range *vr, unsigned dest_precision, signop dest_sgn)
{
tree src_type;
unsigned src_precision;
&& INTEGRAL_TYPE_P (TREE_TYPE (op0))
&& is_gimple_min_invariant (op1))
{
- value_range_t *vr = get_value_range (op0);
+ value_range *vr = get_value_range (op0);
/* If we have range information for OP0, then we might be
able to simplify this conditional. */
if (TREE_CODE (innerop) == SSA_NAME
&& !POINTER_TYPE_P (TREE_TYPE (innerop)))
{
- value_range_t *vr = get_value_range (innerop);
+ value_range *vr = get_value_range (innerop);
if (range_int_cst_p (vr)
&& range_fits_type_p (vr,
simplify_switch_using_ranges (gswitch *stmt)
{
tree op = gimple_switch_index (stmt);
- value_range_t *vr;
+ value_range *vr;
bool take_default;
edge e;
edge_iterator ei;
{
tree innerop, middleop, finaltype;
gimple *def_stmt;
- value_range_t *innervr;
+ value_range *innervr;
signop inner_sgn, middle_sgn, final_sgn;
unsigned inner_prec, middle_prec, final_prec;
widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
gimple *stmt)
{
tree rhs1 = gimple_assign_rhs1 (stmt);
- value_range_t *vr = get_value_range (rhs1);
+ value_range *vr = get_value_range (rhs1);
machine_mode fltmode = TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
machine_mode mode;
tree tem;
if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
{
- value_range_t new_vr = VR_INITIALIZER;
+ value_range new_vr = VR_INITIALIZER;
tree lhs = gimple_assign_lhs (assign_stmt);
if (TREE_CODE (lhs) == SSA_NAME
register is described by a chain of these structures.
The chains are pretty short (usually 1 or 2 elements) and thus
a chain is the best data structure. */
-typedef struct attrs_def
+struct attrs
{
/* Pointer to next member of the list. */
- struct attrs_def *next;
+ attrs *next;
/* The rtx of register. */
rtx loc;
/* Offset from start of DECL. */
HOST_WIDE_INT offset;
-} *attrs;
+};
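Because attrs is no longer a pointer typedef, the functions that used to take attrs * (a pointer to the list head) now take attrs **, which makes the in-place head update explicit. A minimal list-clear in exactly the shape of attrs_list_clear:

struct link { link *next; };

/* Empty the list *LISTP, updating the caller's head pointer.  */
static void
list_clear (link **listp)
{
  for (link *l = *listp, *next; l; l = next)
    {
      next = l->next;
      delete l;
    }
  *listp = 0;
}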
/* Structure for chaining the locations. */
struct location_chain
/* Enumeration type used to discriminate various types of one-part
variables. */
-typedef enum onepart_enum
+enum onepart_enum
{
/* Not a one-part variable. */
NOT_ONEPART = 0,
ONEPART_DEXPR = 2,
/* A VALUE. */
ONEPART_VALUE = 3
-} onepart_enum_t;
+};
/* Structure describing where the variable is located. */
-typedef struct variable_def
+struct variable
{
/* The declaration of the variable, or an RTL value being handled
like a declaration. */
/* The variable parts. */
variable_part var_part[1];
-} *variable;
-typedef const struct variable_def *const_variable;
+};
/* Pointer to the BB's information specific to variable tracking pass. */
-#define VTI(BB) ((variable_tracking_info) (BB)->aux)
+#define VTI(BB) ((variable_tracking_info *) (BB)->aux)
/* Macro to access MEM_OFFSET as an HOST_WIDE_INT. Evaluates MEM twice. */
#define INT_MEM_OFFSET(mem) (MEM_OFFSET_KNOWN_P (mem) ? MEM_OFFSET (mem) : 0)
/* Access VAR's Ith part's offset, checking that it's not a one-part
variable. */
#define VAR_PART_OFFSET(var, i) __extension__ \
-(*({ variable const __v = (var); \
+(*({ variable *const __v = (var); \
gcc_checking_assert (!__v->onepart); \
&__v->var_part[(i)].aux.offset; }))
/* Access VAR's one-part auxiliary data, checking that it is a
one-part variable. */
#define VAR_LOC_1PAUX(var) __extension__ \
-(*({ variable const __v = (var); \
+(*({ variable *const __v = (var); \
gcc_checking_assert (__v->onepart); \
&__v->var_part[0].aux.onepaux; }))
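Both accessor macros rely on GNU statement expressions to evaluate the argument once, assert on it, and still yield an lvalue. A reduced version of the same construct (a GCC/Clang extension; plain assert stands in for gcc_checking_assert, and box/BOX_PAYLOAD are hypothetical):

#include <cassert>

struct box { int onepart; int payload; };

#define BOX_PAYLOAD(B) __extension__ \
(*({ box *const __b = (B);          \
     assert (!__b->onepart);        \
     &__b->payload; }))

static void
bump (box *b)
{
  BOX_PAYLOAD (b)++;   /* usable as an lvalue, like VAR_PART_OFFSET */
}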
/* Variable hashtable helpers. */
-struct variable_hasher : pointer_hash <variable_def>
+struct variable_hasher : pointer_hash <variable>
{
typedef void *compare_type;
- static inline hashval_t hash (const variable_def *);
- static inline bool equal (const variable_def *, const void *);
- static inline void remove (variable_def *);
+ static inline hashval_t hash (const variable *);
+ static inline bool equal (const variable *, const void *);
+ static inline void remove (variable *);
};
/* The hash function for variable_htab, computes the hash value
from the declaration of variable X. */
inline hashval_t
-variable_hasher::hash (const variable_def *v)
+variable_hasher::hash (const variable *v)
{
return dv_htab_hash (v->dv);
}
/* Compare the declaration of variable X with declaration Y. */
inline bool
-variable_hasher::equal (const variable_def *v, const void *y)
+variable_hasher::equal (const variable *v, const void *y)
{
decl_or_value dv = CONST_CAST2 (decl_or_value, const void *, y);
/* Free the element of VARIABLE_HTAB (its type is struct variable). */
inline void
-variable_hasher::remove (variable_def *var)
+variable_hasher::remove (variable *var)
{
variable_htab_free (var);
}
HOST_WIDE_INT stack_adjust;
/* Attributes for registers (lists of attrs). */
- attrs regs[FIRST_PSEUDO_REGISTER];
+ attrs *regs[FIRST_PSEUDO_REGISTER];
/* Variable locations. */
shared_hash *vars;
/* The structure (one for each basic block) containing the information
needed for variable tracking. */
-typedef struct variable_tracking_info_def
+struct variable_tracking_info
{
/* The vector of micro operations. */
vec<micro_operation> mos;
/* Has the block been flooded in VTA? */
bool flooded;
-} *variable_tracking_info;
+};
/* Alloc pool for struct attrs. */
-object_allocator<attrs_def> attrs_def_pool ("attrs_def pool");
+object_allocator<attrs> attrs_pool ("attrs pool");
/* Alloc pool for struct variable with MAX_VAR_PARTS entries. */
static pool_allocator var_pool
- ("variable_def pool", sizeof (variable_def) +
- (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
+ ("variable_def pool", sizeof (variable) +
+ (MAX_VAR_PARTS - 1) * sizeof (((variable *)NULL)->var_part[0]));
/* Alloc pool for struct variable with a single var_part entry. */
static pool_allocator valvar_pool
- ("small variable_def pool", sizeof (variable_def));
+ ("small variable_def pool", sizeof (variable));
/* Alloc pool for struct location_chain. */
static object_allocator<location_chain> location_chain_pool
static bitmap scratch_regs = NULL;
#ifdef HAVE_window_save
-typedef struct GTY(()) parm_reg {
+struct GTY(()) parm_reg {
rtx outgoing;
rtx incoming;
-} parm_reg_t;
+};
/* Vector of windowed parameter registers, if any. */
-static vec<parm_reg_t, va_gc> *windowed_parm_regs = NULL;
+static vec<parm_reg, va_gc> *windowed_parm_regs = NULL;
#endif
/* Variable used to tell whether cselib_process_insn called our hook. */
HOST_WIDE_INT *);
static bool vt_stack_adjustments (void);
-static void init_attrs_list_set (attrs *);
-static void attrs_list_clear (attrs *);
-static attrs attrs_list_member (attrs, decl_or_value, HOST_WIDE_INT);
-static void attrs_list_insert (attrs *, decl_or_value, HOST_WIDE_INT, rtx);
-static void attrs_list_copy (attrs *, attrs);
-static void attrs_list_union (attrs *, attrs);
+static void init_attrs_list_set (attrs **);
+static void attrs_list_clear (attrs **);
+static attrs *attrs_list_member (attrs *, decl_or_value, HOST_WIDE_INT);
+static void attrs_list_insert (attrs **, decl_or_value, HOST_WIDE_INT, rtx);
+static void attrs_list_copy (attrs **, attrs *);
+static void attrs_list_union (attrs **, attrs *);
-static variable_def **unshare_variable (dataflow_set *set, variable_def **slot,
- variable var, enum var_init_status);
+static variable **unshare_variable (dataflow_set *set, variable **slot,
+ variable *var, enum var_init_status);
static void vars_copy (variable_table_type *, variable_table_type *);
static tree var_debug_decl (tree);
static void var_reg_set (dataflow_set *, rtx, enum var_init_status, rtx);
static void dataflow_set_copy (dataflow_set *, dataflow_set *);
static int variable_union_info_cmp_pos (const void *, const void *);
static void dataflow_set_union (dataflow_set *, dataflow_set *);
-static location_chain *find_loc_in_1pdv (rtx, variable, variable_table_type *);
+static location_chain *find_loc_in_1pdv (rtx, variable *,
+ variable_table_type *);
static bool canon_value_cmp (rtx, rtx);
static int loc_cmp (rtx, rtx);
static bool variable_part_different_p (variable_part *, variable_part *);
-static bool onepart_variable_different_p (variable, variable);
-static bool variable_different_p (variable, variable);
+static bool onepart_variable_different_p (variable *, variable *);
+static bool variable_different_p (variable *, variable *);
static bool dataflow_set_different (dataflow_set *, dataflow_set *);
static void dataflow_set_destroy (dataflow_set *);
static bool compute_bb_dataflow (basic_block);
static bool vt_find_locations (void);
-static void dump_attrs_list (attrs);
-static void dump_var (variable);
+static void dump_attrs_list (attrs *);
+static void dump_var (variable *);
static void dump_vars (variable_table_type *);
static void dump_dataflow_set (dataflow_set *);
static void dump_dataflow_sets (void);
static void set_dv_changed (decl_or_value, bool);
-static void variable_was_changed (variable, dataflow_set *);
-static variable_def **set_slot_part (dataflow_set *, rtx, variable_def **,
- decl_or_value, HOST_WIDE_INT,
- enum var_init_status, rtx);
+static void variable_was_changed (variable *, dataflow_set *);
+static variable **set_slot_part (dataflow_set *, rtx, variable **,
+ decl_or_value, HOST_WIDE_INT,
+ enum var_init_status, rtx);
static void set_variable_part (dataflow_set *, rtx,
decl_or_value, HOST_WIDE_INT,
enum var_init_status, rtx, enum insert_option);
-static variable_def **clobber_slot_part (dataflow_set *, rtx,
- variable_def **, HOST_WIDE_INT, rtx);
+static variable **clobber_slot_part (dataflow_set *, rtx,
+ variable **, HOST_WIDE_INT, rtx);
static void clobber_variable_part (dataflow_set *, rtx,
decl_or_value, HOST_WIDE_INT, rtx);
-static variable_def **delete_slot_part (dataflow_set *, rtx, variable_def **,
- HOST_WIDE_INT);
+static variable **delete_slot_part (dataflow_set *, rtx, variable **,
+ HOST_WIDE_INT);
static void delete_variable_part (dataflow_set *, rtx,
decl_or_value, HOST_WIDE_INT);
static void emit_notes_in_bb (basic_block, dataflow_set *);
{
unsigned int i, nregs = vec_safe_length (windowed_parm_regs);
rtx rtl = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs * 2));
- parm_reg_t *p;
+ parm_reg *p;
FOR_EACH_VEC_SAFE_ELT (windowed_parm_regs, i, p)
{
/* Return nonzero if a decl_or_value must not have more than one
variable part. The returned value discriminates among various
kinds of one-part DVs according to enum onepart_enum. */
-static inline onepart_enum_t
+static inline onepart_enum
dv_onepart_p (decl_or_value dv)
{
tree decl;
/* Return the variable pool to be used for a dv of type ONEPART. */
static inline pool_allocator &
-onepart_pool (onepart_enum_t onepart)
+onepart_pool (onepart_enum onepart)
{
return onepart ? valvar_pool : var_pool;
}
/* Allocate a variable from the corresponding variable pool. */
-static inline variable_def *
-onepart_pool_allocate (onepart_enum_t onepart)
+static inline variable *
+onepart_pool_allocate (onepart_enum onepart)
{
- return (variable_def*) onepart_pool (onepart).allocate ();
+ return (variable*) onepart_pool (onepart).allocate ();
}
/* Build a decl_or_value out of a decl. */
debug_generic_stmt (dv_as_decl (dv));
}
-static void loc_exp_dep_clear (variable var);
+static void loc_exp_dep_clear (variable *var);
/* Free the element of VARIABLE_HTAB (its type is struct variable). */
variable_htab_free (void *elem)
{
int i;
- variable var = (variable) elem;
+ variable *var = (variable *) elem;
location_chain *node, *next;
gcc_checking_assert (var->refcount > 0);
/* Initialize the set (array) SET of attrs to empty lists. */
static void
-init_attrs_list_set (attrs *set)
+init_attrs_list_set (attrs **set)
{
int i;
/* Make the list *LISTP empty. */
static void
-attrs_list_clear (attrs *listp)
+attrs_list_clear (attrs **listp)
{
- attrs list, next;
+ attrs *list, *next;
for (list = *listp; list; list = next)
{
/* Return true if the pair of DECL and OFFSET is the member of the LIST. */
-static attrs
-attrs_list_member (attrs list, decl_or_value dv, HOST_WIDE_INT offset)
+static attrs *
+attrs_list_member (attrs *list, decl_or_value dv, HOST_WIDE_INT offset)
{
for (; list; list = list->next)
if (dv_as_opaque (list->dv) == dv_as_opaque (dv) && list->offset == offset)
/* Insert the triplet DECL, OFFSET, LOC to the list *LISTP. */
static void
-attrs_list_insert (attrs *listp, decl_or_value dv,
+attrs_list_insert (attrs **listp, decl_or_value dv,
HOST_WIDE_INT offset, rtx loc)
{
- attrs list = new attrs_def;
+ attrs *list = new attrs;
list->loc = loc;
list->dv = dv;
list->offset = offset;
/* Copy all nodes from SRC and create a list *DSTP of the copies. */
static void
-attrs_list_copy (attrs *dstp, attrs src)
+attrs_list_copy (attrs **dstp, attrs *src)
{
attrs_list_clear (dstp);
for (; src; src = src->next)
{
- attrs n = new attrs_def;
+ attrs *n = new attrs;
n->loc = src->loc;
n->dv = src->dv;
n->offset = src->offset;
/* Add all nodes from SRC which are not in *DSTP to *DSTP. */
static void
-attrs_list_union (attrs *dstp, attrs src)
+attrs_list_union (attrs **dstp, attrs *src)
{
for (; src; src = src->next)
{
*DSTP. */
static void
-attrs_list_mpdv_union (attrs *dstp, attrs src, attrs src2)
+attrs_list_mpdv_union (attrs **dstp, attrs *src, attrs *src2)
{
gcc_assert (!*dstp);
for (; src; src = src->next)
/* Return true if VAR is shared, or maybe because VARS is shared. */
static inline bool
-shared_var_p (variable var, shared_hash *vars)
+shared_var_p (variable *var, shared_hash *vars)
{
/* Don't count an entry in the changed_variables table as a duplicate. */
return ((var->refcount > 1 + (int) var->in_changed_variables)
/* Unshare *PVARS if shared and return slot for DV. If INS is
INSERT, insert it if not already present. */
-static inline variable_def **
+static inline variable **
shared_hash_find_slot_unshare_1 (shared_hash **pvars, decl_or_value dv,
hashval_t dvhash, enum insert_option ins)
{
return shared_hash_htab (*pvars)->find_slot_with_hash (dv, dvhash, ins);
}
-static inline variable_def **
+static inline variable **
shared_hash_find_slot_unshare (shared_hash **pvars, decl_or_value dv,
enum insert_option ins)
{
If it is not present, insert it only if VARS is not shared; otherwise
return NULL. */
-static inline variable_def **
+static inline variable **
shared_hash_find_slot_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash)
{
return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash,
? NO_INSERT : INSERT);
}
-static inline variable_def **
+static inline variable **
shared_hash_find_slot (shared_hash *vars, decl_or_value dv)
{
return shared_hash_find_slot_1 (vars, dv, dv_htab_hash (dv));
/* Return slot for DV only if it is already present in the hash table. */
-static inline variable_def **
+static inline variable **
shared_hash_find_slot_noinsert_1 (shared_hash *vars, decl_or_value dv,
hashval_t dvhash)
{
return shared_hash_htab (vars)->find_slot_with_hash (dv, dvhash, NO_INSERT);
}
-static inline variable_def **
+static inline variable **
shared_hash_find_slot_noinsert (shared_hash *vars, decl_or_value dv)
{
return shared_hash_find_slot_noinsert_1 (vars, dv, dv_htab_hash (dv));
/* Return variable for DV or NULL if not already present in the hash
table. */
-static inline variable
+static inline variable *
shared_hash_find_1 (shared_hash *vars, decl_or_value dv, hashval_t dvhash)
{
return shared_hash_htab (vars)->find_with_hash (dv, dvhash);
}
-static inline variable
+static inline variable *
shared_hash_find (shared_hash *vars, decl_or_value dv)
{
return shared_hash_find_1 (vars, dv, dv_htab_hash (dv));
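
Two conventions run through this block of hunks. The `_1` suffix marks the variant taking a precomputed hashval_t, so loops that already paid for dv_htab_hash can reuse it; the unsuffixed wrapper hashes once and delegates. And the `variable **` return type (formerly `variable_def **`) is a pointer to the table slot itself: find_slot_with_hash hands the slot back so the caller can overwrite it in place. A sketch of the slot idiom, with a hypothetical constructor:

static variable *
get_or_create (variable_table_type *htab, decl_or_value dv)
{
  variable **slot
    = htab->find_slot_with_hash (dv, dv_htab_hash (dv), INSERT);
  if (!*slot)
    *slot = make_variable (dv);      /* hypothetical allocator */
  return *slot;
}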
/* Return a copy of a variable VAR and insert it to dataflow set SET. */
-static variable_def **
-unshare_variable (dataflow_set *set, variable_def **slot, variable var,
+static variable **
+unshare_variable (dataflow_set *set, variable **slot, variable *var,
enum var_init_status initialized)
{
- variable new_var;
+ variable *new_var;
int i;
new_var = onepart_pool_allocate (var->onepart);
*slot = new_var;
if (var->in_changed_variables)
{
- variable_def **cslot
+ variable **cslot
= changed_variables->find_slot_with_hash (var->dv,
dv_htab_hash (var->dv),
NO_INSERT);
vars_copy (variable_table_type *dst, variable_table_type *src)
{
variable_iterator_type hi;
- variable var;
+ variable *var;
FOR_EACH_HASH_TABLE_ELEMENT (*src, var, variable, hi)
{
- variable_def **dstp;
+ variable **dstp;
var->refcount++;
dstp = dst->find_slot_with_hash (var->dv, dv_htab_hash (var->dv),
INSERT);
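
vars_copy is also why unshare_variable exists: copying a table only bumps each node's refcount and stores the same pointer into the destination slot, so dataflow sets alias their variables until one of them writes. The write path then clones. A sketch of that copy-on-write step, with a hypothetical clone helper standing in for unshare_variable's body:

static variable *
ensure_private (variable **slot)
{
  variable *var = *slot;
  if (var->refcount > 1)             /* shared: clone before writing */
    {
      variable *copy = clone_variable (var);  /* hypothetical */
      copy->refcount = 1;
      copy->in_changed_variables = false;
      var->refcount--;
      *slot = copy;
      var = copy;
    }
  return var;
}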
decl_or_value dv, HOST_WIDE_INT offset, rtx set_src,
enum insert_option iopt)
{
- attrs node;
+ attrs *node;
bool decl_p = dv_is_decl_p (dv);
if (decl_p)
static enum var_init_status
get_init_value (dataflow_set *set, rtx loc, decl_or_value dv)
{
- variable var;
+ variable *var;
int i;
enum var_init_status ret_val = VAR_INIT_STATUS_UNKNOWN;
{
tree decl = REG_EXPR (loc);
HOST_WIDE_INT offset = REG_OFFSET (loc);
- attrs node, next;
- attrs *nextp;
+ attrs *node, *next;
+ attrs **nextp;
decl = var_debug_decl (decl);
static void
var_reg_delete (dataflow_set *set, rtx loc, bool clobber)
{
- attrs *nextp = &set->regs[REGNO (loc)];
- attrs node, next;
+ attrs **nextp = &set->regs[REGNO (loc)];
+ attrs *node, *next;
if (clobber)
{
static void
var_regno_delete (dataflow_set *set, int regno)
{
- attrs *reg = &set->regs[regno];
- attrs node, next;
+ attrs **reg = &set->regs[regno];
+ attrs *node, *next;
for (node = *reg; node; node = next)
{
{
rtx x;
decl_or_value dv;
- variable var;
+ variable *var;
location_chain *l;
gcc_checking_assert (GET_CODE (loc) == VALUE);
canonicalized itself. */
int
-drop_overlapping_mem_locs (variable_def **slot, overlapping_mems *coms)
+drop_overlapping_mem_locs (variable **slot, overlapping_mems *coms)
{
dataflow_set *set = coms->set;
rtx mloc = coms->loc, addr = coms->addr;
- variable var = *slot;
+ variable *var = *slot;
if (var->onepart == ONEPART_VALUE)
{
static void
val_reset (dataflow_set *set, decl_or_value dv)
{
- variable var = shared_hash_find (set->vars, dv) ;
+ variable *var = shared_hash_find (set->vars, dv);
location_chain *node;
rtx cval;
if (REG_P (loc))
{
- attrs node, found = NULL;
+ attrs *node, *found = NULL;
for (node = set->regs[REGNO (loc)]; node; node = node->next)
if (dv_is_value_p (node->dv)
we keep the newest locations in the beginning. */
static int
-variable_union (variable src, dataflow_set *set)
+variable_union (variable *src, dataflow_set *set)
{
- variable dst;
- variable_def **dstp;
+ variable *dst;
+ variable **dstp;
int i, j, k;
dstp = shared_hash_find_slot (set->vars, src->dv);
{
dstp = unshare_variable (set, dstp, dst,
VAR_INIT_STATUS_UNKNOWN);
- dst = (variable)*dstp;
+ dst = (variable *)*dstp;
}
}
else
{
variable_iterator_type hi;
- variable var;
+ variable *var;
FOR_EACH_HASH_TABLE_ELEMENT (*shared_hash_htab (src->vars),
var, variable, hi)
be in star-canonical form. */
static location_chain *
-find_loc_in_1pdv (rtx loc, variable var, variable_table_type *vars)
+find_loc_in_1pdv (rtx loc, variable *var, variable_table_type *vars)
{
location_chain *node;
enum rtx_code loc_code;
for (node = var->var_part[0].loc_chain; node; node = node->next)
{
decl_or_value dv;
- variable rvar;
+ variable *rvar;
if (GET_CODE (node->loc) != loc_code)
{
static void
intersect_loc_chains (rtx val, location_chain **dest, struct dfset_merge *dsm,
- location_chain *s1node, variable s2var)
+ location_chain *s1node, variable *s2var)
{
dataflow_set *s1set = dsm->cur;
dataflow_set *s2set = dsm->src;
&& !VALUE_RECURSED_INTO (s1node->loc))
{
decl_or_value dv = dv_from_value (s1node->loc);
- variable svar = shared_hash_find (s1set->vars, dv);
+ variable *svar = shared_hash_find (s1set->vars, dv);
if (svar)
{
if (svar->n_var_parts == 1)
/* Check the order of entries in one-part variables. */
int
-canonicalize_loc_order_check (variable_def **slot,
+canonicalize_loc_order_check (variable **slot,
dataflow_set *data ATTRIBUTE_UNUSED)
{
- variable var = *slot;
+ variable *var = *slot;
location_chain *node, *next;
#ifdef ENABLE_RTL_CHECKING
the connections bidirectional. */
int
-canonicalize_values_mark (variable_def **slot, dataflow_set *set)
+canonicalize_values_mark (variable **slot, dataflow_set *set)
{
- variable var = *slot;
+ variable *var = *slot;
decl_or_value dv = var->dv;
rtx val;
location_chain *node;
else
{
decl_or_value odv = dv_from_value (node->loc);
- variable_def **oslot;
+ variable **oslot;
oslot = shared_hash_find_slot_noinsert (set->vars, odv);
set_slot_part (set, val, oslot, odv, 0,
variables, canonicalizing equivalence sets into star shapes. */
int
-canonicalize_values_star (variable_def **slot, dataflow_set *set)
+canonicalize_values_star (variable **slot, dataflow_set *set)
{
- variable var = *slot;
+ variable *var = *slot;
decl_or_value dv = var->dv;
location_chain *node;
decl_or_value cdv;
rtx val, cval;
- variable_def **cslot;
+ variable **cslot;
bool has_value;
bool has_marks;
}
else if (GET_CODE (node->loc) == REG)
{
- attrs list = set->regs[REGNO (node->loc)], *listp;
+ attrs *list = set->regs[REGNO (node->loc)], **listp;
/* Change an existing attribute referring to dv so that it
refers to cdv, removing any duplicate this might
get to a variable that references another member of the set. */
int
-canonicalize_vars_star (variable_def **slot, dataflow_set *set)
+canonicalize_vars_star (variable **slot, dataflow_set *set)
{
- variable var = *slot;
+ variable *var = *slot;
decl_or_value dv = var->dv;
location_chain *node;
rtx cval;
decl_or_value cdv;
- variable_def **cslot;
- variable cvar;
+ variable **cslot;
+ variable *cvar;
location_chain *cnode;
if (!var->onepart || var->onepart == ONEPART_VALUE)
intersection. */
static int
-variable_merge_over_cur (variable s1var, struct dfset_merge *dsm)
+variable_merge_over_cur (variable *s1var, struct dfset_merge *dsm)
{
dataflow_set *dst = dsm->dst;
- variable_def **dstslot;
- variable s2var, dvar = NULL;
+ variable **dstslot;
+ variable *s2var, *dvar = NULL;
decl_or_value dv = s1var->dv;
- onepart_enum_t onepart = s1var->onepart;
+ onepart_enum onepart = s1var->onepart;
rtx val;
hashval_t dvhash;
location_chain *node, **nodep;
if (GET_CODE (node->loc) == REG)
{
- attrs list;
+ attrs *list;
for (list = dst->regs[REGNO (node->loc)]; list; list = list->next)
if (GET_MODE (node->loc) == GET_MODE (list->loc)
if (GET_CODE (node->loc) == VALUE)
{
decl_or_value dv = dv_from_value (node->loc);
- variable_def **slot = NULL;
+ variable **slot = NULL;
if (shared_hash_shared (dst->vars))
slot = shared_hash_find_slot_noinsert (dst->vars, dv);
INSERT);
if (!*slot)
{
- variable var = onepart_pool_allocate (ONEPART_VALUE);
+ variable *var = onepart_pool_allocate (ONEPART_VALUE);
var->dv = dv;
var->refcount = 1;
var->n_var_parts = 1;
variable_merge_over_cur(). */
static int
-variable_merge_over_src (variable s2var, struct dfset_merge *dsm)
+variable_merge_over_src (variable *s2var, struct dfset_merge *dsm)
{
dataflow_set *dst = dsm->dst;
decl_or_value dv = s2var->dv;
if (!s2var->onepart)
{
- variable_def **dstp = shared_hash_find_slot (dst->vars, dv);
+ variable **dstp = shared_hash_find_slot (dst->vars, dv);
*dstp = s2var;
s2var->refcount++;
return 1;
int i;
size_t src1_elems, src2_elems;
variable_iterator_type hi;
- variable var;
+ variable *var;
src1_elems = shared_hash_htab (src1->vars)->elements ();
src2_elems = shared_hash_htab (src2->vars)->elements ();
dataflow_set_equiv_regs (dataflow_set *set)
{
int i;
- attrs list, *listp;
+ attrs *list, **listp;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
if (list->offset == 0 && dv_onepart_p (list->dv))
{
rtx cval = canon[(int)GET_MODE (list->loc)];
- variable_def **slot;
+ variable **slot;
if (!cval)
continue;
be unshared and 1-part. */
static void
-remove_duplicate_values (variable var)
+remove_duplicate_values (variable *var)
{
location_chain *node, **nodep;
variables that don't have value numbers for them. */
int
-variable_post_merge_new_vals (variable_def **slot, dfset_post_merge *dfpm)
+variable_post_merge_new_vals (variable **slot, dfset_post_merge *dfpm)
{
dataflow_set *set = dfpm->set;
- variable var = *slot;
+ variable *var = *slot;
location_chain *node;
if (!var->onepart || !var->n_var_parts)
gcc_assert (!VALUE_RECURSED_INTO (node->loc));
else if (GET_CODE (node->loc) == REG)
{
- attrs att, *attp, *curp = NULL;
+ attrs *att, **attp, **curp = NULL;
if (var->refcount != 1)
{
chosen expression. */
int
-variable_post_merge_perm_vals (variable_def **pslot, dfset_post_merge *dfpm)
+variable_post_merge_perm_vals (variable **pslot, dfset_post_merge *dfpm)
{
dataflow_set *set = dfpm->set;
- variable pvar = *pslot, var;
+ variable *pvar = *pslot, *var;
location_chain *pnode;
decl_or_value dv;
- attrs att;
+ attrs *att;
gcc_assert (dv_is_value_p (pvar->dv)
&& pvar->n_var_parts == 1);
{
location_chain *node;
decl_or_value dv;
- variable var;
+ variable *var;
location_chain *where = NULL;
if (!val)
the variable itself, directly or within a VALUE. */
int
-dataflow_set_preserve_mem_locs (variable_def **slot, dataflow_set *set)
+dataflow_set_preserve_mem_locs (variable **slot, dataflow_set *set)
{
- variable var = *slot;
+ variable *var = *slot;
if (var->onepart == ONEPART_VDECL || var->onepart == ONEPART_DEXPR)
{
value. */
int
-dataflow_set_remove_mem_locs (variable_def **slot, dataflow_set *set)
+dataflow_set_remove_mem_locs (variable **slot, dataflow_set *set)
{
- variable var = *slot;
+ variable *var = *slot;
if (var->onepart == ONEPART_VALUE)
{
They must be in canonical order. */
static bool
-onepart_variable_different_p (variable var1, variable var2)
+onepart_variable_different_p (variable *var1, variable *var2)
{
location_chain *lc1, *lc2;
/* Return true if variables VAR1 and VAR2 are different. */
static bool
-variable_different_p (variable var1, variable var2)
+variable_different_p (variable *var1, variable *var2)
{
int i;
dataflow_set_different (dataflow_set *old_set, dataflow_set *new_set)
{
variable_iterator_type hi;
- variable var1;
+ variable *var1;
if (old_set->vars == new_set->vars)
return false;
var1, variable, hi)
{
variable_table_type *htab = shared_hash_htab (new_set->vars);
- variable var2 = htab->find_with_hash (var1->dv, dv_htab_hash (var1->dv));
+ variable *var2 = htab->find_with_hash (var1->dv, dv_htab_hash (var1->dv));
if (!var2)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
tree decl = NULL_TREE; /* The variable being copied around. */
rtx set_src = NULL_RTX; /* The value for "decl" stored in "src". */
- variable var;
+ variable *var;
location_chain *nextp;
int i;
bool found;
/* Print the content of the LIST to dump file. */
static void
-dump_attrs_list (attrs list)
+dump_attrs_list (attrs *list)
{
for (; list; list = list->next)
{
/* Print the information about variable *SLOT to dump file. */
int
-dump_var_tracking_slot (variable_def **slot, void *data ATTRIBUTE_UNUSED)
+dump_var_tracking_slot (variable **slot, void *data ATTRIBUTE_UNUSED)
{
- variable var = *slot;
+ variable *var = *slot;
dump_var (var);
/* Print the information about variable VAR to dump file. */
static void
-dump_var (variable var)
+dump_var (variable *var)
{
int i;
location_chain *node;
/* Return the variable for DV in dropped_values, inserting one if
requested with INSERT. */
-static inline variable
+static inline variable *
variable_from_dropped (decl_or_value dv, enum insert_option insert)
{
- variable_def **slot;
- variable empty_var;
- onepart_enum_t onepart;
+ variable **slot;
+ variable *empty_var;
+ onepart_enum onepart;
slot = dropped_values->find_slot_with_hash (dv, dv_htab_hash (dv), insert);
/* Recover the one-part aux from dropped_values. */
static struct onepart_aux *
-recover_dropped_1paux (variable var)
+recover_dropped_1paux (variable *var)
{
- variable dvar;
+ variable *dvar;
gcc_checking_assert (var->onepart);
if it has no locations delete it from SET's hash table. */
static void
-variable_was_changed (variable var, dataflow_set *set)
+variable_was_changed (variable *var, dataflow_set *set)
{
hashval_t hash = dv_htab_hash (var->dv);
if (emit_notes)
{
- variable_def **slot;
+ variable **slot;
/* Remember this decl or VALUE has been added to changed_variables. */
set_dv_changed (var->dv, true);
if (*slot)
{
- variable old_var = *slot;
+ variable *old_var = *slot;
gcc_assert (old_var->in_changed_variables);
old_var->in_changed_variables = false;
if (var != old_var && var->onepart)
if (set && var->n_var_parts == 0)
{
- onepart_enum_t onepart = var->onepart;
- variable empty_var = NULL;
- variable_def **dslot = NULL;
+ onepart_enum onepart = var->onepart;
+ variable *empty_var = NULL;
+ variable **dslot = NULL;
if (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR)
{
gcc_assert (set);
if (var->n_var_parts == 0)
{
- variable_def **slot;
+ variable **slot;
drop_var:
slot = shared_hash_find_slot_noinsert (set->vars, var->dv);
have, if it should be inserted. */
static inline int
-find_variable_location_part (variable var, HOST_WIDE_INT offset,
+find_variable_location_part (variable *var, HOST_WIDE_INT offset,
int *insertion_point)
{
int pos, low, high;
return -1;
}
-static variable_def **
-set_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
+static variable **
+set_slot_part (dataflow_set *set, rtx loc, variable **slot,
decl_or_value dv, HOST_WIDE_INT offset,
enum var_init_status initialized, rtx set_src)
{
int pos;
location_chain *node, *next;
location_chain **nextp;
- variable var;
- onepart_enum_t onepart;
+ variable *var;
+ onepart_enum onepart;
var = *slot;
enum var_init_status initialized, rtx set_src,
enum insert_option iopt)
{
- variable_def **slot;
+ variable **slot;
if (iopt == NO_INSERT)
slot = shared_hash_find_slot_noinsert (set->vars, dv);
The variable part is specified by variable's declaration or value
DV and offset OFFSET. */
-static variable_def **
-clobber_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
+static variable **
+clobber_slot_part (dataflow_set *set, rtx loc, variable **slot,
HOST_WIDE_INT offset, rtx set_src)
{
- variable var = *slot;
+ variable *var = *slot;
int pos = find_variable_location_part (var, offset, NULL);
if (pos >= 0)
{
if (REG_P (node->loc))
{
- attrs anode, anext;
- attrs *anextp;
+ attrs *anode, *anext;
+ attrs **anextp;
/* Remove the variable part from the register's
list, but preserve any other variable parts
clobber_variable_part (dataflow_set *set, rtx loc, decl_or_value dv,
HOST_WIDE_INT offset, rtx set_src)
{
- variable_def **slot;
+ variable **slot;
if (!dv_as_opaque (dv)
|| (!dv_is_value_p (dv) && ! DECL_P (dv_as_decl (dv))))
variable part is specified by its SET->vars slot SLOT and offset
OFFSET and the part's location by LOC. */
-static variable_def **
-delete_slot_part (dataflow_set *set, rtx loc, variable_def **slot,
+static variable **
+delete_slot_part (dataflow_set *set, rtx loc, variable **slot,
HOST_WIDE_INT offset)
{
- variable var = *slot;
+ variable *var = *slot;
int pos = find_variable_location_part (var, offset, NULL);
if (pos >= 0)
delete_variable_part (dataflow_set *set, rtx loc, decl_or_value dv,
HOST_WIDE_INT offset)
{
- variable_def **slot = shared_hash_find_slot_noinsert (set->vars, dv);
+ variable **slot = shared_hash_find_slot_noinsert (set->vars, dv);
if (!slot)
return;
room for COUNT dependencies. */
static void
-loc_exp_dep_alloc (variable var, int count)
+loc_exp_dep_alloc (variable *var, int count)
{
size_t allocsize;
removing them from the back-links lists too. */
static void
-loc_exp_dep_clear (variable var)
+loc_exp_dep_clear (variable *var)
{
while (VAR_LOC_DEP_VEC (var) && !VAR_LOC_DEP_VEC (var)->is_empty ())
{
back-links in VARS. */
static void
-loc_exp_insert_dep (variable var, rtx x, variable_table_type *vars)
+loc_exp_insert_dep (variable *var, rtx x, variable_table_type *vars)
{
decl_or_value dv;
- variable xvar;
+ variable *xvar;
loc_exp_dep *led;
dv = dv_from_rtx (x);
true if we found any pending-recursion results. */
static bool
-loc_exp_dep_set (variable var, rtx result, rtx *value, int count,
+loc_exp_dep_set (variable *var, rtx result, rtx *value, int count,
variable_table_type *vars)
{
bool pending_recursion = false;
attempt to compute a current location. */
static void
-notify_dependents_of_resolved_value (variable ivar, variable_table_type *vars)
+notify_dependents_of_resolved_value (variable *ivar, variable_table_type *vars)
{
loc_exp_dep *led, *next;
for (led = VAR_LOC_DEP_LST (ivar); led; led = next)
{
decl_or_value dv = led->dv;
- variable var;
+ variable *var;
next = led->next;
it is pending recursion resolution. */
static inline rtx
-vt_expand_var_loc_chain (variable var, bitmap regs, void *data, bool *pendrecp)
+vt_expand_var_loc_chain (variable *var, bitmap regs, void *data,
+ bool *pendrecp)
{
struct expand_loc_callback_data *elcd
= (struct expand_loc_callback_data *) data;
struct expand_loc_callback_data *elcd
= (struct expand_loc_callback_data *) data;
decl_or_value dv;
- variable var;
+ variable *var;
rtx result, subreg;
bool pending_recursion = false;
bool from_empty = false;
in VARS, updating their CUR_LOCs in the process. */
static rtx
-vt_expand_1pvar (variable var, variable_table_type *vars)
+vt_expand_1pvar (variable *var, variable_table_type *vars)
{
struct expand_loc_callback_data data;
rtx loc;
before or after instruction INSN. */
int
-emit_note_insn_var_location (variable_def **varp, emit_note_data *data)
+emit_note_insn_var_location (variable **varp, emit_note_data *data)
{
- variable var = *varp;
+ variable *var = *varp;
rtx_insn *insn = data->insn;
enum emit_note_where where = data->where;
variable_table_type *vars = data->vars;
values) entries that aren't user variables. */
int
-var_track_values_to_stack (variable_def **slot,
+var_track_values_to_stack (variable **slot,
vec<rtx, va_heap> *changed_values_stack)
{
- variable var = *slot;
+ variable *var = *slot;
if (var->onepart == ONEPART_VALUE)
changed_values_stack->safe_push (dv_as_value (var->dv));
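
Workers like this one, the canonicalize_* functions, and emit_notes_for_differences_1/2 all fit hash_table::traverse's callback shape: take the `variable **` slot plus one argument, return nonzero to keep walking. That shape is why the patch touches so many `variable_def **slot` parameters at once. A minimal sketch with a hypothetical counting payload:

static int
count_onepart_values (variable **slot, int *n)
{
  variable *var = *slot;
  if (var->onepart == ONEPART_VALUE)
    ++*n;
  return 1;                          /* nonzero: continue traversal */
}

/* used as, e.g.: htab->traverse <int *, count_onepart_values> (&n);  */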
remove_value_from_changed_variables (rtx val)
{
decl_or_value dv = dv_from_rtx (val);
- variable_def **slot;
- variable var;
+ variable **slot;
+ variable *var;
slot = changed_variables->find_slot_with_hash (dv, dv_htab_hash (dv),
NO_INSERT);
notify_dependents_of_changed_value (rtx val, variable_table_type *htab,
vec<rtx, va_heap> *changed_values_stack)
{
- variable_def **slot;
- variable var;
+ variable **slot;
+ variable *var;
loc_exp_dep *led;
decl_or_value dv = dv_from_rtx (val);
while ((led = VAR_LOC_DEP_LST (var)))
{
decl_or_value ldv = led->dv;
- variable ivar;
+ variable *ivar;
/* Deactivate and remove the backlink, as it was "used up". It
makes no sense to attempt to notify the same entity again:
same variable in hash table DATA or is not there at all. */
int
-emit_notes_for_differences_1 (variable_def **slot, variable_table_type *new_vars)
+emit_notes_for_differences_1 (variable **slot, variable_table_type *new_vars)
{
- variable old_var, new_var;
+ variable *old_var, *new_var;
old_var = *slot;
new_var = new_vars->find_with_hash (old_var->dv, dv_htab_hash (old_var->dv));
if (!new_var)
{
/* Variable has disappeared. */
- variable empty_var = NULL;
+ variable *empty_var = NULL;
if (old_var->onepart == ONEPART_VALUE
|| old_var->onepart == ONEPART_DEXPR)
table DATA. */
int
-emit_notes_for_differences_2 (variable_def **slot, variable_table_type *old_vars)
+emit_notes_for_differences_2 (variable **slot, variable_table_type *old_vars)
{
- variable old_var, new_var;
+ variable *old_var, *new_var;
new_var = *slot;
old_var = old_vars->find_with_hash (new_var->dv, dv_htab_hash (new_var->dv));
&& HARD_REGISTER_P (incoming)
&& OUTGOING_REGNO (REGNO (incoming)) != REGNO (incoming))
{
- parm_reg_t p;
+ parm_reg p;
p.incoming = incoming;
incoming
= gen_rtx_REG_offset (incoming, GET_MODE (incoming),
for (i = 0; i < XVECLEN (incoming, 0); i++)
{
rtx reg = XEXP (XVECEXP (incoming, 0, i), 0);
- parm_reg_t p;
+ parm_reg p;
p.incoming = reg;
reg = gen_rtx_REG_offset (reg, GET_MODE (reg),
OUTGOING_REGNO (REGNO (reg)), 0);
rtx reg = XEXP (incoming, 0);
if (OUTGOING_REGNO (REGNO (reg)) != REGNO (reg))
{
- parm_reg_t p;
+ parm_reg p;
p.incoming = reg;
reg = gen_raw_REG (GET_MODE (reg), OUTGOING_REGNO (REGNO (reg)));
p.outgoing = reg;
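
The parm_reg hunks are the value-type flavor of the cleanup: `typedef struct parm_reg {...} parm_reg_t;` collapses to a plain `struct parm_reg`, since in C++ the struct tag is already a usable type name (onepart_enum_t to onepart_enum earlier is the enum analogue). A sketch, with the push helper being hypothetical:

struct parm_reg
{
  rtx outgoing;
  rtx incoming;
};

static void
record_parm_reg (vec<parm_reg, va_gc> **regs, rtx in, rtx out)
{
  parm_reg p;                        /* was: parm_reg_t p; */
  p.incoming = in;
  p.outgoing = out;
  vec_safe_push (*regs, p);
}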
basic_block bb;
HOST_WIDE_INT fp_cfa_offset = -1;
- alloc_aux_for_blocks (sizeof (struct variable_tracking_info_def));
+ alloc_aux_for_blocks (sizeof (variable_tracking_info));
empty_shared_hash = new shared_hash;
empty_shared_hash->refcount = 1;
empty_shared_hash->htab = NULL;
delete changed_variables;
changed_variables = NULL;
- attrs_def_pool.release ();
+ attrs_pool.release ();
var_pool.release ();
location_chain_pool.release ();
shared_hash_pool.release ();
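
Finally, the allocator pools named after the old _def structs follow the rename, attrs_def_pool becoming attrs_pool and so on. Assuming the usual object_allocator API, and with the pool name string illustrative, the pattern is:

static object_allocator<attrs> attrs_pool ("attrs pool");

static attrs *
alloc_attrs_node ()
{
  return attrs_pool.allocate ();     /* typed allocation from the pool */
}

/* teardown, as in the hunk above: attrs_pool.release ();  */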