+2015-07-16 Martin Liska <mliska@suse.cz>
+
+ * alloc-pool.h
+ (object_allocator): Add new class.
+ (pool_allocator::initialize): Use the underlying class.
+ (pool_allocator::allocate): Likewise.
+ (pool_allocator::remove): Likewise.
+ (object_allocator::allocate_raw): New method.
+ (operator new): New placement-new helper built on allocate_raw.
+ * asan.c (struct asan_mem_ref): Remove unused members.
+ (asan_mem_ref_new): Replace new operator with
+ object_allocator::allocate.
+ (free_mem_ref_resources): Release the new static allocator.
+ * cfg.c (initialize_original_copy_tables): Replace pool_allocator
+ with object_allocator.
+ * config/sh/sh.c (add_constant): Replace new operator with
+ object_allocator::allocate.
+ (sh_reorg): Release the new static allocator.
+ * cselib.c (struct elt_list): Remove unused members.
+ (new_elt_list): Replace new operator with
+ object_allocator::allocate.
+ (new_elt_loc_list): Likewise.
+ (new_cselib_val): Likewise.
+ (unchain_one_elt_list): Replace delete operator with remove method.
+ (unchain_one_elt_loc_list): Likewise.
+ (unchain_one_value): Likewise.
+ (cselib_finish): Release newly added static allocators.
+ * cselib.h (struct cselib_val): Remove unused members.
+ (struct elt_loc_list): Likewise.
+ * df-problems.c (df_chain_alloc): Replace pool_allocator with
+ object_allocator.
+ * df-scan.c (struct df_scan_problem_data): Likewise.
+ (df_scan_alloc): Likewise.
+ * df.h (struct dataflow): Likewise.
+ * dse.c (struct read_info_type): Likewise.
+ (struct insn_info_type): Likewise.
+ (struct dse_bb_info_type): Likewise.
+ (struct group_info): Likewise.
+ (struct deferred_change): Likewise.
+ (get_group_info): Likewise.
+ (delete_dead_store_insn): Likewise.
+ (free_read_records): Likewise.
+ (replace_read): Likewise.
+ (check_mem_read_rtx): Likewise.
+ (scan_insn): Likewise.
+ (dse_step1): Likewise.
+ (dse_step7): Likewise.
+ * et-forest.c (struct et_occ): Remove unused members.
+ (et_new_occ): Use allocate instead of new operator.
+ (et_new_tree): Likewise.
+ (et_free_tree): Call release method explicitly.
+ (et_free_tree_force): Likewise.
+ (et_free_pools): Likewise.
+ (et_split): Use remove instead of delete operator.
+ * et-forest.h (struct et_node): Remove unused members.
+ * ipa-cp.c: Change pool_allocator to object_allocator.
+ * ipa-inline-analysis.c: Likewise.
+ * ipa-profile.c: Likewise.
+ * ipa-prop.c: Likewise.
+ * ipa-prop.h: Likewise.
+ * ira-build.c (initiate_cost_vectors): Use the untyped pool_allocator
+ with an explicit element size.
+ (ira_allocate_cost_vector): Cast return value.
+ * ira-color.c (struct update_cost_record): Remove unused members.
+ * lra-int.h (struct lra_live_range): Likewise.
+ (struct lra_copy): Likewise.
+ (struct lra_insn_reg): Likewise.
+ * lra-lives.c (lra_live_ranges_finish): Release new static allocator.
+ * lra.c (new_insn_reg): Replace new operator with allocate method.
+ (free_insn_regs): Same for operator delete.
+ (finish_insn_regs): Release new static allocator.
+ (finish_insn_recog_data): Likewise.
+ (lra_free_copies): Replace delete operator with remove method.
+ (lra_create_copy): Replace operator new with allocate method.
+ (invalidate_insn_data_regno_info): Replace delete operator with
+ remove method.
+ * regcprop.c (struct queued_debug_insn_change): Remove unused members.
+ (free_debug_insn_changes): Replace delete operator with remove method.
+ (replace_oldest_value_reg): Replace operator new with allocate method.
+ (pass_cprop_hardreg::execute): Release the new static allocator.
+ * sched-deps.c (sched_deps_init): Change pool_allocator to
+ object_allocator.
+ * sel-sched-ir.c: Likewise.
+ * sel-sched-ir.h: Likewise.
+ * stmt.c (expand_case): Likewise.
+ (expand_sjlj_dispatch_table): Likewise.
+ * tree-sra.c (struct access): Remove unused members.
+ (struct assign_link): Likewise.
+ (sra_deinitialize): Release newly added static pools.
+ (create_access_1): Replace operator new with allocate method.
+ (build_accesses_from_assign): Likewise.
+ (create_artificial_child_access): Likewise.
+ * tree-ssa-math-opts.c (pass_cse_reciprocals::execute): Change
+ pool_allocator to object_allocator.
+ * tree-ssa-pre.c: Likewise.
+ * tree-ssa-reassoc.c: Likewise.
+ * tree-ssa-sccvn.c (allocate_vn_table): Likewise.
+ * tree-ssa-strlen.c: Likewise.
+ * tree-ssa-structalias.c: Likewise.
+ * var-tracking.c (onepart_pool_allocate): New function.
+ (unshare_variable): Use the newly added function.
+ (variable_merge_over_cur): Likewise.
+ (variable_from_dropped): Likewise.
+ (variable_was_changed): Likewise.
+ (set_slot_part): Likewise.
+ (emit_notes_for_differences_1): Likewise.
+ (vt_finalize): Release newly added static pools.
+
2015-07-16 Martin Jambor <mjambor@suse.cz>
* ipa-prop.h (param_aa_status): Rename to ipa_param_aa_status. Adjust
typedef unsigned long ALLOC_POOL_ID_TYPE;
+/* Last used ID. */
+extern ALLOC_POOL_ID_TYPE last_id;
+
/* Pool allocator memory usage. */
struct pool_usage: public mem_usage
{
extern mem_alloc_description<pool_usage> pool_allocator_usage;
-/* Type based memory pool allocator. */
-template <typename T>
+/* Generic pool allocator. */
class pool_allocator
{
public:
/* Default constructor for pool allocator called NAME. Each block
- has NUM elements. The allocator support EXTRA_SIZE and can
- potentially IGNORE_TYPE_SIZE. */
- pool_allocator (const char *name, size_t num, size_t extra_size = 0,
- bool ignore_type_size = false CXX_MEM_STAT_INFO);
+ has NUM elements of SIZE bytes each. */
+ pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
~pool_allocator ();
void release ();
void release_if_empty ();
- T *allocate () ATTRIBUTE_MALLOC;
- void remove (T *object);
+ void *allocate () ATTRIBUTE_MALLOC;
+ void remove (void *object);
private:
struct allocation_pool_list
/* Initialize a pool allocator. */
void initialize ();
- template <typename U>
struct allocation_object
{
/* The ID of alloc pool which the object was allocated from. */
int64_t align_i;
} u;
- static inline allocation_object<U> *
+ static inline allocation_object*
get_instance (void *data_ptr)
{
- return (allocation_object<U> *)(((char *)(data_ptr))
- - offsetof (allocation_object<U>,
+ return (allocation_object *)(((char *)(data_ptr))
+ - offsetof (allocation_object,
u.data));
}
- static inline U *
+ static inline void*
get_data (void *instance_ptr)
{
- return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
+ return (void*)(((allocation_object *) instance_ptr)->u.data);
}
};
size_t m_block_size;
/* Size of a pool elements in bytes. */
size_t m_elt_size;
- /* Flag if we shoul ignore size of a type. */
- bool m_ignore_type_size;
- /* Extra size in bytes that should be allocated for each element. */
- size_t m_extra_size;
+ /* Size in bytes that should be allocated for each element. */
+ size_t m_size;
/* Flag if a pool allocator is initialized. */
bool m_initialized;
/* Memory allocation location. */
mem_location m_location;
};
-/* Last used ID. */
-extern ALLOC_POOL_ID_TYPE last_id;
-
-/* Store information about each particular alloc_pool. Note that this
- will underestimate the amount the amount of storage used by a small amount:
- 1) The overhead in a pool is not accounted for.
- 2) The unallocated elements in a block are not accounted for. Note
- that this can at worst case be one element smaller that the block
- size for that pool. */
-struct alloc_pool_descriptor
-{
- /* Number of pools allocated. */
- unsigned long created;
- /* Gross allocated storage. */
- unsigned long allocated;
- /* Amount of currently active storage. */
- unsigned long current;
- /* Peak amount of storage used. */
- unsigned long peak;
- /* Size of element in the pool. */
- int elt_size;
-};
-
-
-/* Hashtable mapping alloc_pool names to descriptors. */
-extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
-
-template <typename T>
inline
-pool_allocator<T>::pool_allocator (const char *name, size_t num,
- size_t extra_size, bool ignore_type_size
- MEM_STAT_DECL):
+pool_allocator::pool_allocator (const char *name, size_t num,
+ size_t size MEM_STAT_DECL):
m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
- m_block_size (0), m_ignore_type_size (ignore_type_size),
- m_extra_size (extra_size), m_initialized (false),
+ m_block_size (0), m_size (size), m_initialized (false),
m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
/* Initialize a pool allocator. */
-template <typename T>
-void
-pool_allocator<T>::initialize ()
+inline void
+pool_allocator::initialize ()
{
gcc_checking_assert (!m_initialized);
m_initialized = true;
size_t header_size;
- size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;
+ size_t size = m_size;
gcc_checking_assert (m_name);
size = align_eight (size);
/* Add the aligned size of ID. */
- size += offsetof (allocation_object<T>, u.data);
+ size += offsetof (allocation_object, u.data);
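+  /* For example, a 20-byte element is rounded up to 24 bytes here, and
+     the offsetof term then reserves room in front of the user data for
+     the pool ID used by checking builds.  */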
/* Um, we can't really allocate 0 elements per block. */
gcc_checking_assert (m_elts_per_block);
}
/* Free all memory allocated for the given memory pool. */
-template <typename T>
inline void
-pool_allocator<T>::release ()
+pool_allocator::release ()
{
if (!m_initialized)
return;
m_block_list = NULL;
}
-template <typename T>
void
-inline pool_allocator<T>::release_if_empty ()
+inline pool_allocator::release_if_empty ()
{
if (m_elts_free == m_elts_allocated)
release ();
}
-template <typename T>
-inline pool_allocator<T>::~pool_allocator ()
+inline pool_allocator::~pool_allocator ()
{
release ();
}
/* Allocates one element from the pool specified. */
-template <typename T>
-inline T *
-pool_allocator<T>::allocate ()
+inline void*
+pool_allocator::allocate ()
{
if (!m_initialized)
initialize ();
}
#ifdef ENABLE_VALGRIND_ANNOTATIONS
- size = m_elt_size - offsetof (allocation_object<T>, u.data);
+ size = m_elt_size - offsetof (allocation_object, u.data);
#endif
/* If there are no more free elements, make some more!. */
/* We now know that we can take the first elt off the virgin list and
put it on the returned list. */
block = m_virgin_free_list;
- header = (allocation_pool_list*) allocation_object<T>::get_data (block);
+ header = (allocation_pool_list*) allocation_object::get_data (block);
header->next = NULL;
#ifdef ENABLE_CHECKING
/* Mark the element to be free. */
- ((allocation_object<T> *) block)->id = 0;
+ ((allocation_object*) block)->id = 0;
#endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
m_returned_free_list = header;
#ifdef ENABLE_CHECKING
/* Set the ID for element. */
- allocation_object<T>::get_instance (header)->id = m_id;
+ allocation_object::get_instance (header)->id = m_id;
#endif
VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
- /* Call default constructor. */
- return (T *)(header);
+ return (void *)(header);
}
/* Puts PTR back on POOL's free list. */
-template <typename T>
-void
-pool_allocator<T>::remove (T *object)
+inline void
+pool_allocator::remove (void *object)
{
gcc_checking_assert (m_initialized);
allocation_pool_list *header;
int size ATTRIBUTE_UNUSED;
- size = m_elt_size - offsetof (allocation_object<T>, u.data);
+ size = m_elt_size - offsetof (allocation_object, u.data);
#ifdef ENABLE_CHECKING
gcc_assert (object
/* Check if we free more than we allocated, which is Bad (TM). */
&& m_elts_free < m_elts_allocated
/* Check whether the PTR was allocated from POOL. */
- && m_id == allocation_object<T>::get_instance (object)->id);
+ && m_id == allocation_object::get_instance (object)->id);
memset (object, 0xaf, size);
/* Mark the element to be free. */
- allocation_object<T>::get_instance (object)->id = 0;
+ allocation_object::get_instance (object)->id = 0;
#endif
header = (allocation_pool_list*) object;
}
}
+/* Type based memory pool allocator. */
+template <typename T>
+class object_allocator
+{
+public:
+ /* Default constructor for object allocator called NAME. Each block
+ has NUM elements. */
+ object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
+ m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
+
+ inline void
+ release ()
+ {
+ m_allocator.release ();
+ }
+
+ inline void release_if_empty ()
+ {
+ m_allocator.release_if_empty ();
+ }
+
+ inline T *
+ allocate () ATTRIBUTE_MALLOC
+ {
+ return ::new (m_allocator.allocate ()) T ();
+ }
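+
+  /* Raw allocation: take storage for one T from the pool without
+     running any constructor.  Used by the placement-new helper below
+     so that a type lacking a default ctor is constructed exactly once,
+     by the caller.  */
+  inline T *
+  allocate_raw () ATTRIBUTE_MALLOC
+  {
+    return (T *) m_allocator.allocate ();
+  }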
+
+ inline void
+ remove (T *object)
+ {
+ /* Call destructor. */
+ object->~T ();
+
+ m_allocator.remove (object);
+ }
+
+private:
+ pool_allocator m_allocator;
+};
+
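+/* Typical use (illustrative; 'foo' stands for any default-constructible
+   type):
+
+     static object_allocator<foo> foo_pool ("foo pool", 16);
+     foo *f = foo_pool.allocate ();
+     ...
+     foo_pool.remove (f);
+
+   allocate () placement-news a default-constructed foo inside pool
+   storage, remove () runs ~foo and recycles the slot, and release ()
+   frees the pool's blocks wholesale.  */
+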
+/* Store information about each particular alloc_pool.  Note that this
+   will underestimate the amount of storage used by a small amount:
+   1) The overhead in a pool is not accounted for.
+   2) The unallocated elements in a block are not accounted for.  Note
+      that this can at worst case be one element smaller than the block
+      size for that pool.  */
+struct alloc_pool_descriptor
+{
+ /* Number of pools allocated. */
+ unsigned long created;
+ /* Gross allocated storage. */
+ unsigned long allocated;
+ /* Amount of currently active storage. */
+ unsigned long current;
+ /* Peak amount of storage used. */
+ unsigned long peak;
+ /* Size of element in the pool. */
+ int elt_size;
+};
+
+/* Helper for classes that do not provide a default ctor: allocate raw
+   storage from the pool so that only the constructor named in the
+   caller's placement new-expression runs.  */
+
+template <typename T>
+inline void *
+operator new (size_t, object_allocator<T> &a)
+{
+  return a.allocate_raw ();
+}
+
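+/* For instance (illustrative; 'edge_cost' and 'weight' are hypothetical
+   names), a type whose only ctor takes arguments can be pool-allocated
+   with:
+
+     static object_allocator<edge_cost> edge_cost_pool ("edge costs", 16);
+     edge_cost *e = new (edge_cost_pool) edge_cost (weight);
+     ...
+     edge_cost_pool.remove (e);  */
+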
+/* Hashtable mapping alloc_pool names to descriptors. */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+
#endif
/* The size of the access. */
HOST_WIDE_INT access_size;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((asan_mem_ref *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<asan_mem_ref> pool;
};
-pool_allocator<asan_mem_ref> asan_mem_ref::pool ("asan_mem_ref", 10);
+object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
/* Initializes an instance of asan_mem_ref. */
static asan_mem_ref*
asan_mem_ref_new (tree start, HOST_WIDE_INT access_size)
{
- asan_mem_ref *ref = new asan_mem_ref;
+ asan_mem_ref *ref = asan_mem_ref_pool.allocate ();
asan_mem_ref_init (ref, start, access_size);
return ref;
delete asan_mem_ref_ht;
asan_mem_ref_ht = NULL;
- asan_mem_ref::pool.release ();
+ asan_mem_ref_pool.release ();
}
/* Return true iff the memory reference REF has been instrumented. */
+2015-07-16 Martin Liska <mliska@suse.cz>
+
+ * c-format.c (check_format_info_main): Use object_allocator
+ instead of pool_allocator.
+ (check_format_arg): Likewise.
+
2015-07-15 Andrew MacLeod <amacleod@redhat.com>
* c-opts.c: Remove multiline #include comment.
function_format_info *,
const char *, int, tree,
unsigned HOST_WIDE_INT,
- pool_allocator<format_wanted_type> &);
+ object_allocator<format_wanted_type> &);
static void init_dollar_format_checking (int, tree);
static int maybe_read_dollar_number (const char **, int,
will decrement it if it finds there are extra arguments, but this way
need not adjust it for every return. */
res->number_other++;
- pool_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool", 10);
+ object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
+ 10);
check_format_info_main (res, info, format_chars, format_length,
params, arg_num, fwt_pool);
}
function_format_info *info, const char *format_chars,
int format_length, tree params,
unsigned HOST_WIDE_INT arg_num,
- pool_allocator<format_wanted_type> &fwt_pool)
+ object_allocator <format_wanted_type> &fwt_pool)
{
const char *orig_format_chars = format_chars;
tree first_fillin_param = params;
/* And between loops and copies. */
static hash_table<bb_copy_hasher> *loop_copy;
-static pool_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
+static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
/* Initialize the data structures to maintain mapping between blocks
and its copies. */
void
initialize_original_copy_tables (void)
{
-
- original_copy_bb_pool = new pool_allocator<htab_bb_copy_original_entry>
+ original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
("original_copy", 10);
bb_original = new hash_table<bb_copy_hasher> (10);
bb_copy = new hash_table<bb_copy_hasher> (10);
{
rtx_code_label *label;
struct label_ref_list_d *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((label_ref_list_d *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<label_ref_list_d> pool;
-
} *label_ref_list_t;
-pool_allocator<label_ref_list_d> label_ref_list_d::pool
+static object_allocator<label_ref_list_d> label_ref_list_d_pool
("label references list", 30);
/* The SH cannot load a large constant into a register, constants have to
}
if (lab && pool_window_label)
{
- newref = new label_ref_list_d;
+ newref = label_ref_list_d_pool.allocate ();
newref->label = pool_window_label;
ref = pool_vector[pool_window_last].wend;
newref->next = ref;
pool_vector[pool_size].part_of_sequence_p = (lab == 0);
if (lab && pool_window_label)
{
- newref = new label_ref_list_d;
+ newref = label_ref_list_d_pool.allocate ();
newref->label = pool_window_label;
ref = pool_vector[pool_window_last].wend;
newref->next = ref;
insn = barrier;
}
}
- label_ref_list_d::pool.release ();
+ label_ref_list_d_pool.release ();
for (insn = first; insn; insn = NEXT_INSN (insn))
PUT_MODE (insn, VOIDmode);
{
struct elt_list *next;
cselib_val *elt;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((elt_list *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<elt_list> pool;
};
static bool cselib_record_memory;
each time memory is invalidated. */
static cselib_val *first_containing_mem = &dummy_val;
-pool_allocator<elt_list> elt_list::pool ("elt_list", 10);
-pool_allocator<elt_loc_list> elt_loc_list::pool ("elt_loc_list", 10);
-pool_allocator<cselib_val> cselib_val::pool ("cselib_val_list", 10);
+static object_allocator<elt_list> elt_list_pool ("elt_list", 10);
+static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10);
+static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10);
-static pool_allocator<rtx_def> value_pool ("value", 100, RTX_CODE_SIZE (VALUE),
- true);
+static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
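+/* A VALUE rtx needs only RTX_CODE_SIZE (VALUE) bytes rather than
+   sizeof (rtx_def), so the untyped pool with an explicit element size
+   replaces the old ignore_type_size/extra_size arrangement; callers
+   cast the void * result themselves.  */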
/* If nonnull, cselib will call this function before freeing useless
VALUEs. A VALUE is deemed useless if its "locs" field is null. */
static inline struct elt_list *
new_elt_list (struct elt_list *next, cselib_val *elt)
{
- elt_list *el = new elt_list ();
+ elt_list *el = elt_list_pool.allocate ();
el->next = next;
el->elt = elt;
return el;
}
/* Chain LOC back to VAL. */
- el = new elt_loc_list;
+ el = elt_loc_list_pool.allocate ();
el->loc = val->val_rtx;
el->setting_insn = cselib_current_insn;
el->next = NULL;
CSELIB_VAL_PTR (loc)->locs = el;
}
- el = new elt_loc_list;
+ el = elt_loc_list_pool.allocate ();
el->loc = loc;
el->setting_insn = cselib_current_insn;
el->next = next;
struct elt_list *l = *pl;
*pl = l->next;
- delete l;
+ elt_list_pool.remove (l);
}
/* Likewise for elt_loc_lists. */
struct elt_loc_list *l = *pl;
*pl = l->next;
- delete l;
+ elt_loc_list_pool.remove (l);
}
/* Likewise for cselib_vals. This also frees the addr_list associated with
while (v->addr_list)
unchain_one_elt_list (&v->addr_list);
- delete v;
+ cselib_val_pool.remove (v);
}
/* Remove all entries from the hash table. Also used during
static inline cselib_val *
new_cselib_val (unsigned int hash, machine_mode mode, rtx x)
{
- cselib_val *e = new cselib_val;
+ cselib_val *e = cselib_val_pool.allocate ();
gcc_assert (hash);
gcc_assert (next_uid);
precisely when we can have VALUE RTXen (when cselib is active)
so we don't need to put them in garbage collected memory.
??? Why should a VALUE be an RTX in the first place? */
- e->val_rtx = value_pool.allocate ();
+ e->val_rtx = (rtx_def*) value_pool.allocate ();
memset (e->val_rtx, 0, RTX_HDR_SIZE);
PUT_CODE (e->val_rtx, VALUE);
PUT_MODE (e->val_rtx, mode);
cselib_any_perm_equivs = false;
cfa_base_preserved_val = NULL;
cfa_base_preserved_regno = INVALID_REGNUM;
- elt_list::pool.release ();
- elt_loc_list::pool.release ();
- cselib_val::pool.release ();
+ elt_list_pool.release ();
+ elt_loc_list_pool.release ();
+ cselib_val_pool.release ();
value_pool.release ();
cselib_clear_table ();
delete cselib_hash_table;
struct elt_list *addr_list;
struct cselib_val *next_containing_mem;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((cselib_val *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<cselib_val> pool;
};
/* A list of rtl expressions that hold the same value. */
rtx loc;
/* The insn that made the equivalence. */
rtx_insn *setting_insn;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((elt_loc_list *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<elt_loc_list> pool;
};
/* Describe a single set that is part of an insn. */
df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
df_chain_remove_problem ();
- df_chain->block_pool = new pool_allocator<df_link> ("df_chain_block pool",
+ df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
50);
df_chain->optional_p = true;
}
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
- pool_allocator<df_base_ref> *ref_base_pool;
- pool_allocator<df_artificial_ref> *ref_artificial_pool;
- pool_allocator<df_regular_ref> *ref_regular_pool;
- pool_allocator<df_insn_info> *insn_pool;
- pool_allocator<df_reg_info> *reg_pool;
- pool_allocator<df_mw_hardreg> *mw_reg_pool;
+ object_allocator<df_base_ref> *ref_base_pool;
+ object_allocator<df_artificial_ref> *ref_artificial_pool;
+ object_allocator<df_regular_ref> *ref_regular_pool;
+ object_allocator<df_insn_info> *insn_pool;
+ object_allocator<df_reg_info> *reg_pool;
+ object_allocator<df_mw_hardreg> *mw_reg_pool;
bitmap_obstack reg_bitmaps;
bitmap_obstack insn_bitmaps;
df_scan->problem_data = problem_data;
df_scan->computed = true;
- problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+ problem_data->ref_base_pool = new object_allocator<df_base_ref>
("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+ problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+ problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->insn_pool = new pool_allocator<df_insn_info>
+ problem_data->insn_pool = new object_allocator<df_insn_info>
("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->reg_pool = new pool_allocator<df_reg_info>
+ problem_data->reg_pool = new object_allocator<df_reg_info>
("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
- problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+ problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
unsigned int block_info_size;
/* The pool to allocate the block_info from. */
- pool_allocator<df_link> *block_pool;
+ object_allocator<df_link> *block_pool;
/* The lr and live problems have their transfer functions recomputed
only if necessary. This is possible for them because, the
}
typedef struct store_info *store_info_t;
-static pool_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
+static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
100);
-static pool_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
+static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
100);
/* This structure holds information about a load. These are only
/* The next read_info for this insn. */
struct read_info_type *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((read_info_type *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<read_info_type> pool;
};
typedef struct read_info_type *read_info_t;
-pool_allocator<read_info_type> read_info_type::pool ("read_info_pool", 100);
+static object_allocator<read_info_type> read_info_type_pool
+ ("read_info_pool", 100);
/* One of these records is created for each insn. */
time it is guaranteed to be correct is when the traversal starts
at active_local_stores. */
struct insn_info_type * next_local_store;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((insn_info_type *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<insn_info_type> pool;
};
typedef struct insn_info_type *insn_info_t;
-pool_allocator<insn_info_type> insn_info_type::pool ("insn_info_pool", 100);
+static object_allocator<insn_info_type> insn_info_type_pool
+ ("insn_info_pool", 100);
/* The linked list of stores that are under consideration in this
basic block. */
to assure that shift and/or add sequences that are inserted do not
accidentally clobber live hard regs. */
bitmap regs_live;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((dse_bb_info_type *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<dse_bb_info_type> pool;
};
typedef struct dse_bb_info_type *bb_info_t;
-pool_allocator<dse_bb_info_type> dse_bb_info_type::pool ("bb_info_pool", 100);
+
+static object_allocator<dse_bb_info_type> dse_bb_info_type_pool
+ ("bb_info_pool", 100);
/* Table to hold all bb_infos. */
static bb_info_t *bb_table;
care about. */
int *offset_map_n, *offset_map_p;
int offset_map_size_n, offset_map_size_p;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((group_info *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<group_info> pool;
};
typedef struct group_info *group_info_t;
typedef const struct group_info *const_group_info_t;
-pool_allocator<group_info> group_info::pool ("rtx_group_info_pool", 100);
+static object_allocator<group_info> group_info_pool
+ ("rtx_group_info_pool", 100);
/* Index into the rtx_group_vec. */
static int rtx_group_next_id;
rtx reg;
struct deferred_change *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((deferred_change *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<deferred_change> pool;
};
typedef struct deferred_change *deferred_change_t;
-pool_allocator<deferred_change> deferred_change::pool
+static object_allocator<deferred_change> deferred_change_pool
("deferred_change_pool", 10);
static deferred_change_t deferred_change_list = NULL;
{
if (!clear_alias_group)
{
- clear_alias_group = gi = new group_info;
+ clear_alias_group = gi = group_info_pool.allocate ();
memset (gi, 0, sizeof (struct group_info));
gi->id = rtx_group_next_id++;
gi->store1_n = BITMAP_ALLOC (&dse_bitmap_obstack);
if (gi == NULL)
{
- *slot = gi = new group_info;
+ *slot = gi = group_info_pool.allocate ();
gi->rtx_base = base;
gi->id = rtx_group_next_id++;
gi->base_mem = gen_rtx_MEM (BLKmode, base);
while (read_info)
{
read_info_t next = read_info->next;
- delete read_info;
+ read_info_type_pool.remove (read_info);
read_info = next;
}
insn_info->read_rec = NULL;
read_info_t next = (*ptr)->next;
if ((*ptr)->alias_set == 0)
{
- delete *ptr;
+ read_info_type_pool.remove (*ptr);
*ptr = next;
}
else
if (validate_change (read_insn->insn, loc, read_reg, 0))
{
- deferred_change_t change = new deferred_change;
+ deferred_change_t change = deferred_change_pool.allocate ();
/* Insert this right before the store insn where it will be safe
from later insns that might change it before the read. */
/* Get rid of the read_info, from the point of view of the
rest of dse, play like this read never happened. */
read_insn->read_rec = read_info->next;
- delete read_info;
+ read_info_type_pool.remove (read_info);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, " -- replaced the loaded MEM with ");
else
width = GET_MODE_SIZE (GET_MODE (mem));
- read_info = new read_info_type;
+ read_info = read_info_type_pool.allocate ();
read_info->group_id = group_id;
read_info->mem = mem;
read_info->alias_set = spill_alias_set;
scan_insn (bb_info_t bb_info, rtx_insn *insn)
{
rtx body;
- insn_info_type *insn_info = new insn_info_type;
+ insn_info_type *insn_info = insn_info_type_pool.allocate ();
int mems_found = 0;
memset (insn_info, 0, sizeof (struct insn_info_type));
FOR_ALL_BB_FN (bb, cfun)
{
insn_info_t ptr;
- bb_info_t bb_info = new dse_bb_info_type;
+ bb_info_t bb_info = dse_bb_info_type_pool.allocate ();
memset (bb_info, 0, sizeof (dse_bb_info_type));
bitmap_set_bit (all_blocks, bb->index);
/* There is no reason to validate this change. That was
done earlier. */
*deferred_change_list->loc = deferred_change_list->reg;
- delete deferred_change_list;
+ deferred_change_pool.remove (deferred_change_list);
deferred_change_list = next;
}
BITMAP_FREE (scratch);
rtx_store_info_pool.release ();
- read_info_type::pool.release ();
- insn_info_type::pool.release ();
- dse_bb_info_type::pool.release ();
- group_info::pool.release ();
- deferred_change::pool.release ();
+ read_info_type_pool.release ();
+ insn_info_type_pool.release ();
+ dse_bb_info_type_pool.release ();
+ group_info_pool.release ();
+ deferred_change_pool.release ();
}
on the path to the root. */
struct et_occ *min_occ; /* The occurrence in the subtree with the minimal
depth. */
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((et_occ *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<et_occ> pool;
-
};
-pool_allocator<et_node> et_node::pool ("et_nodes pool", 300);
-pool_allocator<et_occ> et_occ::pool ("et_occ pool", 300);
+static object_allocator<et_node> et_nodes ("et_nodes pool", 300);
+static object_allocator<et_occ> et_occurrences ("et_occ pool", 300);
/* Changes depth of OCC to D. */
static struct et_occ *
et_new_occ (struct et_node *node)
{
- et_occ *nw = new et_occ;
+ et_occ *nw = et_occurrences.allocate ();
nw->of = node;
nw->parent = NULL;
struct et_node *
et_new_tree (void *data)
{
- struct et_node *nw;
-
- nw = new et_node;
+ et_node *nw = et_nodes.allocate ();
nw->data = data;
nw->father = NULL;
if (t->father)
et_split (t);
- delete t->rightmost_occ;
- delete t;
+ et_occurrences.remove (t->rightmost_occ);
+ et_nodes.remove (t);
}
/* Releases et tree T without maintaining other nodes. */
void
et_free_tree_force (struct et_node *t)
{
- delete t->rightmost_occ;
+ et_occurrences.remove (t->rightmost_occ);
if (t->parent_occ)
- delete t->parent_occ;
- delete t;
+ et_occurrences.remove (t->parent_occ);
+ et_nodes.remove (t);
}
/* Release the alloc pools, if they are empty. */
void
et_free_pools (void)
{
- et_occ::pool.release_if_empty ();
- et_node::pool.release_if_empty ();
+ et_occurrences.release_if_empty ();
+ et_nodes.release_if_empty ();
}
/* Sets father of et tree T to FATHER. */
rmost->depth = 0;
rmost->min = 0;
- delete p_occ;
+ et_occurrences.remove (p_occ);
/* Update the tree. */
if (father->son == t)
struct et_occ *rightmost_occ; /* The rightmost occurrence. */
struct et_occ *parent_occ; /* The occurrence of the parent node. */
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((et_node *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<et_node> pool;
};
struct et_node *et_new_tree (void *data);
/* Allocation pools for values and their sources in ipa-cp. */
-pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool
+object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
("IPA-CP constant values", 32);
-pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
-pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool
+object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
("IPA-CP value sources", 64);
-pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
+object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
("IPA_CP aggregate lattices", 32);
/* Maximal count found in program. */
vec<edge_growth_cache_entry> edge_growth_cache;
/* Edge predicates goes here. */
-static pool_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
+static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
/* Return true predicate (tautology).
We represent it by empty list of clauses. */
duplicate entries. */
vec<histogram_entry *> histogram;
-static pool_allocator<histogram_entry> histogram_pool
+static object_allocator<histogram_entry> histogram_pool
("IPA histogram", 10);
/* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR. */
/* Allocation pool for reference descriptions. */
-static pool_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
+static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
("IPA-PROP ref descriptions", 32);
/* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
template <typename value>
class ipcp_value;
-extern pool_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
-extern pool_allocator<ipcp_value<ipa_polymorphic_call_context> >
+extern object_allocator<ipcp_value<tree> > ipcp_cst_values_pool;
+extern object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool;
template <typename valtype>
class ipcp_value_source;
-extern pool_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
+extern object_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
class ipcp_agg_lattice;
-extern pool_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
+extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
/* Operation to be performed for the parameter in ipa_parm_adjustment
below. */
\f
/* Pools for allocnos, allocno live ranges and objects. */
-static pool_allocator<live_range> live_range_pool ("live ranges", 100);
-static pool_allocator<ira_allocno> allocno_pool ("allocnos", 100);
-static pool_allocator<ira_object> object_pool ("objects", 100);
+static object_allocator<live_range> live_range_pool ("live ranges", 100);
+static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
+static object_allocator<ira_object> object_pool ("objects", 100);
/* Vec containing references to all created allocnos. It is a
container of array allocnos. */
\f
/* Pools for allocno preferences. */
-static pool_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
+static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
/* Vec containing references to all created preferences. It is a
container of array ira_prefs. */
\f
/* Pools for copies. */
-static pool_allocator<ira_allocno_copy> copy_pool ("copies", 100);
+static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
/* Vec containing references to all created copies. It is a
container of array ira_copies. */
\f
/* Pools for cost vectors. It is defined only for allocno classes. */
-static pool_allocator<int> * cost_vector_pool[N_REG_CLASSES];
+static pool_allocator *cost_vector_pool[N_REG_CLASSES];
/* The function initiates work with hard register cost vectors. It
creates allocation pool for each allocno class. */
for (i = 0; i < ira_allocno_classes_num; i++)
{
aclass = ira_allocno_classes[i];
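+      /* The untyped pool takes the full element size up front; the old
+         template pool added sizeof (int) itself, which is why the
+         "- 1" disappears from this computation.  */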
- cost_vector_pool[aclass] = new pool_allocator<int>
+ cost_vector_pool[aclass] = new pool_allocator
("cost vectors", 100,
- sizeof (int) * (ira_class_hard_regs_num[aclass] - 1));
+ sizeof (int) * (ira_class_hard_regs_num[aclass]));
}
}
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
- return cost_vector_pool[(int) aclass]->allocate ();
+ return (int*) cost_vector_pool[(int) aclass]->allocate ();
}
/* Free a cost vector VEC for ACLASS. */
int divisor;
/* Next record for given allocno. */
struct update_cost_record *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((update_cost_record *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<update_cost_record> pool;
};
/* To decrease footprint of ira_allocno structure we store all data
allocnos. */
/* Pool for update cost records. */
-static pool_allocator<update_cost_record> update_cost_record_pool
+static object_allocator<update_cost_record> update_cost_record_pool
("update cost records", 100);
/* Return new update cost record with given params. */
lra_live_range_t next;
/* Pointer to structures with the same start. */
lra_live_range_t start_next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((lra_live_range *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<lra_live_range> pool;
};
typedef struct lra_copy *lra_copy_t;
int regno1, regno2;
/* Next copy with correspondingly REGNO1 and REGNO2. */
lra_copy_t regno1_next, regno2_next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((lra_copy *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<lra_copy> pool;
-
};
/* Common info about a register (pseudo or hard register). */
int regno;
/* Next reg info of the same insn. */
struct lra_insn_reg *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((lra_insn_reg *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<lra_insn_reg> pool;
};
/* Static part (common info for insns with the same ICODE) of LRA
static bitmap_head temp_bitmap;
/* Pool for pseudo live ranges. */
-pool_allocator <lra_live_range> lra_live_range::pool ("live ranges", 100);
+static object_allocator<lra_live_range> lra_live_range_pool
+ ("live ranges", 100);
/* Free live range list LR. */
static void
{
finish_live_solver ();
bitmap_clear (&temp_bitmap);
- lra_live_range::pool.release ();
+ lra_live_range_pool.release ();
}
insns. */
/* Pools for insn reg info. */
-pool_allocator<lra_insn_reg> lra_insn_reg::pool ("insn regs", 100);
+object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100);
/* Create LRA insn related info about a reference to REGNO in INSN with
TYPE (in/out/inout), biggest reference mode MODE, flag that it is
machine_mode mode,
bool subreg_p, bool early_clobber, struct lra_insn_reg *next)
{
- lra_insn_reg *ir = new lra_insn_reg ();
+ lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
ir->type = type;
ir->biggest_mode = mode;
if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (lra_reg_info[regno].biggest_mode)
for (; ir != NULL; ir = next_ir)
{
next_ir = ir->next;
- delete ir;
+ lra_insn_reg_pool.remove (ir);
}
}
static void
finish_insn_regs (void)
{
- lra_insn_reg::pool.release ();
+ lra_insn_reg_pool.release ();
}
\f
free (data);
}
+/* Pools for copies. */
+static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100);
+
/* Finish LRA data about all insns. */
static void
finish_insn_recog_data (void)
if ((data = lra_insn_recog_data[i]) != NULL)
free_insn_recog_data (data);
finish_insn_regs ();
- lra_copy::pool.release ();
- lra_insn_reg::pool.release ();
+ lra_copy_pool.release ();
+ lra_insn_reg_pool.release ();
free (lra_insn_recog_data);
}
return ++last_reg_value;
}
-/* Pools for copies. */
-pool_allocator<lra_copy> lra_copy::pool ("lra copies", 100);
-
/* Vec referring to pseudo copies. */
static vec<lra_copy_t> copy_vec;
{
cp = copy_vec.pop ();
lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
- delete cp;
+ lra_copy_pool.remove (cp);
}
}
std::swap (regno1, regno2);
regno1_dest_p = false;
}
- cp = new lra_copy ();
+ cp = lra_copy_pool.allocate ();
copy_vec.safe_push (cp);
cp->regno1_dest_p = regno1_dest_p;
cp->freq = freq;
{
i = ir->regno;
next_ir = ir->next;
- delete ir;
+ lra_insn_reg_pool.remove (ir);
bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
{
rtx_insn *insn;
rtx *loc;
rtx new_rtx;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((queued_debug_insn_change *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<queued_debug_insn_change> pool;
};
/* For each register, we have a list of registers that contain the same
unsigned int n_debug_insn_changes;
};
-pool_allocator<queued_debug_insn_change> queued_debug_insn_change::pool
+static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
("debug insn changes pool", 256);
static bool skip_debug_insn_p;
{
next = cur->next;
--vd->n_debug_insn_changes;
- delete cur;
+ queued_debug_insn_change_pool.remove (cur);
}
vd->e[regno].debug_insn_changes = NULL;
}
fprintf (dump_file, "debug_insn %u: queued replacing reg %u with %u\n",
INSN_UID (insn), REGNO (*loc), REGNO (new_rtx));
- change = new queued_debug_insn_change;
+ change = queued_debug_insn_change_pool.allocate ();
change->next = vd->e[REGNO (new_rtx)].debug_insn_changes;
change->insn = insn;
change->loc = loc;
}
}
- queued_debug_insn_change::pool.release ();
+ queued_debug_insn_change_pool.release ();
}
sbitmap_free (visited);
}
/* Pool to hold all dependency nodes (dep_node_t). */
-static pool_allocator<_dep_node> *dn_pool;
+static object_allocator<_dep_node> *dn_pool;
/* Number of dep_nodes out there. */
static int dn_pool_diff = 0;
}
/* Pool to hold dependencies lists (deps_list_t). */
-static pool_allocator<_deps_list> *dl_pool;
+static object_allocator<_deps_list> *dl_pool;
/* Number of deps_lists out there. */
static int dl_pool_diff = 0;
if (global_p)
{
- dl_pool = new pool_allocator<_deps_list> ("deps_list",
+ dl_pool = new object_allocator<_deps_list> ("deps_list",
/* Allocate lists for one block at a time. */
insns_in_block);
- dn_pool = new pool_allocator<_dep_node> ("dep_node",
+ dn_pool = new object_allocator<_dep_node> ("dep_node",
/* Allocate nodes for one block at a time.
We assume that average insn has
5 producers. */
sel_region_bb_info = vNULL;
/* A pool for allocating all lists. */
-pool_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
+object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
/* This contains information about successors for compute_av_set. */
struct succs_info current_succs;
/* _list_t functions.
All of _*list_* functions are used through accessor macros, thus
we can't move them in sel-sched-ir.c. */
-extern pool_allocator<_list_node> sched_lists_pool;
+extern object_allocator<_list_node> sched_lists_pool;
static inline _list_t
_list_alloc (void)
static struct case_node *
add_case_node (struct case_node *head, tree low, tree high,
- tree label, int prob, pool_allocator<case_node> &case_node_pool)
+ tree label, int prob,
+ object_allocator<case_node> &case_node_pool)
{
struct case_node *r;
struct case_node *case_list = 0;
/* A pool for case nodes. */
- pool_allocator<case_node> case_node_pool ("struct case_node pool", 100);
+ object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
/* An ERROR_MARK occurs for various reasons including invalid data type.
??? Can this still happen, with GIMPLE and all? */
{
/* Similar to expand_case, but much simpler. */
struct case_node *case_list = 0;
- pool_allocator<case_node> case_node_pool ("struct sjlj_case pool",
+ object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
ncases);
tree index_expr = make_tree (index_type, dispatch_index);
tree minval = build_int_cst (index_type, 0);
/* Set when we discover that this pointer is not safe to dereference in the
caller. */
unsigned grp_not_necessarilly_dereferenced : 1;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((access *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<access> pool;
};
typedef struct access *access_p;
/* Alloc pool for allocating access structures. */
-pool_allocator<struct access> access::pool ("SRA accesses", 16);
+static object_allocator<struct access> access_pool ("SRA accesses", 16);
/* A structure linking lhs and rhs accesses from an aggregate assignment. They
are used to propagate subaccesses from rhs to lhs as long as they don't
{
struct access *lacc, *racc;
struct assign_link *next;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((assign_link *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<assign_link> pool;
};
/* Alloc pool for allocating assign link structures. */
-pool_allocator<assign_link> assign_link::pool ("SRA links", 16);
+static object_allocator<assign_link> assign_link_pool ("SRA links", 16);
/* Base (tree) -> Vector (vec<access_p> *) map. */
static hash_map<tree, auto_vec<access_p> > *base_access_vec;
candidates = NULL;
BITMAP_FREE (should_scalarize_away_bitmap);
BITMAP_FREE (cannot_scalarize_away_bitmap);
- access::pool.release ();
- assign_link::pool.release ();
+ access_pool.release ();
+ assign_link_pool.release ();
obstack_free (&name_obstack, NULL);
delete base_access_vec;
static struct access *
create_access_1 (tree base, HOST_WIDE_INT offset, HOST_WIDE_INT size)
{
- struct access *access = new struct access;
+ struct access *access = access_pool.allocate ();
memset (access, 0, sizeof (struct access));
access->base = base;
{
struct assign_link *link;
- link = new assign_link;
+ link = assign_link_pool.allocate ();
memset (link, 0, sizeof (struct assign_link));
link->lacc = lacc;
gcc_assert (!model->grp_unscalarizable_region);
- struct access *access = new struct access;
+ struct access *access = access_pool.allocate ();
memset (access, 0, sizeof (struct access));
if (!build_user_friendly_ref_for_offset (&expr, TREE_TYPE (expr), new_offset,
model->type))
static struct occurrence *occ_head;
/* Allocation pool for getting instances of "struct occurrence". */
-static pool_allocator<occurrence> *occ_pool;
+static object_allocator<occurrence> *occ_pool;
basic_block bb;
tree arg;
- occ_pool = new pool_allocator<occurrence>
+ occ_pool = new object_allocator<occurrence>
("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
expressions.release ();
}
-static pool_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
+static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
/* Given an SSA_NAME NAME, get or create a pre_expr to represent it. */
/* We can add and remove elements and entries to and from sets
and hash tables, so we use alloc pools for them. */
-static pool_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
+static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
static bitmap_obstack grand_bitmap_obstack;
/* Set of blocks with statements that have had their EH properties changed. */
unsigned int count;
} *operand_entry_t;
-static pool_allocator<operand_entry> operand_entry_pool ("operand entry pool",
+static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
30);
/* This is used to assign a unique ID to each struct operand_entry
vn_phi_table_type *phis;
vn_reference_table_type *references;
struct obstack nary_obstack;
- pool_allocator<vn_phi_s> *phis_pool;
- pool_allocator<vn_reference_s> *references_pool;
+ object_allocator<vn_phi_s> *phis_pool;
+ object_allocator<vn_reference_s> *references_pool;
} *vn_tables_t;
table->references = new vn_reference_table_type (23);
gcc_obstack_init (&table->nary_obstack);
- table->phis_pool = new pool_allocator<vn_phi_s> ("VN phis", 30);
- table->references_pool = new pool_allocator<vn_reference_s> ("VN references",
- 30);
+ table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
+ table->references_pool = new object_allocator<vn_reference_s>
+ ("VN references", 30);
}
/* Free a value number table. */
} *strinfo;
/* Pool for allocating strinfo_struct entries. */
-static pool_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool", 64);
+static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
+ 64);
/* Vector mapping positive string indexes to strinfo, for the
current basic block. The first pointer in the vector is special,
static inline bool type_can_have_subvars (const_tree);
/* Pool of variable info structures. */
-static pool_allocator<variable_info> variable_info_pool
+static object_allocator<variable_info> variable_info_pool
("Variable info pool", 30);
/* Map varinfo to final pt_solution. */
/* List of constraints that we use to build the constraint graph from. */
static vec<constraint_t> constraints;
-static pool_allocator<constraint> constraint_pool ("Constraint pool", 30);
+static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
/* The constraint graph is represented as an array of bitmaps
containing successor nodes. */
/* Offset from start of DECL. */
HOST_WIDE_INT offset;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((attrs_def *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<attrs_def> pool;
} *attrs;
/* Structure for chaining the locations. */
/* Initialized? */
enum var_init_status init;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((location_chain_def *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<location_chain_def> pool;
} *location_chain;
/* A vector of loc_exp_dep holds the active dependencies of a one-part
/* A pointer to the pointer to this entry (head or prev's next) in
the doubly-linked list. */
struct loc_exp_dep_s **pprev;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((loc_exp_dep_s *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<loc_exp_dep_s> pool;
} loc_exp_dep;
/* Actual hash table. */
variable_table_type *htab;
-
- /* Pool allocation new operator. */
- inline void *operator new (size_t)
- {
- return pool.allocate ();
- }
-
- /* Delete operator utilizing pool allocation. */
- inline void operator delete (void *ptr)
- {
- pool.remove ((shared_hash_def *) ptr);
- }
-
- /* Memory allocation pool. */
- static pool_allocator<shared_hash_def> pool;
} *shared_hash;
/* Structure holding the IN or OUT set for a basic block. */
} *variable_tracking_info;
/* Alloc pool for struct attrs_def. */
-pool_allocator<attrs_def> attrs_def::pool ("attrs_def pool", 1024);
+object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024);
/* Alloc pool for struct variable_def with MAX_VAR_PARTS entries. */
-static pool_allocator<variable_def> var_pool
- ("variable_def pool", 64,
+static pool_allocator var_pool
+ ("variable_def pool", 64, sizeof (variable_def) +
(MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
/* Alloc pool for struct variable_def with a single var_part entry. */
-static pool_allocator<variable_def> valvar_pool
- ("small variable_def pool", 256);
+static pool_allocator valvar_pool
+ ("small variable_def pool", 256, sizeof (variable_def));
/* Alloc pool for struct location_chain_def. */
-pool_allocator<location_chain_def> location_chain_def::pool
+static object_allocator<location_chain_def> location_chain_def_pool
("location_chain_def pool", 1024);
/* Alloc pool for struct shared_hash_def. */
-pool_allocator<shared_hash_def> shared_hash_def::pool
+static object_allocator<shared_hash_def> shared_hash_def_pool
("shared_hash_def pool", 256);
/* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables. */
-pool_allocator<loc_exp_dep> loc_exp_dep::pool ("loc_exp_dep pool", 64);
+object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64);
/* Changed variables, notes will be emitted for them. */
static variable_table_type *changed_variables;
}
/* Return the variable pool to be used for a dv of type ONEPART. */
-static inline pool_allocator <variable_def> &
+static inline pool_allocator &
onepart_pool (onepart_enum_t onepart)
{
return onepart ? valvar_pool : var_pool;
}
+/* Allocate a variable_def from the corresponding variable pool. */
+static inline variable_def *
+onepart_pool_allocate (onepart_enum_t onepart)
+{
+ return (variable_def*) onepart_pool (onepart).allocate ();
+}
+
/* Build a decl_or_value out of a decl. */
static inline decl_or_value
dv_from_decl (tree decl)
variable new_var;
int i;
- new_var = onepart_pool (var->onepart).allocate ();
+ new_var = onepart_pool_allocate (var->onepart);
new_var->dv = var->dv;
new_var->refcount = 1;
var->refcount--;
{
if (node)
{
- dvar = onepart_pool (onepart).allocate ();
+ dvar = onepart_pool_allocate (onepart);
dvar->dv = dv;
dvar->refcount = 1;
dvar->n_var_parts = 1;
INSERT);
if (!*slot)
{
- variable var = onepart_pool (ONEPART_VALUE).allocate ();
+ variable var = onepart_pool_allocate (ONEPART_VALUE);
var->dv = dv;
var->refcount = 1;
var->n_var_parts = 1;
gcc_checking_assert (onepart == ONEPART_VALUE || onepart == ONEPART_DEXPR);
- empty_var = onepart_pool (onepart).allocate ();
+ empty_var = onepart_pool_allocate (onepart);
empty_var->dv = dv;
empty_var->refcount = 1;
empty_var->n_var_parts = 0;
if (!empty_var)
{
- empty_var = onepart_pool (onepart).allocate ();
+ empty_var = onepart_pool_allocate (onepart);
empty_var->dv = var->dv;
empty_var->refcount = 1;
empty_var->n_var_parts = 0;
if (!var)
{
/* Create new variable information. */
- var = onepart_pool (onepart).allocate ();
+ var = onepart_pool_allocate (onepart);
var->dv = dv;
var->refcount = 1;
var->n_var_parts = 1;
if (!empty_var)
{
- empty_var = onepart_pool (old_var->onepart).allocate ();
+ empty_var = onepart_pool_allocate (old_var->onepart);
empty_var->dv = old_var->dv;
empty_var->refcount = 0;
empty_var->n_var_parts = 0;
empty_shared_hash->htab = NULL;
delete changed_variables;
changed_variables = NULL;
- attrs_def::pool.release ();
+ attrs_def_pool.release ();
var_pool.release ();
- location_chain_def::pool.release ();
- shared_hash_def::pool.release ();
+ location_chain_def_pool.release ();
+ shared_hash_def_pool.release ();
if (MAY_HAVE_DEBUG_INSNS)
{
if (global_get_addr_cache)
delete global_get_addr_cache;
global_get_addr_cache = NULL;
- loc_exp_dep::pool.release ();
+ loc_exp_dep_pool.release ();
valvar_pool.release ();
preserved_values.release ();
cselib_finish ();