+2015-06-01  Martin Liska  <mliska@suse.cz>
+
+ * tree-ssa-reassoc.c (add_to_ops_vec): Use new type-based pool allocator.
+ (add_repeat_to_ops_vec): Likewise.
+ (get_ops): Likewise.
+ (maybe_optimize_range_tests): Likewise.
+ (init_reassoc): Likewise.
+ (fini_reassoc): Likewise.
+
2015-06-01  Martin Liska  <mliska@suse.cz>

 * tree-ssa-pre.c (get_or_alloc_expr_for_name): Use new type-based pool allocator.
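
(A note for readers less familiar with the new interface: the patch swaps the untyped alloc_pool calls (create_alloc_pool, pool_alloc, free_alloc_pool) for the pool_allocator template from alloc-pool.h, so each call site gets a correctly typed pointer back from allocate () without the explicit cast.  Below is a minimal, self-contained sketch of that type-based pooling idea; simple_object_pool and node are illustrative names made up for this note, not GCC's actual alloc-pool.h implementation.)

/* Minimal sketch of a type-based object pool in the spirit of
   pool_allocator<T>.  simple_object_pool and node are stand-ins for
   illustration only.  */

#include <cstddef>
#include <new>
#include <vector>

template <typename T>
class simple_object_pool
{
public:
  /* NAME is for diagnostics only; BLOCK_SIZE is how many objects are
     carved out of each allocated block.  */
  simple_object_pool (const char *name, size_t block_size)
    : m_name (name), m_block_size (block_size), m_free (NULL) {}
  ~simple_object_pool () { release (); }

  /* Hand out storage for one T; callers initialize the fields
     themselves, as the reassoc call sites do.  */
  T *allocate ()
  {
    if (!m_free)
      grow ();
    slot *s = m_free;
    m_free = s->next;
    return reinterpret_cast<T *> (s);
  }

  /* Return a single object to the free list.  */
  void remove (T *obj)
  {
    slot *s = reinterpret_cast<slot *> (obj);
    s->next = m_free;
    m_free = s;
  }

  /* Free every block at once, as fini_reassoc now does.  */
  void release ()
  {
    for (size_t i = 0; i < m_blocks.size (); i++)
      ::operator delete (m_blocks[i]);
    m_blocks.clear ();
    m_free = NULL;
  }

private:
  union slot { slot *next; alignas (T) char storage[sizeof (T)]; };

  void grow ()
  {
    char *block
      = static_cast<char *> (::operator new (sizeof (slot) * m_block_size));
    m_blocks.push_back (block);
    for (size_t i = 0; i < m_block_size; i++)
      {
        slot *s = reinterpret_cast<slot *> (block + i * sizeof (slot));
        s->next = m_free;
        m_free = s;
      }
  }

  const char *m_name;
  size_t m_block_size;
  slot *m_free;
  std::vector<char *> m_blocks;
};

/* Stand-in for struct operand_entry.  */
struct node { unsigned rank; unsigned id; };

int
main ()
{
  simple_object_pool<node> pool ("operand entry pool", 30);
  node *n = pool.allocate ();   /* typed result, no cast needed */
  n->rank = 1;
  n->id = 0;
  pool.remove (n);
  pool.release ();
  return 0;
}

In the hunks below only allocate () and release () are exercised; operand entries are never freed one at a time, the whole pool goes away in fini_reassoc.
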
unsigned int count;
} *operand_entry_t;
-static alloc_pool operand_entry_pool;
+static pool_allocator<operand_entry> operand_entry_pool ("operand entry pool",
+ 30);
/* This is used to assign a unique ID to each struct operand_entry
so that qsort results are identical on different hosts. */
static void
add_to_ops_vec (vec<operand_entry_t> *ops, tree op)
{
- operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+ operand_entry_t oe = operand_entry_pool.allocate ();
oe->op = op;
oe->rank = get_rank (op);
add_repeat_to_ops_vec (vec<operand_entry_t> *ops, tree op,
HOST_WIDE_INT repeat)
{
- operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+ operand_entry_t oe = operand_entry_pool.allocate ();
oe->op = op;
oe->rank = get_rank (op);
&& !get_ops (rhs[i], code, ops, loop)
&& has_single_use (rhs[i]))
{
- operand_entry_t oe = (operand_entry_t) pool_alloc (operand_entry_pool);
+ operand_entry_t oe = operand_entry_pool.allocate ();
oe->op = rhs[i];
oe->rank = code;
&& has_single_use (rhs))
{
/* Otherwise, push the _234 range test itself. */
- operand_entry_t oe
- = (operand_entry_t) pool_alloc (operand_entry_pool);
+ operand_entry_t oe = operand_entry_pool.allocate ();
oe->op = rhs;
oe->rank = code;
loop_containing_stmt (stmt))))
{
/* Or push the GIMPLE_COND stmt itself. */
- operand_entry_t oe
- = (operand_entry_t) pool_alloc (operand_entry_pool);
+ operand_entry_t oe = operand_entry_pool.allocate ();
oe->op = NULL;
oe->rank = (e->flags & EDGE_TRUE_VALUE)
memset (&reassociate_stats, 0, sizeof (reassociate_stats));
- operand_entry_pool = create_alloc_pool ("operand entry pool",
- sizeof (struct operand_entry), 30);
next_operand_entry_id = 0;
/* Reverse RPO (Reverse Post Order) will give us something where
reassociate_stats.pows_created);
delete operand_rank;
- free_alloc_pool (operand_entry_pool);
+ operand_entry_pool.release ();
free (bb_rank);
plus_negates.release ();
free_dominance_info (CDI_POST_DOMINATORS);
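
A note on the pool's lifetime: operand_entry_pool is now a file-scope pool_allocator object constructed with its diagnostic name and an estimated block size of 30 entries, so init_reassoc no longer calls create_alloc_pool, and fini_reassoc hands every block back with release () rather than free_alloc_pool ().  As far as I can tell from the new interface, the first block is only allocated on the first allocate () call, so the static object itself should stay cheap when the pass does not run.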