it gets run. It also has no need for the iterative solver.
----------------------------------------------------------------------------*/
+#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
+
/* Problem data for the scanning dataflow function. */
struct df_scan_problem_data
{
- alloc_pool ref_base_pool;
- alloc_pool ref_artificial_pool;
- alloc_pool ref_regular_pool;
- alloc_pool insn_pool;
- alloc_pool reg_pool;
- alloc_pool mw_reg_pool;
+ pool_allocator<df_base_ref> *ref_base_pool;
+ pool_allocator<df_artificial_ref> *ref_artificial_pool;
+ pool_allocator<df_regular_ref> *ref_regular_pool;
+ pool_allocator<df_insn_info> *insn_pool;
+ pool_allocator<df_reg_info> *reg_pool;
+ pool_allocator<df_mw_hardreg> *mw_reg_pool;
+
bitmap_obstack reg_bitmaps;
bitmap_obstack insn_bitmaps;
};
bitmap_clear (&df->insns_to_rescan);
bitmap_clear (&df->insns_to_notes_rescan);
- free_alloc_pool (problem_data->ref_base_pool);
- free_alloc_pool (problem_data->ref_artificial_pool);
- free_alloc_pool (problem_data->ref_regular_pool);
- free_alloc_pool (problem_data->insn_pool);
- free_alloc_pool (problem_data->reg_pool);
- free_alloc_pool (problem_data->mw_reg_pool);
+ delete problem_data->ref_base_pool;
+ delete problem_data->ref_artificial_pool;
+ delete problem_data->ref_regular_pool;
+ delete problem_data->insn_pool;
+ delete problem_data->reg_pool;
+ delete problem_data->mw_reg_pool;
bitmap_obstack_release (&problem_data->reg_bitmaps);
bitmap_obstack_release (&problem_data->insn_bitmaps);
free (df_scan->problem_data);
{
struct df_scan_problem_data *problem_data;
unsigned int insn_num = get_max_uid () + 1;
- unsigned int block_size = 512;
basic_block bb;
/* Given the number of pools, this is really faster than tearing
df_scan->problem_data = problem_data;
df_scan->computed = true;
- problem_data->ref_base_pool
- = create_alloc_pool ("df_scan ref base",
- sizeof (struct df_base_ref), block_size);
- problem_data->ref_artificial_pool
- = create_alloc_pool ("df_scan ref artificial",
- sizeof (struct df_artificial_ref), block_size);
- problem_data->ref_regular_pool
- = create_alloc_pool ("df_scan ref regular",
- sizeof (struct df_regular_ref), block_size);
- problem_data->insn_pool
- = create_alloc_pool ("df_scan insn",
- sizeof (struct df_insn_info), block_size);
- problem_data->reg_pool
- = create_alloc_pool ("df_scan reg",
- sizeof (struct df_reg_info), block_size);
- problem_data->mw_reg_pool
- = create_alloc_pool ("df_scan mw_reg",
- sizeof (struct df_mw_hardreg), block_size / 16);
+ problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+ ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+ ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+ ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ problem_data->insn_pool = new pool_allocator<df_insn_info>
+ ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ problem_data->reg_pool = new pool_allocator<df_reg_info>
+ ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+ problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+ ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
bitmap_obstack_initialize (&problem_data->reg_bitmaps);
bitmap_obstack_initialize (&problem_data->insn_bitmaps);
{
struct df_reg_info *reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->def_regs[i] = reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->use_regs[i] = reg_info;
- reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+ reg_info = problem_data->reg_pool->allocate ();
memset (reg_info, 0, sizeof (struct df_reg_info));
df->eq_use_regs[i] = reg_info;
df->def_info.begin[i] = 0;
switch (DF_REF_CLASS (ref))
{
case DF_REF_BASE:
- pool_free (problem_data->ref_base_pool, ref);
+ problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
break;
case DF_REF_ARTIFICIAL:
- pool_free (problem_data->ref_artificial_pool, ref);
+ problem_data->ref_artificial_pool->remove
+ ((df_artificial_ref *) (ref));
break;
case DF_REF_REGULAR:
- pool_free (problem_data->ref_regular_pool, ref);
+ problem_data->ref_regular_pool->remove
+ ((df_regular_ref *) (ref));
break;
}
}
insn_rec = DF_INSN_INFO_GET (insn);
if (!insn_rec)
{
- insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+ insn_rec = problem_data->insn_pool->allocate ();
DF_INSN_INFO_SET (insn, insn_rec);
}
memset (insn_rec, 0, sizeof (struct df_insn_info));
for (; hardregs; hardregs = next)
{
next = DF_MWS_NEXT (hardregs);
- pool_free (problem_data->mw_reg_pool, hardregs);
+ problem_data->mw_reg_pool->remove (hardregs);
}
}
df_ref_chain_delete (insn_info->uses);
df_ref_chain_delete (insn_info->eq_uses);
- pool_free (problem_data->insn_pool, insn_info);
+ problem_data->insn_pool->remove (insn_info);
DF_INSN_UID_SET (uid, NULL);
}
}
FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
df_free_ref (ref);
FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
- pool_free (problem_data->mw_reg_pool, mw);
+ problem_data->mw_reg_pool->remove (mw);
collection_rec->def_vec.release ();
collection_rec->use_vec.release ();
if (mw->flags & DF_REF_IN_NOTE)
{
*mw_ptr = DF_MWS_NEXT (mw);
- pool_free (problem_data->mw_reg_pool, mw);
+ problem_data->mw_reg_pool->remove (mw);
}
else
mw_ptr = &DF_MWS_NEXT (mw);
while (i + dist + 1 < count
&& df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
{
- pool_free (problem_data->mw_reg_pool,
- (*mw_vec)[i + dist + 1]);
+ problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
dist++;
}
/* Copy it down to the next position. */
switch (cl)
{
case DF_REF_BASE:
- this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+ this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
gcc_checking_assert (loc == NULL);
break;
case DF_REF_ARTIFICIAL:
- this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+ this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
this_ref->artificial_ref.bb = bb;
gcc_checking_assert (loc == NULL);
break;
case DF_REF_REGULAR:
- this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+ this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
this_ref->regular_ref.loc = loc;
gcc_checking_assert (loc);
break;
ref_flags |= DF_REF_PARTIAL;
ref_flags |= DF_REF_MW_HARDREG;
- hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+ hardreg = problem_data->mw_reg_pool->allocate ();
hardreg->type = ref_type;
hardreg->flags = ref_flags;
hardreg->mw_reg = reg;