/* Control flow graph manipulation code for GNU compiler.
- Copyright (C) 1987-2013 Free Software Foundation, Inc.
+ Copyright (C) 1987-2015 Free Software Foundation, Inc.
This file is part of GCC.
#include "config.h"
#include "system.h"
#include "coretypes.h"
-#include "obstack.h"
-#include "ggc.h"
-#include "hash-table.h"
+#include "backend.h"
#include "alloc-pool.h"
+#include "alias.h"
+#include "cfghooks.h"
#include "tree.h"
-#include "basic-block.h"
+#include "hard-reg-set.h"
#include "df.h"
+#include "options.h"
+#include "cfganal.h"
#include "cfgloop.h" /* FIXME: For struct loop. */
#include "dumpfile.h"
init_flow (struct function *the_fun)
{
if (!the_fun->cfg)
- the_fun->cfg = ggc_alloc_cleared_control_flow_graph ();
- n_edges_for_function (the_fun) = 0;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)
- = ggc_alloc_cleared_basic_block_def ();
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = ENTRY_BLOCK;
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)
- = ggc_alloc_cleared_basic_block_def ();
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->index = EXIT_BLOCK;
- ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun)->next_bb
- = EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun);
- EXIT_BLOCK_PTR_FOR_FUNCTION (the_fun)->prev_bb
- = ENTRY_BLOCK_PTR_FOR_FUNCTION (the_fun);
+ the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
+ n_edges_for_fn (the_fun) = 0;
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)
+ = ggc_cleared_alloc<basic_block_def> ();
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)
+ = ggc_cleared_alloc<basic_block_def> ();
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
+ ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
+ = EXIT_BLOCK_PTR_FOR_FN (the_fun);
+ EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
+ = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}
\f
/* Helper function for remove_edge and clear_edges. Frees edge structure
static void
free_edge (edge e)
{
- n_edges--;
+ n_edges_for_fn (cfun)--;
ggc_free (e);
}
edge e;
edge_iterator ei;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
FOR_EACH_EDGE (e, ei, bb->succs)
free_edge (e);
vec_safe_truncate (bb->preds, 0);
}
- FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
+ FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
free_edge (e);
- vec_safe_truncate (EXIT_BLOCK_PTR->preds, 0);
- vec_safe_truncate (ENTRY_BLOCK_PTR->succs, 0);
+ vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
+ vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);
- gcc_assert (!n_edges);
+ gcc_assert (!n_edges_for_fn (cfun));
}
\f
/* Allocate memory for basic_block. */
alloc_block (void)
{
basic_block bb;
- bb = ggc_alloc_cleared_basic_block_def ();
+ bb = ggc_cleared_alloc<basic_block_def> ();
return bb;
}
{
int i;
- SET_BASIC_BLOCK (ENTRY_BLOCK, ENTRY_BLOCK_PTR);
- SET_BASIC_BLOCK (EXIT_BLOCK, EXIT_BLOCK_PTR);
+ SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
+ SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
if (df)
df_compact_blocks ();
basic_block bb;
i = NUM_FIXED_BLOCKS;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
- SET_BASIC_BLOCK (i, bb);
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
bb->index = i;
i++;
}
- gcc_assert (i == n_basic_blocks);
+ gcc_assert (i == n_basic_blocks_for_fn (cfun));
- for (; i < last_basic_block; i++)
- SET_BASIC_BLOCK (i, NULL);
+ for (; i < last_basic_block_for_fn (cfun); i++)
+ SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
}
- last_basic_block = n_basic_blocks;
+ last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}
/* Remove block B from the basic block array. */
expunge_block (basic_block b)
{
unlink_block (b);
- SET_BASIC_BLOCK (b->index, NULL);
- n_basic_blocks--;
+ SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
+ n_basic_blocks_for_fn (cfun)--;
/* We should be able to ggc_free here, but we are not.
The dead SSA_NAMES are left pointing to dead statements that are pointing
to dead basic blocks making garbage collector to die.
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
edge e;
- e = ggc_alloc_cleared_edge_def ();
- n_edges++;
+ e = ggc_cleared_alloc<edge_def> ();
+ n_edges_for_fn (cfun)++;
e->src = src;
e->dest = dst;
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
if (edge_cache == NULL
- || src == ENTRY_BLOCK_PTR
- || dst == EXIT_BLOCK_PTR)
+ || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+ || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
return make_edge (src, dst, flags);
/* Does the requested edge already exist? */
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
bb->flags &= BB_FLAGS_TO_PRESERVE;
}
\f
memset ((void *) s_indent, ' ', (size_t) indent);
s_indent[indent] = '\0';
- if (profile_status_for_function (fun) == PROFILE_ABSENT)
+ if (profile_status_for_fn (fun) == PROFILE_ABSENT)
return;
- if (bb != EXIT_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
{
FOR_EACH_EDGE (e, ei, bb->succs)
sum += e->probability;
(flags & TDF_COMMENT) ? ";; " : "", s_indent,
(int) lsum, (int) bb->count);
}
- if (bb != ENTRY_BLOCK_PTR_FOR_FUNCTION (fun))
+ if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
{
sum = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
(flags & TDF_COMMENT) ? ";; " : "", s_indent,
(int) lsum, (int) bb->count);
}
+ if (BB_PARTITION (bb) == BB_COLD_PARTITION)
+ {
+ /* Warn about inconsistencies in the partitioning that are
+ currently caused by profile insanities created via optimization. */
+ if (!probably_never_executed_bb_p (fun, bb))
+ fprintf (file, "%s%sBlock in cold partition with hot count\n",
+ (flags & TDF_COMMENT) ? ";; " : "", s_indent);
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (!probably_never_executed_edge_p (fun, e))
+ fprintf (file,
+ "%s%sBlock in cold partition with incoming hot edge\n",
+ (flags & TDF_COMMENT) ? ";; " : "", s_indent);
+ }
+ }
}
\f
void
&& (flags & TDF_SLIM) == 0)
do_details = true;
- /* ENTRY_BLOCK_PTR/EXIT_BLOCK_PTR depend on cfun.
- Compare against ENTRY_BLOCK/EXIT_BLOCK to avoid that dependency. */
if (side->index == ENTRY_BLOCK)
fputs (" ENTRY", file);
else if (side->index == EXIT_BLOCK)
if (e->count && do_details)
{
fputs (" count:", file);
- fprintf (file, HOST_WIDEST_INT_PRINT_DEC, e->count);
+ fprintf (file, "%" PRId64, e->count);
}
if (e->flags && do_details)
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
alloc_aux_for_block (bb, size);
}
}
{
basic_block bb;
- FOR_ALL_BB (bb)
+ FOR_ALL_BB_FN (bb, cfun)
bb->aux = NULL;
}
{
basic_block bb;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge e;
edge_iterator ei;
basic_block bb;
edge e;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+ EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
{
edge_iterator ei;
FOR_EACH_EDGE (e, ei, bb->succs)
DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
- basic_block bb = BASIC_BLOCK (n);
+ basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
debug_bb (bb);
return bb;
}
if (flags & TDF_DETAILS)
{
struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
- fprintf (outf, ", count " HOST_WIDEST_INT_PRINT_DEC,
- (HOST_WIDEST_INT) bb->count);
+ fprintf (outf, ", count " "%" PRId64,
+ (int64_t) bb->count);
fprintf (outf, ", freq %i", bb->frequency);
if (maybe_hot_bb_p (fun, bb))
fputs (", maybe hot", outf);
fputs (", probably never executed", outf);
}
fputc ('\n', outf);
- if (TDF_DETAILS)
- check_bb_profile (bb, outf, indent, flags);
if (flags & TDF_DETAILS)
{
+ check_bb_profile (bb, outf, indent, flags);
if (flags & TDF_COMMENT)
fputs (";; ", outf);
fprintf (outf, "%s prev block ", s_indent);
{
basic_block bb;
- FOR_EACH_BB (bb)
+ FOR_EACH_BB_FN (bb, cfun)
{
dump_bb_info (file, bb, 0,
flags & (TDF_COMMENT | TDF_DETAILS),
/* Compute the probability of TAKEN_EDGE being reached via threaded edge.
Watch for overflows. */
if (bb->frequency)
- /* Update to use GCOV_COMPUTE_SCALE. */
- prob = edge_frequency * REG_BR_PROB_BASE / bb->frequency;
+ prob = GCOV_COMPUTE_SCALE (edge_frequency, bb->frequency);
else
prob = 0;
if (prob > taken_edge->probability)
/* numbers smaller than this value are safe to multiply without getting
64bit overflow. */
-#define MAX_SAFE_MULTIPLIER (1 << (sizeof (HOST_WIDEST_INT) * 4 - 1))
+#define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))
/* Multiply all frequencies of basic blocks in array BBS of length NBBS
by NUM/DEN, in gcov_type arithmetic. More accurate than previous
int index2;
};
-struct bb_copy_hasher : typed_noop_remove <htab_bb_copy_original_entry>
+struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
- typedef htab_bb_copy_original_entry value_type;
- typedef htab_bb_copy_original_entry compare_type;
- static inline hashval_t hash (const value_type *);
- static inline bool equal (const value_type *existing,
- const compare_type * candidate);
+ static inline hashval_t hash (const htab_bb_copy_original_entry *);
+ static inline bool equal (const htab_bb_copy_original_entry *existing,
+ const htab_bb_copy_original_entry * candidate);
};
inline hashval_t
-bb_copy_hasher::hash (const value_type *data)
+bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
return data->index1;
}
inline bool
-bb_copy_hasher::equal (const value_type *data, const compare_type *data2)
+bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
+ const htab_bb_copy_original_entry *data2)
{
return data->index1 == data2->index1;
}
/* Data structures used to maintain mapping between basic blocks and
copies. */
-static hash_table <bb_copy_hasher> bb_original;
-static hash_table <bb_copy_hasher> bb_copy;
+static hash_table<bb_copy_hasher> *bb_original;
+static hash_table<bb_copy_hasher> *bb_copy;
/* And between loops and copies. */
-static hash_table <bb_copy_hasher> loop_copy;
-static alloc_pool original_copy_bb_pool;
-
+static hash_table<bb_copy_hasher> *loop_copy;
+static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
/* Initialize the data structures to maintain mapping between blocks
and its copies. */
void
initialize_original_copy_tables (void)
{
- gcc_assert (!original_copy_bb_pool);
- original_copy_bb_pool
- = create_alloc_pool ("original_copy",
- sizeof (struct htab_bb_copy_original_entry), 10);
- bb_original.create (10);
- bb_copy.create (10);
- loop_copy.create (10);
+ original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
+ ("original_copy");
+ bb_original = new hash_table<bb_copy_hasher> (10);
+ bb_copy = new hash_table<bb_copy_hasher> (10);
+ loop_copy = new hash_table<bb_copy_hasher> (10);
}
/* Free the data structures to maintain mapping between blocks and
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
- bb_copy.dispose ();
- bb_original.dispose ();
- loop_copy.dispose ();
- free_alloc_pool (original_copy_bb_pool);
+ delete bb_copy;
+ bb_copy = NULL;
+ delete bb_original;
+ bb_original = NULL;
+ delete loop_copy;
+ loop_copy = NULL;
+ delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}
/* Removes the value associated with OBJ from table TAB. */
static void
-copy_original_table_clear (hash_table <bb_copy_hasher> tab, unsigned obj)
+copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
htab_bb_copy_original_entry **slot;
struct htab_bb_copy_original_entry key, *elt;
return;
key.index1 = obj;
- slot = tab.find_slot (&key, NO_INSERT);
+ slot = tab->find_slot (&key, NO_INSERT);
if (!slot)
return;
elt = *slot;
- tab.clear_slot (slot);
- pool_free (original_copy_bb_pool, elt);
+ tab->clear_slot (slot);
+ original_copy_bb_pool->remove (elt);
}
/* Sets the value associated with OBJ in table TAB to VAL.
Do nothing when data structures are not initialized. */
static void
-copy_original_table_set (hash_table <bb_copy_hasher> tab,
+copy_original_table_set (hash_table<bb_copy_hasher> *tab,
unsigned obj, unsigned val)
{
struct htab_bb_copy_original_entry **slot;
return;
key.index1 = obj;
- slot = tab.find_slot (&key, INSERT);
+ slot = tab->find_slot (&key, INSERT);
if (!*slot)
{
- *slot = (struct htab_bb_copy_original_entry *)
- pool_alloc (original_copy_bb_pool);
+ *slot = original_copy_bb_pool->allocate ();
(*slot)->index1 = obj;
}
(*slot)->index2 = val;
gcc_assert (original_copy_bb_pool);
key.index1 = bb->index;
- entry = bb_original.find (&key);
+ entry = bb_original->find (&key);
if (entry)
- return BASIC_BLOCK (entry->index2);
+ return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
return NULL;
}
gcc_assert (original_copy_bb_pool);
key.index1 = bb->index;
- entry = bb_copy.find (&key);
+ entry = bb_copy->find (&key);
if (entry)
- return BASIC_BLOCK (entry->index2);
+ return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
else
return NULL;
}
gcc_assert (original_copy_bb_pool);
key.index1 = loop->num;
- entry = loop_copy.find (&key);
+ entry = loop_copy->find (&key);
if (entry)
return get_loop (cfun, entry->index2);
else