/* Calculate branch probabilities, and basic block execution counts.
- Copyright (C) 1990-2018 Free Software Foundation, Inc.
+ Copyright (C) 1990-2020 Free Software Foundation, Inc.
Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
based on some ideas from Dain Samples of UC Berkeley.
Further mangling by Bob Manson, Cygnus Support.
/* Counter summary from the last set of coverage counts read. */
-const gcov_summary *profile_info;
-
-/* Counter working set information computed from the current counter
- summary. Not initialized unless profile_info summary is non-NULL. */
-static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
+gcov_summary *profile_info;
/* Collect statistics on the performance of this pass for the entire source
file. */
static int total_hist_br_prob[20];
static int total_num_branches;
-/* Helper function to update gcov_working_sets. */
-
-void add_working_set (gcov_working_set_t *set) {
- int i = 0;
- for (; i < NUM_GCOV_WORKING_SETS; i++)
- gcov_working_sets[i] = set[i];
-}
-
/* Forward declarations. */
static void find_spanning_tree (struct edge_list *);
switch (hist->type)
{
case HIST_TYPE_INTERVAL:
- gimple_gen_interval_profiler (hist, t, 0);
+ gimple_gen_interval_profiler (hist, t);
break;
case HIST_TYPE_POW2:
- gimple_gen_pow2_profiler (hist, t, 0);
+ gimple_gen_pow2_profiler (hist, t);
break;
- case HIST_TYPE_SINGLE_VALUE:
- gimple_gen_one_value_profiler (hist, t, 0);
+ case HIST_TYPE_TOPN_VALUES:
+ gimple_gen_topn_values_profiler (hist, t);
break;
case HIST_TYPE_INDIR_CALL:
- case HIST_TYPE_INDIR_CALL_TOPN:
- gimple_gen_ic_profiler (hist, t, 0);
+ gimple_gen_ic_profiler (hist, t);
break;
case HIST_TYPE_AVERAGE:
- gimple_gen_average_profiler (hist, t, 0);
+ gimple_gen_average_profiler (hist, t);
break;
case HIST_TYPE_IOR:
- gimple_gen_ior_profiler (hist, t, 0);
+ gimple_gen_ior_profiler (hist, t);
break;
case HIST_TYPE_TIME_PROFILE:
- gimple_gen_time_profiler (t, 0);
+ gimple_gen_time_profiler (t);
break;
      default:
	gcc_unreachable ();
      }
\f
-/* Fill the working set information into the profile_info structure. */
-
-void
-get_working_sets (void)
-{
- unsigned ws_ix, pctinc, pct;
- gcov_working_set_t *ws_info;
-
- if (!profile_info)
- return;
-
- compute_working_sets (profile_info, gcov_working_sets);
-
- if (dump_file)
- {
- fprintf (dump_file, "Counter working sets:\n");
- /* Multiply the percentage by 100 to avoid float. */
- pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
- for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
- ws_ix++, pct += pctinc)
- {
- if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
- pct = 9990;
- ws_info = &gcov_working_sets[ws_ix];
- /* Print out the percentage using int arithmatic to avoid float. */
- fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
- "%" PRId64 "\n",
- pct / 100, pct - (pct / 100 * 100),
- ws_info->num_counters,
- (int64_t)ws_info->min_counter);
- }
- }
-}
-
-/* Given a the desired percentage of the full profile (sum_all from the
- summary), multiplied by 10 to avoid float in PCT_TIMES_10, returns
- the corresponding working set information. If an exact match for
- the percentage isn't found, the closest value is used. */
-
-gcov_working_set_t *
-find_working_set (unsigned pct_times_10)
-{
- unsigned i;
- if (!profile_info)
- return NULL;
- gcc_assert (pct_times_10 <= 1000);
- if (pct_times_10 >= 999)
- return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
- i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
- if (!i)
- return &gcov_working_sets[0];
- return &gcov_working_sets[i - 1];
-}
-
/* Computes hybrid profile for all matching entries in da_file.
CFG_CHECKSUM is the precomputed checksum for the CFG. */
num_edges++;
}
- counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, cfg_checksum,
- lineno_checksum, &profile_info);
+ counts = get_coverage_counts (GCOV_COUNTER_ARCS, cfg_checksum,
+ lineno_checksum, num_edges);
if (!counts)
return NULL;
- get_working_sets ();
-
- if (dump_file && profile_info)
- fprintf (dump_file, "Merged %u profiles with maximal count %u.\n",
- profile_info->runs, (unsigned) profile_info->sum_max);
-
return counts;
}
-
static bool
is_edge_inconsistent (vec<edge, va_gc> *edges)
{
{
num_edges++;
if (exec_counts)
- {
- edge_gcov_count (e) = exec_counts[exec_counts_pos++];
- if (edge_gcov_count (e) > profile_info->sum_max)
- {
- if (flag_profile_correction)
- {
- static bool informed = 0;
- if (dump_enabled_p () && !informed)
- {
- dump_location_t loc
- = dump_location_t::from_location_t
- (input_location);
- dump_printf_loc (MSG_NOTE, loc,
- "corrupted profile info: edge count"
- " exceeds maximal count\n");
- }
- informed = 1;
- }
- else
- error ("corrupted profile info: edge from %i to %i exceeds maximal count",
- bb->index, e->dest->index);
- }
- }
+ edge_gcov_count (e) = exec_counts[exec_counts_pos++];
else
edge_gcov_count (e) = 0;
return;
}
- bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun));
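+  /* The added TRUE argument requests an exact-size allocation rather than
+     the default exponential growth policy.  */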
+ bb_gcov_counts.safe_grow_cleared (last_basic_block_for_fn (cfun), true);
edge_gcov_counts = new hash_map<edge,gcov_type>;
- if (profile_info->sum_all < profile_info->sum_max)
- {
- error ("corrupted profile info: sum_all is smaller than sum_max");
- exec_counts = NULL;
- }
-
/* Attach extra info block to each bb. */
alloc_aux_for_blocks (sizeof (struct bb_profile_info));
FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
{
informed = 1;
dump_printf_loc (MSG_NOTE,
- dump_location_t::from_location_t (input_location),
+ dump_user_location_t::from_location_t (input_location),
"correcting inconsistent profile data\n");
}
correct_negative_edge_counts ();
}
if (bb_gcov_count (bb))
{
+ bool set_to_guessed = false;
FOR_EACH_EDGE (e, ei, bb->succs)
- e->probability = profile_probability::probability_in_gcov_type
- (edge_gcov_count (e), bb_gcov_count (bb));
+ {
+ bool prev_never = e->probability == profile_probability::never ();
+ e->probability = profile_probability::probability_in_gcov_type
+ (edge_gcov_count (e), bb_gcov_count (bb));
+ if (e->probability == profile_probability::never ()
+ && !prev_never
+ && flag_profile_partial_training)
+ set_to_guessed = true;
+ }
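+	  /* With -fprofile-partial-training, a measured count of zero that
+	     turns a previously non-never probability into never should not
+	     be taken as proof that the edge never executes, so downgrade
+	     all outgoing probabilities to guessed quality.  */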
+ if (set_to_guessed)
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ e->probability = e->probability.guessed ();
if (bb->index >= NUM_FIXED_BLOCKS
&& block_ends_with_condjump_p (bb)
&& EDGE_COUNT (bb->succs) >= 2)
}
}
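+  /* Treat the profile as read only when counts were actually found and
+     either the entry block was executed in the train run or
+     -fprofile-partial-training is disabled.  */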
+ if (exec_counts
+ && (bb_gcov_count (ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ || !flag_profile_partial_training))
+ profile_status_for_fn (cfun) = PROFILE_READ;
+
/* If we have real data, use them! */
if (bb_gcov_count (ENTRY_BLOCK_PTR_FOR_FN (cfun))
|| !flag_guess_branch_prob)
FOR_ALL_BB_FN (bb, cfun)
- bb->count = profile_count::from_gcov_type (bb_gcov_count (bb));
+ if (bb_gcov_count (bb) || !flag_profile_partial_training)
+ bb->count = profile_count::from_gcov_type (bb_gcov_count (bb));
+ else
+ bb->count = profile_count::guessed_zero ();
/* If function was not trained, preserve local estimates including statically
determined zero counts. */
- else
+ else if (profile_status_for_fn (cfun) == PROFILE_READ
+ && !flag_profile_partial_training)
FOR_ALL_BB_FN (bb, cfun)
if (!(bb->count == profile_count::zero ()))
bb->count = bb->count.global0 ();
if (dump_file)
{
+      fprintf (dump_file, " Profile feedback for function");
+      fprintf (dump_file, ((profile_status_for_fn (cfun) == PROFILE_READ)
+			   ? " is available\n"
+			   : " is not available\n"));
+
fprintf (dump_file, "%d branches\n", num_branches);
if (num_branches)
for (i = 0; i < 10; i++)
free_aux_for_blocks ();
}
+/* Sort the histogram values and counts for TOPN and INDIR_CALL histogram
+   types.  */
+
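+/* For example, the counters {10, 3, 5, 2, 7, 4, 9, 1} describe 10 total
+   executions and N = 3 tracked <value, counter> pairs; sorting rearranges
+   them to {10, 3, 7, 4, 5, 2, 9, 1}, i.e. descending by counter with ties
+   broken by descending value.  */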
+static void
+sort_hist_values (histogram_value hist)
+{
+ gcc_assert (hist->type == HIST_TYPE_TOPN_VALUES
+ || hist->type == HIST_TYPE_INDIR_CALL);
+
+ int counters = hist->hvalue.counters[1];
+
+  /* Hist values are organized as:
+     [total_executions, N, value1, counter1, ..., valueN, counterN]
+     Rearrange the <value, counter> pairs with a descending bubble sort:
+     compare counters first; if two counters are equal, compare the values
+     and swap when the first is smaller, so that the sort stays stable.  */
+  for (int i = 0; i < counters - 1; i++)
+    {
+ bool swapped = false;
+ for (int j = 0; j < counters - 1 - i; j++)
+ {
+ gcov_type *p = &hist->hvalue.counters[2 * j + 2];
+ if (p[1] < p[3] || (p[1] == p[3] && p[0] < p[2]))
+ {
+ std::swap (p[0], p[2]);
+ std::swap (p[1], p[3]);
+ swapped = true;
+ }
+ }
+ if (!swapped)
+ break;
+ }
+}
/* Load the value histograms whose descriptions are stored in the VALUES array
   from the .gcda file.
continue;
}
- histogram_counts[t] =
- get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
- n_histogram_counters[t], cfg_checksum,
- lineno_checksum, NULL);
+ histogram_counts[t] = get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
+ cfg_checksum,
+ lineno_checksum,
+ n_histogram_counters[t]);
if (histogram_counts[t])
any = 1;
act_count[t] = histogram_counts[t];
gimple *stmt = hist->hvalue.stmt;
t = (int) hist->type;
+ bool topn_p = (hist->type == HIST_TYPE_TOPN_VALUES
+ || hist->type == HIST_TYPE_INDIR_CALL);
- aact_count = act_count[t];
+      /* A TOP N counter uses a variable number of counters.  */
+ if (topn_p)
+ {
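+	  /* The counter block is laid out as [total_executions, N,
+	     value1, counter1, ..., valueN, counterN]: two header slots
+	     plus one <value, counter> pair per tracked value, hence
+	     2 + 2 * N entries.  */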
+ unsigned total_size;
+ if (act_count[t])
+ total_size = 2 + 2 * act_count[t][1];
+ else
+ total_size = 2;
+ gimple_add_histogram_value (cfun, stmt, hist);
+ hist->n_counters = total_size;
+ hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
+ for (j = 0; j < hist->n_counters; j++)
+ if (act_count[t])
+ hist->hvalue.counters[j] = act_count[t][j];
+ else
+ hist->hvalue.counters[j] = 0;
+ act_count[t] += hist->n_counters;
+ sort_hist_values (hist);
+ }
+ else
+ {
+ aact_count = act_count[t];
- if (act_count[t])
- act_count[t] += hist->n_counters;
+ if (act_count[t])
+ act_count[t] += hist->n_counters;
- gimple_add_histogram_value (cfun, stmt, hist);
- hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
- for (j = 0; j < hist->n_counters; j++)
- if (aact_count)
- hist->hvalue.counters[j] = aact_count[j];
- else
- hist->hvalue.counters[j] = 0;
+ gimple_add_histogram_value (cfun, stmt, hist);
+ hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
+ for (j = 0; j < hist->n_counters; j++)
+ if (aact_count)
+ hist->hvalue.counters[j] = aact_count[j];
+ else
+ hist->hvalue.counters[j] = 0;
+ }
/* Time profiler counter is not related to any statement,
     so that we have to read the counter and set the value to
     the corresponding call graph node.  */
if (hist->type == HIST_TYPE_TIME_PROFILE)
{
node = cgraph_node::get (hist->fun->decl);
- node->tp_first_run = hist->hvalue.counters[0];
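+	  /* Sanity-check the recorded first-run time; out-of-range values
+	     indicate corrupted profile data.  */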
+ if (hist->hvalue.counters[0] >= 0
+ && hist->hvalue.counters[0] < INT_MAX / 2)
+ node->tp_first_run = hist->hvalue.counters[0];
+ else
+ {
+ if (flag_profile_correction)
+ error ("corrupted profile info: invalid time profile");
+ node->tp_first_run = 0;
+ }
if (dump_file)
fprintf (dump_file, "Read tp_first_run: %d\n", node->tp_first_run);
free (histogram_counts[t]);
}
+/* Location triplet identifying a streamed location: source file, line
+   number and basic block index.  */
+struct location_triplet
+{
+ const char *filename;
+ int lineno;
+ int bb_index;
+};
+
+/* Traits class for streamed_locations hash set below. */
+
+struct location_triplet_hash : typed_noop_remove <location_triplet>
+{
+ typedef location_triplet value_type;
+ typedef location_triplet compare_type;
+
+ static hashval_t
+ hash (const location_triplet &ref)
+ {
+ inchash::hash hstate (0);
+ if (ref.filename)
+ hstate.add_int (strlen (ref.filename));
+ hstate.add_int (ref.lineno);
+ hstate.add_int (ref.bb_index);
+ return hstate.end ();
+ }
+
+ static bool
+ equal (const location_triplet &ref1, const location_triplet &ref2)
+ {
+ return ref1.lineno == ref2.lineno
+ && ref1.bb_index == ref2.bb_index
+ && ref1.filename != NULL
+ && ref2.filename != NULL
+ && strcmp (ref1.filename, ref2.filename) == 0;
+ }
+
+ static void
+ mark_deleted (location_triplet &ref)
+ {
+ ref.lineno = -1;
+ }
+
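+  /* Zero-initialized storage is not a valid empty entry; empty entries
+     are marked explicitly via mark_empty (lineno == -2) below.  */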
+ static const bool empty_zero_p = false;
+
+ static void
+ mark_empty (location_triplet &ref)
+ {
+ ref.lineno = -2;
+ }
+
+ static bool
+ is_deleted (const location_triplet &ref)
+ {
+ return ref.lineno == -1;
+ }
+
+ static bool
+ is_empty (const location_triplet &ref)
+ {
+ return ref.lineno == -2;
+ }
+};
+
/* When passed NULL as file_name, initialize.
When passed something else, output the necessary commands to change
line to LINE and offset to FILE_NAME. */
static void
-output_location (char const *file_name, int line,
+output_location (hash_set<location_triplet_hash> *streamed_locations,
+ char const *file_name, int line,
gcov_position_t *offset, basic_block bb)
{
static char const *prev_file_name;
static int prev_line;
bool name_differs, line_differs;
+ location_triplet triplet;
+ triplet.filename = file_name;
+ triplet.lineno = line;
+ triplet.bb_index = bb ? bb->index : 0;
+
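+  /* Stream each (file, line, block) triplet at most once; hash_set::add
+     returns true when the triplet was already present.  */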
+ if (streamed_locations->add (triplet))
+ return;
+
if (!file_name)
{
prev_file_name = NULL;
return e2->dest->index - e1->dest->index;
}
+/* Only read execution count for thunks. */
+
+void
+read_thunk_profile (struct cgraph_node *node)
+{
+ tree old = current_function_decl;
+ current_function_decl = node->decl;
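+  /* Thunks are streamed with zero CFG and line-number checksums and a
+     single arc counter (see the thunk path in branch_prob), so read back
+     exactly one GCOV_COUNTER_ARCS counter.  */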
+ gcov_type *counts = get_coverage_counts (GCOV_COUNTER_ARCS, 0, 0, 1);
+ if (counts)
+ {
+ node->callees->count = node->count
+ = profile_count::from_gcov_type (counts[0]);
+ free (counts);
+ }
+ current_function_decl = old;
+ return;
+}
+
+
/* Instrument and/or analyze program behavior based on the program's CFG.
This function creates a representation of the control flow graph (of
Main entry point of this file. */
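+/* When THUNK is true, the function being compiled is a thunk: the CFG
+   fix-ups below are skipped, only the edge leaving the entry block is
+   instrumented, and zero checksums are used because thunks are streamed
+   in without a CFG.  */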
void
-branch_prob (void)
+branch_prob (bool thunk)
{
basic_block bb;
unsigned i;
flow_call_edges_add (NULL);
add_noreturn_fake_exit_edges ();
- /* We can't handle cyclic regions constructed using abnormal edges.
- To avoid these we replace every source of abnormal edge by a fake
- edge from entry node and every destination by fake edge to exit.
- This keeps graph acyclic and our calculation exact for all normal
- edges except for exit and entrance ones.
-
- We also add fake exit edges for each call and asm statement in the
- basic, since it may not return. */
+ hash_set <location_triplet_hash> streamed_locations;
- FOR_EACH_BB_FN (bb, cfun)
+ if (!thunk)
{
- int need_exit_edge = 0, need_entry_edge = 0;
- int have_exit_edge = 0, have_entry_edge = 0;
- edge e;
- edge_iterator ei;
+ /* We can't handle cyclic regions constructed using abnormal edges.
+ To avoid these we replace every source of abnormal edge by a fake
+ edge from entry node and every destination by fake edge to exit.
+ This keeps graph acyclic and our calculation exact for all normal
+ edges except for exit and entrance ones.
- /* Functions returning multiple times are not handled by extra edges.
- Instead we simply allow negative counts on edges from exit to the
- block past call and corresponding probabilities. We can't go
- with the extra edges because that would result in flowgraph that
- needs to have fake edges outside the spanning tree. */
+ We also add fake exit edges for each call and asm statement in the
+	 basic block, since it may not return.  */
- FOR_EACH_EDGE (e, ei, bb->succs)
+ FOR_EACH_BB_FN (bb, cfun)
{
- gimple_stmt_iterator gsi;
- gimple *last = NULL;
-
- /* It may happen that there are compiler generated statements
- without a locus at all. Go through the basic block from the
- last to the first statement looking for a locus. */
- for (gsi = gsi_last_nondebug_bb (bb);
- !gsi_end_p (gsi);
- gsi_prev_nondebug (&gsi))
+ int need_exit_edge = 0, need_entry_edge = 0;
+ int have_exit_edge = 0, have_entry_edge = 0;
+ edge e;
+ edge_iterator ei;
+
+ /* Functions returning multiple times are not handled by extra edges.
+ Instead we simply allow negative counts on edges from exit to the
+ block past call and corresponding probabilities. We can't go
+	     with the extra edges because that would result in a flowgraph that
+ needs to have fake edges outside the spanning tree. */
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
{
- last = gsi_stmt (gsi);
- if (!RESERVED_LOCATION_P (gimple_location (last)))
- break;
- }
+ gimple_stmt_iterator gsi;
+ gimple *last = NULL;
+
+ /* It may happen that there are compiler generated statements
+ without a locus at all. Go through the basic block from the
+ last to the first statement looking for a locus. */
+ for (gsi = gsi_last_nondebug_bb (bb);
+ !gsi_end_p (gsi);
+ gsi_prev_nondebug (&gsi))
+ {
+ last = gsi_stmt (gsi);
+ if (!RESERVED_LOCATION_P (gimple_location (last)))
+ break;
+ }
- /* Edge with goto locus might get wrong coverage info unless
- it is the only edge out of BB.
- Don't do that when the locuses match, so
- if (blah) goto something;
- is not computed twice. */
- if (last
- && gimple_has_location (last)
- && !RESERVED_LOCATION_P (e->goto_locus)
- && !single_succ_p (bb)
- && (LOCATION_FILE (e->goto_locus)
- != LOCATION_FILE (gimple_location (last))
- || (LOCATION_LINE (e->goto_locus)
- != LOCATION_LINE (gimple_location (last)))))
+ /* Edge with goto locus might get wrong coverage info unless
+ it is the only edge out of BB.
+ Don't do that when the locuses match, so
+ if (blah) goto something;
+ is not computed twice. */
+ if (last
+ && gimple_has_location (last)
+ && !RESERVED_LOCATION_P (e->goto_locus)
+ && !single_succ_p (bb)
+ && (LOCATION_FILE (e->goto_locus)
+ != LOCATION_FILE (gimple_location (last))
+ || (LOCATION_LINE (e->goto_locus)
+ != LOCATION_LINE (gimple_location (last)))))
+ {
+ basic_block new_bb = split_edge (e);
+ edge ne = single_succ_edge (new_bb);
+ ne->goto_locus = e->goto_locus;
+ }
+ if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
+ && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
+ need_exit_edge = 1;
+ if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+ have_exit_edge = 1;
+ }
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
- basic_block new_bb = split_edge (e);
- edge ne = single_succ_edge (new_bb);
- ne->goto_locus = e->goto_locus;
+ if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
+ && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ need_entry_edge = 1;
+ if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
+ have_entry_edge = 1;
}
- if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
- need_exit_edge = 1;
- if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
- have_exit_edge = 1;
- }
- FOR_EACH_EDGE (e, ei, bb->preds)
- {
- if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
- && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
- need_entry_edge = 1;
- if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
- have_entry_edge = 1;
- }
- if (need_exit_edge && !have_exit_edge)
- {
- if (dump_file)
- fprintf (dump_file, "Adding fake exit edge to bb %i\n",
- bb->index);
- make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
- }
- if (need_entry_edge && !have_entry_edge)
- {
- if (dump_file)
- fprintf (dump_file, "Adding fake entry edge to bb %i\n",
- bb->index);
- make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
- /* Avoid bbs that have both fake entry edge and also some
- exit edge. One of those edges wouldn't be added to the
- spanning tree, but we can't instrument any of them. */
- if (have_exit_edge || need_exit_edge)
+ if (need_exit_edge && !have_exit_edge)
{
- gimple_stmt_iterator gsi;
- gimple *first;
-
- gsi = gsi_start_nondebug_after_labels_bb (bb);
- gcc_checking_assert (!gsi_end_p (gsi));
- first = gsi_stmt (gsi);
- /* Don't split the bbs containing __builtin_setjmp_receiver
- or ABNORMAL_DISPATCHER calls. These are very
- special and don't expect anything to be inserted before
- them. */
- if (is_gimple_call (first)
- && (gimple_call_builtin_p (first, BUILT_IN_SETJMP_RECEIVER)
- || (gimple_call_flags (first) & ECF_RETURNS_TWICE)
- || (gimple_call_internal_p (first)
- && (gimple_call_internal_fn (first)
- == IFN_ABNORMAL_DISPATCHER))))
- continue;
-
if (dump_file)
- fprintf (dump_file, "Splitting bb %i after labels\n",
+ fprintf (dump_file, "Adding fake exit edge to bb %i\n",
bb->index);
- split_block_after_labels (bb);
+ make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
+ }
+ if (need_entry_edge && !have_entry_edge)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Adding fake entry edge to bb %i\n",
+ bb->index);
+ make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
+ /* Avoid bbs that have both fake entry edge and also some
+ exit edge. One of those edges wouldn't be added to the
+ spanning tree, but we can't instrument any of them. */
+ if (have_exit_edge || need_exit_edge)
+ {
+ gimple_stmt_iterator gsi;
+ gimple *first;
+
+ gsi = gsi_start_nondebug_after_labels_bb (bb);
+ gcc_checking_assert (!gsi_end_p (gsi));
+ first = gsi_stmt (gsi);
+ /* Don't split the bbs containing __builtin_setjmp_receiver
+ or ABNORMAL_DISPATCHER calls. These are very
+ special and don't expect anything to be inserted before
+ them. */
+ if (is_gimple_call (first)
+ && (gimple_call_builtin_p (first, BUILT_IN_SETJMP_RECEIVER)
+ || (gimple_call_flags (first) & ECF_RETURNS_TWICE)
+ || (gimple_call_internal_p (first)
+ && (gimple_call_internal_fn (first)
+ == IFN_ABNORMAL_DISPATCHER))))
+ continue;
+
+ if (dump_file)
+ fprintf (dump_file, "Splitting bb %i after labels\n",
+ bb->index);
+ split_block_after_labels (bb);
+ }
}
}
}
on the spanning tree. We insert as many abnormal and critical edges
as possible to minimize number of edge splits necessary. */
- find_spanning_tree (el);
+ if (!thunk)
+ find_spanning_tree (el);
+ else
+ {
+ edge e;
+ edge_iterator ei;
+      /* Keep only the edge from the entry block to be instrumented.  */
+ FOR_EACH_BB_FN (bb, cfun)
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ EDGE_INFO (e)->ignore = true;
+ }
+
/* Fake edges that are not on the tree will not be instrumented, so
mark them ignored. */
   the checksum in only one place, since it depends on the shape
of the control flow which can change during
various transformations. */
- cfg_checksum = coverage_compute_cfg_checksum (cfun);
- lineno_checksum = coverage_compute_lineno_checksum ();
+ if (thunk)
+ {
+      /* At stream-in time we do not have a CFG, so we cannot compute
+	 checksums.  */
+ cfg_checksum = 0;
+ lineno_checksum = 0;
+ }
+ else
+ {
+ cfg_checksum = coverage_compute_cfg_checksum (cfun);
+ lineno_checksum = coverage_compute_lineno_checksum ();
+ }
/* Write the data from which gcov can reconstruct the basic block
graph and function line numbers (the gcno file). */
/* Line numbers. */
/* Initialize the output. */
- output_location (NULL, 0, NULL, NULL);
+ output_location (&streamed_locations, NULL, 0, NULL, NULL);
hash_set<int_hash <location_t, 0, 2> > seen_locations;
location_t loc = DECL_SOURCE_LOCATION (current_function_decl);
seen_locations.add (loc);
expanded_location curr_location = expand_location (loc);
- output_location (curr_location.file, curr_location.line,
- &offset, bb);
+ output_location (&streamed_locations, curr_location.file,
+ MAX (1, curr_location.line), &offset, bb);
}
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
if (!RESERVED_LOCATION_P (loc))
{
seen_locations.add (loc);
- output_location (gimple_filename (stmt), gimple_lineno (stmt),
- &offset, bb);
+ output_location (&streamed_locations, gimple_filename (stmt),
+ MAX (1, gimple_lineno (stmt)), &offset, bb);
}
}
&& !seen_locations.contains (loc))
{
expanded_location curr_location = expand_location (loc);
- output_location (curr_location.file, curr_location.line,
- &offset, bb);
+ output_location (&streamed_locations, curr_location.file,
+ MAX (1, curr_location.line), &offset, bb);
}
if (offset)
values.release ();
free_edge_list (el);
coverage_end_function (lineno_checksum, cfg_checksum);
- if (flag_branch_probabilities && profile_info)
+ if (flag_branch_probabilities
+ && (profile_status_for_fn (cfun) == PROFILE_READ))
{
- struct loop *loop;
+ class loop *loop;
if (dump_file && (dump_flags & TDF_DETAILS))
report_predictor_hitrates ();
- profile_status_for_fn (cfun) = PROFILE_READ;
/* At this moment we have precise loop iteration count estimates.
Record them to loop structure before the profile gets out of date. */
FOR_EACH_LOOP (loop, 0)
- if (loop->header->count > 0)
+ if (loop->header->count > 0 && loop->header->count.reliable_p ())
{
gcov_type nit = expected_loop_iterations_unbounded (loop);
widest_int bound = gcov_type_to_wide_int (nit);