index a5029a1037ba728deda9ef8c1f4d70b3934e940f..754326b3a362eec8b2d0ee1b57a39aa8994b3b77 100644 (file)
@@ -1,7 +1,5 @@
 /* Calculate branch probabilities, and basic block execution counts.
-   Copyright (C) 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998, 1999,
-   2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011, 2012
-   Free Software Foundation, Inc.
+   Copyright (C) 1990-2015 Free Software Foundation, Inc.
    Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
    based on some ideas from Dain Samples of UC Berkeley.
    Further mangling by Bob Manson, Cygnus Support.
@@ -56,20 +54,42 @@ along with GCC; see the file COPYING3.  If not see
 #include "rtl.h"
 #include "flags.h"
 #include "regs.h"
-#include "expr.h"
+#include "symtab.h"
+#include "hard-reg-set.h"
 #include "function.h"
+#include "alias.h"
+#include "tree.h"
+#include "insn-config.h"
+#include "expmed.h"
+#include "dojump.h"
+#include "explow.h"
+#include "calls.h"
+#include "emit-rtl.h"
+#include "varasm.h"
+#include "stmt.h"
+#include "expr.h"
+#include "predict.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfganal.h"
 #include "basic-block.h"
 #include "diagnostic-core.h"
 #include "coverage.h"
 #include "value-prof.h"
-#include "tree.h"
-#include "tree-flow.h"
+#include "fold-const.h"
+#include "tree-ssa-alias.h"
+#include "internal-fn.h"
+#include "gimple-expr.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "tree-cfg.h"
 #include "cfgloop.h"
 #include "dumpfile.h"
+#include "cgraph.h"
 
 #include "profile.h"
 
-struct bb_info {
+struct bb_profile_info {
   unsigned int count_valid : 1;
 
   /* Number of successor and predecessor edges.  */
@@ -77,18 +97,13 @@ struct bb_info {
   gcov_type pred_count;
 };
 
-#define BB_INFO(b)  ((struct bb_info *) (b)->aux)
+#define BB_INFO(b)  ((struct bb_profile_info *) (b)->aux)
 
 
 /* Counter summary from the last set of coverage counts read.  */
 
 const struct gcov_ctr_summary *profile_info;
 
-/* Number of data points in the working set summary array. Using 128
-   provides information for at least every 1% increment of the total
-   profile size. The last entry is hardwired to 99.9% of the total.  */
-#define NUM_GCOV_WORKING_SETS 128
-
 /* Counter working set information computed from the current counter
    summary. Not initialized unless profile_info summary is non-NULL.  */
 static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];
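
The BB_INFO macro above only casts the per-block aux pointer to the renamed bb_profile_info record; the storage itself is attached with alloc_aux_for_blocks in compute_branch_probabilities further down. A minimal sketch of that aux idiom (the function name is invented for illustration):

/* Sketch only: the bb->aux idiom behind BB_INFO.  alloc_aux_for_blocks
   attaches one record per basic block, BB_INFO casts bb->aux back to it,
   and free_aux_for_blocks releases the storage again.  */
static void
example_walk_bb_info (void)
{
  basic_block bb;

  alloc_aux_for_blocks (sizeof (struct bb_profile_info));
  FOR_EACH_BB_FN (bb, cfun)
    BB_INFO (bb)->count_valid = 0;   /* touch the per-block record */
  free_aux_for_blocks ();
}
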
@@ -106,6 +121,14 @@ static int total_num_times_called;
 static int total_hist_br_prob[20];
 static int total_num_branches;
 
+/* Helper function to update gcov_working_sets.  */
+
+void
+add_working_set (gcov_working_set_t *set)
+{
+  int i = 0;
+  for (; i < NUM_GCOV_WORKING_SETS; i++)
+    gcov_working_sets[i] = set[i];
+}
+
 /* Forward declarations.  */
 static void find_spanning_tree (struct edge_list *);
 
@@ -121,14 +144,14 @@ instrument_edges (struct edge_list *el)
   int num_edges = NUM_EDGES (el);
   basic_block bb;
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       edge e;
       edge_iterator ei;
 
       FOR_EACH_EDGE (e, ei, bb->succs)
        {
-         struct edge_info *inf = EDGE_INFO (e);
+         struct edge_profile_info *inf = EDGE_INFO (e);
 
          if (!inf->ignore && !inf->on_tree)
            {
@@ -156,9 +179,9 @@ instrument_values (histogram_values values)
 
   /* Emit code to generate the histograms before the insns.  */
 
-  for (i = 0; i < VEC_length (histogram_value, values); i++)
+  for (i = 0; i < values.length (); i++)
     {
-      histogram_value hist = VEC_index (histogram_value, values, i);
+      histogram_value hist = values[i];
       unsigned t = COUNTER_FOR_HIST_TYPE (hist->type);
 
       if (!coverage_counter_alloc (t, hist->n_counters))
@@ -183,6 +206,7 @@ instrument_values (histogram_values values)
          break;
 
        case HIST_TYPE_INDIR_CALL:
+       case HIST_TYPE_INDIR_CALL_TOPN:
          gimple_gen_ic_profiler (hist, t, 0);
          break;
 
@@ -194,6 +218,16 @@ instrument_values (histogram_values values)
          gimple_gen_ior_profiler (hist, t, 0);
          break;
 
+       case HIST_TYPE_TIME_PROFILE:
+         {
+           basic_block bb =
+             split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+           gimple_stmt_iterator gsi = gsi_start_bb (bb);
+
+           gimple_gen_time_profiler (t, 0, gsi);
+           break;
+         }
+
        default:
          gcc_unreachable ();
        }
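
The new HIST_TYPE_TIME_PROFILE case splits the lone successor edge of the entry block and emits the time-profiler update at the top of the resulting block, so it runs once per function invocation. At run time the instrumentation boils down to stamping a global "time" on the function's first execution; roughly as below, where every name is invented for illustration (the real code is generated as gimple by gimple_gen_time_profiler):

/* Sketch only: the run-time effect of time-profiler instrumentation.
   A shared counter advances on every first-time function entry, and the
   stamp later surfaces as node->tp_first_run (see the
   compute_value_histograms hunk below).  */
static long long example_global_timestamp;   /* shared across functions */
static long long example_first_run;          /* one per instrumented function */

static void
example_time_profiler_update (void)
{
  if (example_first_run == 0)
    example_first_run = ++example_global_timestamp;
}
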
@@ -201,110 +235,18 @@ instrument_values (histogram_values values)
 }
 \f
 
-/* Compute the working set information from the counter histogram in
-   the profile summary. This is an array of information corresponding to a
-   range of percentages of the total execution count (sum_all), and includes
-   the number of counters required to cover that working set percentage and
-   the minimum counter value in that working set.  */
+/* Fill the static gcov_working_sets array from the counter summary
+   in profile_info.  */
 
-static void
-compute_working_sets (void)
+void
+get_working_sets (void)
 {
-  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
-  gcov_type ws_cum_hotness_incr;
-  gcov_type cum, tmp_cum;
-  const gcov_bucket_type *histo_bucket;
-  unsigned ws_ix, c_num, count, pctinc, pct;
-  int h_ix;
+  unsigned ws_ix, pctinc, pct;
   gcov_working_set_t *ws_info;
 
   if (!profile_info)
     return;
 
-  /* Compute the amount of sum_all that the cumulative hotness grows
-     by in each successive working set entry, which depends on the
-     number of working set entries.  */
-  ws_cum_hotness_incr = profile_info->sum_all / NUM_GCOV_WORKING_SETS;
-
-  /* Next fill in an array of the cumulative hotness values corresponding
-     to each working set summary entry we are going to compute below.
-     Skip 0% statistics, which can be extrapolated from the
-     rest of the summary data.  */
-  cum = ws_cum_hotness_incr;
-  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
-       ws_ix++, cum += ws_cum_hotness_incr)
-    working_set_cum_values[ws_ix] = cum;
-  /* The last summary entry is reserved for (roughly) 99.9% of the
-     working set. Divide by 1024 so it becomes a shift, which gives
-     almost exactly 99.9%.  */
-  working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
-      = profile_info->sum_all - profile_info->sum_all/1024;
-
-  /* Next, walk through the histogram in decending order of hotness
-     and compute the statistics for the working set summary array.
-     As histogram entries are accumulated, we check to see which
-     working set entries have had their expected cum_value reached
-     and fill them in, walking the working set entries in increasing
-     size of cum_value.  */
-  ws_ix = 0; /* The current entry into the working set array.  */
-  cum = 0; /* The current accumulated counter sum.  */
-  count = 0; /* The current accumulated count of block counters.  */
-  for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
-       h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
-    {
-      histo_bucket = &profile_info->histogram[h_ix];
-
-      /* If we haven't reached the required cumulative counter value for
-         the current working set percentage, simply accumulate this histogram
-         entry into the running sums and continue to the next histogram
-         entry.  */
-      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
-        {
-          cum += histo_bucket->cum_value;
-          count += histo_bucket->num_counters;
-          continue;
-        }
-
-      /* If adding the current histogram entry's cumulative counter value
-         causes us to exceed the current working set size, then estimate
-         how many of this histogram entry's counter values are required to
-         reach the working set size, and fill in working set entries
-         as we reach their expected cumulative value.  */
-      for (c_num = 0, tmp_cum = cum;
-           c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
-           c_num++)
-        {
-          count++;
-          /* If we haven't reached the last histogram entry counter, add
-             in the minimum value again. This will underestimate the
-             cumulative sum so far, because many of the counter values in this
-             entry may have been larger than the minimum. We could add in the
-             average value every time, but that would require an expensive
-             divide operation.  */
-          if (c_num + 1 < histo_bucket->num_counters)
-            tmp_cum += histo_bucket->min_value;
-          /* If we have reached the last histogram entry counter, then add
-             in the entire cumulative value.  */
-          else
-            tmp_cum = cum + histo_bucket->cum_value;
-
-          /* Next walk through successive working set entries and fill in
-            the statistics for any whose size we have reached by accumulating
-            this histogram counter.  */
-          while (tmp_cum >= working_set_cum_values[ws_ix]
-                 && ws_ix < NUM_GCOV_WORKING_SETS)
-            {
-              gcov_working_sets[ws_ix].num_counters = count;
-              gcov_working_sets[ws_ix].min_counter
-                  = histo_bucket->min_value;
-              ws_ix++;
-            }
-        }
-      /* Finally, update the running cumulative value since we were
-         using a temporary above.  */
-      cum += histo_bucket->cum_value;
-    }
-  gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
+  compute_working_sets (profile_info, gcov_working_sets);
 
   if (dump_file)
     {
@@ -319,10 +261,10 @@ compute_working_sets (void)
           ws_info = &gcov_working_sets[ws_ix];
           /* Print out the percentage using int arithmetic to avoid float.  */
           fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
-                   HOST_WIDEST_INT_PRINT_DEC "\n",
+                   "%" PRId64 "\n",
                    pct / 100, pct - (pct / 100 * 100),
                    ws_info->num_counters,
-                   (HOST_WIDEST_INT)ws_info->min_counter);
+                   (int64_t)ws_info->min_counter);
         }
     }
 }
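
The histogram walk deleted above now lives behind compute_working_sets, shared with the rest of the gcov code. In outline it visits histogram buckets from hottest to coldest, accumulating counter sums until each cumulative target (1%, 2%, ... of sum_all) is reached, and records how many counters that took plus the minimum counter value at that point. A condensed paraphrase of the deleted loop, with stand-in types, and without the within-bucket interpolation or the hardwired 99.9% last entry of the original:

/* Sketch only: condensed paraphrase of the deleted working-set loop.  */
struct example_bucket { unsigned num_counters; long long min_value, cum_value; };
struct example_ws { unsigned num_counters; long long min_counter; };

static void
example_compute_working_sets (const struct example_bucket *histo, int n_buckets,
                              long long sum_all,
                              struct example_ws *ws, int n_ws)
{
  long long threshold = sum_all / n_ws;   /* cumulative hotness per entry */
  long long cum = 0;
  unsigned count = 0;
  int ws_ix = 0;

  /* Walk buckets from hottest to coldest.  */
  for (int h = n_buckets - 1; h >= 0 && ws_ix < n_ws; h--)
    {
      cum += histo[h].cum_value;
      count += histo[h].num_counters;
      /* Fill every working-set entry whose cumulative target is now
         covered by the counters accumulated so far.  */
      while (ws_ix < n_ws && cum >= threshold * (ws_ix + 1))
        {
          ws[ws_ix].num_counters = count;
          ws[ws_ix].min_counter = histo[h].min_value;
          ws_ix++;
        }
    }
}
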
@@ -359,7 +301,7 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
   gcov_type *counts;
 
   /* Count the edges to be (possibly) instrumented.  */
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       edge e;
       edge_iterator ei;
@@ -374,18 +316,18 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
   if (!counts)
     return NULL;
 
-  compute_working_sets();
+  get_working_sets ();
 
   if (dump_file && profile_info)
-    fprintf(dump_file, "Merged %u profiles with maximal count %u.\n",
-           profile_info->runs, (unsigned) profile_info->sum_max);
+    fprintf (dump_file, "Merged %u profiles with maximal count %u.\n",
+            profile_info->runs, (unsigned) profile_info->sum_max);
 
   return counts;
 }
 
 
 static bool
-is_edge_inconsistent (VEC(edge,gc) *edges)
+is_edge_inconsistent (vec<edge, va_gc> *edges)
 {
   edge e;
   edge_iterator ei;
@@ -400,7 +342,7 @@ is_edge_inconsistent (VEC(edge,gc) *edges)
              if (dump_file)
                {
                  fprintf (dump_file,
-                          "Edge %i->%i is inconsistent, count"HOST_WIDEST_INT_PRINT_DEC,
+                          "Edge %i->%i is inconsistent, count%" PRId64,
                           e->src->index, e->dest->index, e->count);
                  dump_bb (dump_file, e->src, 0, TDF_DETAILS);
                  dump_bb (dump_file, e->dest, 0, TDF_DETAILS);
@@ -419,7 +361,7 @@ correct_negative_edge_counts (void)
   edge e;
   edge_iterator ei;
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       FOR_EACH_EDGE (e, ei, bb->succs)
         {
@@ -436,7 +378,7 @@ is_inconsistent (void)
 {
   basic_block bb;
   bool inconsistent = false;
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       inconsistent |= is_edge_inconsistent (bb->preds);
       if (!dump_file && inconsistent)
@@ -449,7 +391,7 @@ is_inconsistent (void)
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count is negative "
-                      HOST_WIDEST_INT_PRINT_DEC,
+                      "%" PRId64,
                       bb->index,
                       bb->count);
              dump_bb (dump_file, bb, 0, TDF_DETAILS);
@@ -461,7 +403,7 @@ is_inconsistent (void)
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count does not match sum of incoming edges "
-                      HOST_WIDEST_INT_PRINT_DEC" should be " HOST_WIDEST_INT_PRINT_DEC,
+                      "%" PRId64" should be %" PRId64,
                       bb->index,
                       bb->count,
                       sum_edge_counts (bb->preds));
@@ -470,12 +412,13 @@ is_inconsistent (void)
          inconsistent = true;
        }
       if (bb->count != sum_edge_counts (bb->succs) &&
-          ! (find_edge (bb, EXIT_BLOCK_PTR) != NULL && block_ends_with_call_p (bb)))
+         ! (find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)) != NULL
+            && block_ends_with_call_p (bb)))
        {
          if (dump_file)
            {
              fprintf (dump_file, "BB %i count does not match sum of outgoing edges "
-                      HOST_WIDEST_INT_PRINT_DEC" should be " HOST_WIDEST_INT_PRINT_DEC,
+                      "%" PRId64" should be %" PRId64,
                       bb->index,
                       bb->count,
                       sum_edge_counts (bb->succs));
@@ -495,7 +438,7 @@ static void
 set_bb_counts (void)
 {
   basic_block bb;
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       bb->count = sum_edge_counts (bb->succs);
       gcc_assert (bb->count >= 0);
@@ -514,7 +457,7 @@ read_profile_edge_counts (gcov_type *exec_counts)
   /* The first count in the .da file is the number of times that the function
      was entered.  This is the exec_count for block zero.  */
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       edge e;
       edge_iterator ei;
@@ -531,9 +474,10 @@ read_profile_edge_counts (gcov_type *exec_counts)
                    if (flag_profile_correction)
                      {
                        static bool informed = 0;
-                       if (!informed)
-                         inform (input_location,
-                                 "corrupted profile info: edge count exceeds maximal count");
+                       if (dump_enabled_p () && !informed)
+                         dump_printf_loc (MSG_NOTE, input_location,
+                                           "corrupted profile info: edge count"
+                                           " exceeds maximal count\n");
                        informed = 1;
                      }
                    else
@@ -551,8 +495,8 @@ read_profile_edge_counts (gcov_type *exec_counts)
              {
                fprintf (dump_file, "\nRead edge from %i to %i, count:",
                         bb->index, e->dest->index);
-               fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
-                        (HOST_WIDEST_INT) e->count);
+               fprintf (dump_file, "%" PRId64,
+                        (int64_t) e->count);
              }
          }
     }
@@ -577,7 +521,7 @@ compute_frequency_overlap (void)
   int overlap = 0;
   basic_block bb;
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       count_total += bb->count;
       freq_total += bb->frequency;
@@ -586,7 +530,7 @@ compute_frequency_overlap (void)
   if (count_total == 0 || freq_total == 0)
     return 0;
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     overlap += MIN (bb->count * OVERLAP_BASE / count_total,
                    bb->frequency * OVERLAP_BASE / freq_total);
 
@@ -614,11 +558,6 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
   /* Very simple sanity checks so we catch bugs in our profiling code.  */
   if (!profile_info)
     return;
-  if (profile_info->run_max * profile_info->runs < profile_info->sum_max)
-    {
-      error ("corrupted profile info: run_max * runs < sum_max");
-      exec_counts = NULL;
-    }
 
   if (profile_info->sum_all < profile_info->sum_max)
     {
@@ -627,8 +566,8 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
     }
 
   /* Attach extra info block to each bb.  */
-  alloc_aux_for_blocks (sizeof (struct bb_info));
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  alloc_aux_for_blocks (sizeof (struct bb_profile_info));
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       edge e;
       edge_iterator ei;
@@ -642,8 +581,8 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
     }
 
   /* Avoid predicting entry on exit nodes.  */
-  BB_INFO (EXIT_BLOCK_PTR)->succ_count = 2;
-  BB_INFO (ENTRY_BLOCK_PTR)->pred_count = 2;
+  BB_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->succ_count = 2;
+  BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (cfun))->pred_count = 2;
 
   num_edges = read_profile_edge_counts (exec_counts);
 
@@ -673,9 +612,9 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
     {
       passes++;
       changes = 0;
-      FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
+      FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, prev_bb)
        {
-         struct bb_info *bi = BB_INFO (bb);
+         struct bb_profile_info *bi = BB_INFO (bb);
          if (! bi->count_valid)
            {
              if (bi->succ_count == 0)
@@ -777,7 +716,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
 
   /* If the graph has been correctly solved, every block will have a
      succ and pred count of zero.  */
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
     }
@@ -791,10 +730,11 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
        {
          /* Inconsistency detected. Make it flow-consistent. */
          static int informed = 0;
-         if (informed == 0)
+         if (dump_enabled_p () && informed == 0)
            {
              informed = 1;
-             inform (input_location, "correcting inconsistent profile data");
+             dump_printf_loc (MSG_NOTE, input_location,
+                              "correcting inconsistent profile data\n");
            }
          correct_negative_edge_counts ();
          /* Set bb counts to the sum of the outgoing edge counts */
@@ -814,7 +754,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
     hist_br_prob[i] = 0;
   num_branches = 0;
 
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     {
       edge e;
       edge_iterator ei;
@@ -833,9 +773,9 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
             already present.  We get negative frequency from the entry
             point.  */
          if ((e->count < 0
-              && e->dest == EXIT_BLOCK_PTR)
+              && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
              || (e->count > bb->count
-                 && e->dest != EXIT_BLOCK_PTR))
+                 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
            {
              if (block_ends_with_call_p (bb))
                e->count = e->count < 0 ? 0 : bb->count;
@@ -851,7 +791,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
       if (bb->count)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
-           e->probability = (e->count * REG_BR_PROB_BASE + bb->count / 2) / bb->count;
+           e->probability = GCOV_COMPUTE_SCALE (e->count, bb->count);
          if (bb->index >= NUM_FIXED_BLOCKS
              && block_ends_with_condjump_p (bb)
              && EDGE_COUNT (bb->succs) >= 2)
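
GCOV_COMPUTE_SCALE replaces the open-coded rounded division deleted just above; both scale e->count into the REG_BR_PROB_BASE fixed-point range (10000 in GCC), rounding to nearest. A stand-in for the macro, not its real definition:

/* Sketch only: rounded fixed-point scaling equivalent to the removed
   expression (count * BASE + total / 2) / total.  */
#define EXAMPLE_BR_PROB_BASE 10000
#define EXAMPLE_SCALE(count, total) \
  (((count) * EXAMPLE_BR_PROB_BASE + (total) / 2) / (total))

/* e.g. EXAMPLE_SCALE (1, 3) == 3333 and EXAMPLE_SCALE (2, 3) == 6667.  */
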
@@ -881,7 +821,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
         give all abnormals frequency of 0, otherwise distribute the
         frequency over abnormals (this is the case of noreturn
         calls).  */
-      else if (profile_status == PROFILE_ABSENT)
+      else if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
        {
          int total = 0;
 
@@ -909,7 +849,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
        }
     }
   counts_to_freqs ();
-  profile_status = PROFILE_READ;
+  profile_status_for_fn (cfun) = PROFILE_READ;
   compute_function_frequency ();
 
   if (dump_file)
@@ -946,13 +886,14 @@ compute_value_histograms (histogram_values values, unsigned cfg_checksum,
   gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS];
   gcov_type *act_count[GCOV_N_VALUE_COUNTERS];
   gcov_type *aact_count;
+  struct cgraph_node *node;
 
   for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
     n_histogram_counters[t] = 0;
 
-  for (i = 0; i < VEC_length (histogram_value, values); i++)
+  for (i = 0; i < values.length (); i++)
     {
-      histogram_value hist = VEC_index (histogram_value, values, i);
+      histogram_value hist = values[i];
       n_histogram_counters[(int) hist->type] += hist->n_counters;
     }
 
@@ -976,20 +917,37 @@ compute_value_histograms (histogram_values values, unsigned cfg_checksum,
   if (!any)
     return;
 
-  for (i = 0; i < VEC_length (histogram_value, values); i++)
+  for (i = 0; i < values.length (); i++)
     {
-      histogram_value hist = VEC_index (histogram_value, values, i);
+      histogram_value hist = values[i];
       gimple stmt = hist->hvalue.stmt;
 
       t = (int) hist->type;
 
       aact_count = act_count[t];
-      act_count[t] += hist->n_counters;
+
+      if (act_count[t])
+        act_count[t] += hist->n_counters;
 
       gimple_add_histogram_value (cfun, stmt, hist);
       hist->hvalue.counters =  XNEWVEC (gcov_type, hist->n_counters);
       for (j = 0; j < hist->n_counters; j++)
-       hist->hvalue.counters[j] = aact_count[j];
+        if (aact_count)
+          hist->hvalue.counters[j] = aact_count[j];
+        else
+          hist->hvalue.counters[j] = 0;
+
+      /* The time profiler counter is not related to any statement,
+         so we have to read the counter and set the value on the
+         corresponding call graph node.  */
+      if (hist->type == HIST_TYPE_TIME_PROFILE)
+        {
+         node = cgraph_node::get (hist->fun->decl);
+         node->tp_first_run = hist->hvalue.counters[0];
+
+          if (dump_file)
+            fprintf (dump_file, "Read tp_first_run: %d\n", node->tp_first_run);
+        }
     }
 
   for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
@@ -1069,7 +1027,7 @@ branch_prob (void)
   unsigned num_edges, ignored_edges;
   unsigned num_instrumented;
   struct edge_list *el;
-  histogram_values values = NULL;
+  histogram_values values = histogram_values ();
   unsigned cfg_checksum, lineno_checksum;
 
   total_num_times_called++;
@@ -1086,7 +1044,7 @@ branch_prob (void)
      We also add fake exit edges for each call and asm statement in the
      basic, since it may not return.  */
 
-  FOR_EACH_BB (bb)
+  FOR_EACH_BB_FN (bb, cfun)
     {
       int need_exit_edge = 0, need_entry_edge = 0;
       int have_exit_edge = 0, have_entry_edge = 0;
@@ -1123,7 +1081,7 @@ branch_prob (void)
             is not computed twice.  */
          if (last
              && gimple_has_location (last)
-             && e->goto_locus != UNKNOWN_LOCATION
+             && LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
              && !single_succ_p (bb)
              && (LOCATION_FILE (e->goto_locus)
                  != LOCATION_FILE (gimple_location (last))
@@ -1133,20 +1091,19 @@ branch_prob (void)
              basic_block new_bb = split_edge (e);
              edge ne = single_succ_edge (new_bb);
              ne->goto_locus = e->goto_locus;
-             ne->goto_block = e->goto_block;
            }
          if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
-              && e->dest != EXIT_BLOCK_PTR)
+              && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
            need_exit_edge = 1;
-         if (e->dest == EXIT_BLOCK_PTR)
+         if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
            have_exit_edge = 1;
        }
       FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
-              && e->src != ENTRY_BLOCK_PTR)
+              && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
            need_entry_edge = 1;
-         if (e->src == ENTRY_BLOCK_PTR)
+         if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
            have_entry_edge = 1;
        }
 
@@ -1155,14 +1112,14 @@ branch_prob (void)
          if (dump_file)
            fprintf (dump_file, "Adding fake exit edge to bb %i\n",
                     bb->index);
-         make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+         make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
        }
       if (need_entry_edge && !have_entry_edge)
        {
          if (dump_file)
            fprintf (dump_file, "Adding fake entry edge to bb %i\n",
                     bb->index);
-         make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
+         make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
          /* Avoid bbs that have both fake entry edge and also some
             exit edge.  One of those edges wouldn't be added to the
             spanning tree, but we can't instrument any of them.  */
@@ -1170,40 +1127,33 @@ branch_prob (void)
            {
              gimple_stmt_iterator gsi;
              gimple first;
-             tree fndecl;
 
-             gsi = gsi_after_labels (bb);
+             gsi = gsi_start_nondebug_after_labels_bb (bb);
              gcc_checking_assert (!gsi_end_p (gsi));
              first = gsi_stmt (gsi);
-             if (is_gimple_debug (first))
-               {
-                 gsi_next_nondebug (&gsi);
-                 gcc_checking_assert (!gsi_end_p (gsi));
-                 first = gsi_stmt (gsi);
-               }
              /* Don't split the bbs containing __builtin_setjmp_receiver
-                or __builtin_setjmp_dispatcher calls.  These are very
+                or ABNORMAL_DISPATCHER calls.  These are very
                 special and don't expect anything to be inserted before
                 them.  */
-             if (!is_gimple_call (first)
-                 || (fndecl = gimple_call_fndecl (first)) == NULL
-                 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL
-                 || (DECL_FUNCTION_CODE (fndecl) != BUILT_IN_SETJMP_RECEIVER
-                     && (DECL_FUNCTION_CODE (fndecl)
-                         != BUILT_IN_SETJMP_DISPATCHER)))
-               {
-                 if (dump_file)
-                   fprintf (dump_file, "Splitting bb %i after labels\n",
-                            bb->index);
-                 split_block_after_labels (bb);
-               }
+             if (is_gimple_call (first)
+                 && (gimple_call_builtin_p (first, BUILT_IN_SETJMP_RECEIVER)
+                     || (gimple_call_flags (first) & ECF_RETURNS_TWICE)
+                     || (gimple_call_internal_p (first)
+                         && (gimple_call_internal_fn (first)
+                             == IFN_ABNORMAL_DISPATCHER))))
+               continue;
+
+             if (dump_file)
+               fprintf (dump_file, "Splitting bb %i after labels\n",
+                        bb->index);
+             split_block_after_labels (bb);
            }
        }
     }
 
   el = create_edge_list ();
   num_edges = NUM_EDGES (el);
-  alloc_aux_for_edges (sizeof (struct edge_info));
+  alloc_aux_for_edges (sizeof (struct edge_profile_info));
 
   /* The basic blocks are expected to be numbered sequentially.  */
   compact_blocks ();
@@ -1216,7 +1166,8 @@ branch_prob (void)
 
       /* Mark edges we've replaced by fake edges above as ignored.  */
       if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
-         && e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR)
+         && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+         && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
        {
          EDGE_INFO (e)->ignore = 1;
          ignored_edges++;
@@ -1234,7 +1185,7 @@ branch_prob (void)
   for (num_instrumented = i = 0; i < num_edges; i++)
     {
       edge e = INDEX_EDGE (el, i);
-      struct edge_info *inf = EDGE_INFO (e);
+      struct edge_profile_info *inf = EDGE_INFO (e);
 
       if (inf->ignore || inf->on_tree)
        /*NOP*/;
@@ -1247,9 +1198,9 @@ branch_prob (void)
        num_instrumented++;
     }
 
-  total_num_blocks += n_basic_blocks;
+  total_num_blocks += n_basic_blocks_for_fn (cfun);
   if (dump_file)
-    fprintf (dump_file, "%d basic blocks\n", n_basic_blocks);
+    fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));
 
   total_num_edges += num_edges;
   if (dump_file)
@@ -1267,7 +1218,7 @@ branch_prob (void)
     the checksum in only one place, since it depends on the shape
      of the control flow which can change during 
      various transformations.  */
-  cfg_checksum = coverage_compute_cfg_checksum ();
+  cfg_checksum = coverage_compute_cfg_checksum (cfun);
   lineno_checksum = coverage_compute_lineno_checksum ();
 
   /* Write the data from which gcov can reconstruct the basic block
@@ -1278,12 +1229,13 @@ branch_prob (void)
 
       /* Basic block flags */
       offset = gcov_write_tag (GCOV_TAG_BLOCKS);
-      for (i = 0; i != (unsigned) (n_basic_blocks); i++)
+      for (i = 0; i != (unsigned) (n_basic_blocks_for_fn (cfun)); i++)
        gcov_write_unsigned (0);
       gcov_write_length (offset);
 
       /* Arcs */
-      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
+                     EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
        {
          edge e;
          edge_iterator ei;
@@ -1293,7 +1245,7 @@ branch_prob (void)
 
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
-             struct edge_info *i = EDGE_INFO (e);
+             struct edge_profile_info *i = EDGE_INFO (e);
              if (!i->ignore)
                {
                  unsigned flag_bits = 0;
@@ -1322,12 +1274,12 @@ branch_prob (void)
       /* Initialize the output.  */
       output_location (NULL, 0, NULL, NULL);
 
-      FOR_EACH_BB (bb)
+      FOR_EACH_BB_FN (bb, cfun)
        {
          gimple_stmt_iterator gsi;
          gcov_position_t offset = 0;
 
-         if (bb == ENTRY_BLOCK_PTR->next_bb)
+         if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
            {
              expanded_location curr_location =
                expand_location (DECL_SOURCE_LOCATION (current_function_decl));
@@ -1345,7 +1297,8 @@ branch_prob (void)
 
          /* Notice GOTO expressions eliminated while constructing the CFG.  */
          if (single_succ_p (bb)
-             && single_succ_edge (bb)->goto_locus != UNKNOWN_LOCATION)
+             && LOCATION_LOCUS (single_succ_edge (bb)->goto_locus)
+                != UNKNOWN_LOCATION)
            {
              expanded_location curr_location
                = expand_location (single_succ_edge (bb)->goto_locus);
@@ -1396,7 +1349,7 @@ branch_prob (void)
 
   free_aux_for_edges ();
 
-  VEC_free (histogram_value, heap, values);
+  values.release ();
   free_edge_list (el);
   coverage_end_function (lineno_checksum, cfg_checksum);
 }
@@ -1450,20 +1403,20 @@ find_spanning_tree (struct edge_list *el)
   basic_block bb;
 
   /* We use aux field for standard union-find algorithm.  */
-  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
     bb->aux = bb;
 
   /* Add fake edge exit to entry we can't instrument.  */
-  union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
+  union_groups (EXIT_BLOCK_PTR_FOR_FN (cfun), ENTRY_BLOCK_PTR_FOR_FN (cfun));
 
   /* First add all abnormal edges to the tree unless they form a cycle. Also
-     add all edges to EXIT_BLOCK_PTR to avoid inserting profiling code behind
+     add all edges to the exit block to avoid inserting profiling code behind
      setting return value from function.  */
   for (i = 0; i < num_edges; i++)
     {
       edge e = INDEX_EDGE (el, i);
       if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
-          || e->dest == EXIT_BLOCK_PTR)
+          || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          && !EDGE_INFO (e)->ignore
          && (find_group (e->src) != find_group (e->dest)))
        {