Update copyright years.
[gcc.git] / gcc / profile.c
1 /* Calculate branch probabilities, and basic block execution counts.
2 Copyright (C) 1990-2015 Free Software Foundation, Inc.
3 Contributed by James E. Wilson, UC Berkeley/Cygnus Support;
4 based on some ideas from Dain Samples of UC Berkeley.
5 Further mangling by Bob Manson, Cygnus Support.
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 /* Generate basic block profile instrumentation and auxiliary files.
24 Profile generation is optimized, so that not all arcs in the basic
25 block graph need instrumenting. First, the BB graph is closed with
26 one entry (function start), and one exit (function exit). Any
27 ABNORMAL_EDGE cannot be instrumented (because there is no control
28 path to place the code). We close the graph by inserting fake
29 EDGE_FAKE edges to the EXIT_BLOCK, from the sources of abnormal
30 edges that do not go to the exit_block. We ignore such abnormal
31 edges. Naturally these fake edges are never directly traversed,
32 and so *cannot* be directly instrumented. Some other graph
33 massaging is done. To optimize the instrumentation we generate the
34 BB minimal span tree, only edges that are not on the span tree
35 (plus the entry point) need instrumenting. From that information
36 all other edge counts can be deduced. By construction all fake
37 edges must be on the spanning tree. We also attempt to place
38 EDGE_CRITICAL edges on the spanning tree.
39
40 The auxiliary files generated are <dumpbase>.gcno (at compile time)
41 and <dumpbase>.gcda (at run time). The format is
42 described in full in gcov-io.h. */
43
44 /* ??? Register allocation should use basic block execution counts to
45 give preference to the most commonly executed blocks. */
46
47 /* ??? Should calculate branch probabilities before instrumenting code, since
48 then we can use arc counts to help decide which arcs to instrument. */
49
50 #include "config.h"
51 #include "system.h"
52 #include "coretypes.h"
53 #include "tm.h"
54 #include "rtl.h"
55 #include "flags.h"
56 #include "regs.h"
57 #include "expr.h"
58 #include "hashtab.h"
59 #include "hash-set.h"
60 #include "vec.h"
61 #include "machmode.h"
62 #include "hard-reg-set.h"
63 #include "input.h"
64 #include "function.h"
65 #include "predict.h"
66 #include "dominance.h"
67 #include "cfg.h"
68 #include "cfganal.h"
69 #include "basic-block.h"
70 #include "diagnostic-core.h"
71 #include "coverage.h"
72 #include "value-prof.h"
73 #include "tree.h"
74 #include "tree-ssa-alias.h"
75 #include "internal-fn.h"
76 #include "gimple-expr.h"
77 #include "is-a.h"
78 #include "gimple.h"
79 #include "gimple-iterator.h"
80 #include "tree-cfg.h"
81 #include "cfgloop.h"
82 #include "dumpfile.h"
83 #include "hash-map.h"
84 #include "plugin-api.h"
85 #include "ipa-ref.h"
86 #include "cgraph.h"
87
88 #include "profile.h"
89
/* Per-basic-block bookkeeping, attached to bb->aux while the graph
   solver in compute_branch_probabilities runs.  */

struct bb_profile_info {
  /* Nonzero once this block's execution count has been determined.  */
  unsigned int count_valid : 1;

  /* Number of successor and predecessor edges.  */
  gcov_type succ_count;
  gcov_type pred_count;
};

/* Access the bb_profile_info hanging off basic block B's aux field.  */
#define BB_INFO(b) ((struct bb_profile_info *) (b)->aux)
99
100
/* Counter summary from the last set of coverage counts read.  */

const struct gcov_ctr_summary *profile_info;

/* Counter working set information computed from the current counter
   summary.  Not initialized unless profile_info summary is non-NULL.  */
static gcov_working_set_t gcov_working_sets[NUM_GCOV_WORKING_SETS];

/* Collect statistics on the performance of this pass for the entire source
   file.  */

static int total_num_blocks;
static int total_num_edges;
static int total_num_edges_ignored;
static int total_num_edges_instrumented;
/* Accumulated in instrument_edges from the per-function edge count.  */
static int total_num_blocks_created;
/* Graph-solver passes accumulated by compute_branch_probabilities.  */
static int total_num_passes;
/* Number of times branch_prob has been invoked.  */
static int total_num_times_called;
/* Histogram of branch probabilities in 5% buckets (see the dump loop
   at the end of compute_branch_probabilities).  */
static int total_hist_br_prob[20];
static int total_num_branches;
121
122 /* Helper function to update gcov_working_sets. */
123
124 void add_working_set (gcov_working_set_t *set) {
125 int i = 0;
126 for (; i < NUM_GCOV_WORKING_SETS; i++)
127 gcov_working_sets[i] = set[i];
128 }
129
130 /* Forward declarations. */
131 static void find_spanning_tree (struct edge_list *);
132
/* Add edge instrumentation code for the whole current function.

   EL is the list of all edges in the function's CFG; only edges that
   are neither ignored nor on the spanning tree receive a counter.
   Returns the number of edges instrumented.  */
137
138 static unsigned
139 instrument_edges (struct edge_list *el)
140 {
141 unsigned num_instr_edges = 0;
142 int num_edges = NUM_EDGES (el);
143 basic_block bb;
144
145 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
146 {
147 edge e;
148 edge_iterator ei;
149
150 FOR_EACH_EDGE (e, ei, bb->succs)
151 {
152 struct edge_profile_info *inf = EDGE_INFO (e);
153
154 if (!inf->ignore && !inf->on_tree)
155 {
156 gcc_assert (!(e->flags & EDGE_ABNORMAL));
157 if (dump_file)
158 fprintf (dump_file, "Edge %d to %d instrumented%s\n",
159 e->src->index, e->dest->index,
160 EDGE_CRITICAL_P (e) ? " (and split)" : "");
161 gimple_gen_edge_profiler (num_instr_edges++, e);
162 }
163 }
164 }
165
166 total_num_blocks_created += num_edges;
167 if (dump_file)
168 fprintf (dump_file, "%d edges instrumented\n", num_instr_edges);
169 return num_instr_edges;
170 }
171
/* Add code to measure histograms for values in list VALUES.

   For each requested histogram, allocate the matching gcov counters
   and emit the profiler code that fills them at run time.  */
static void
instrument_values (histogram_values values)
{
  unsigned i;

  /* Emit code to generate the histograms before the insns.  */

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      /* Each histogram kind lives in its own gcov counter section.  */
      unsigned t = COUNTER_FOR_HIST_TYPE (hist->type);

      /* Skip this histogram if its counters cannot be allocated.  */
      if (!coverage_counter_alloc (t, hist->n_counters))
	continue;

      switch (hist->type)
	{
	case HIST_TYPE_INTERVAL:
	  gimple_gen_interval_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_POW2:
	  gimple_gen_pow2_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_SINGLE_VALUE:
	  gimple_gen_one_value_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_CONST_DELTA:
	  gimple_gen_const_delta_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_INDIR_CALL:
	case HIST_TYPE_INDIR_CALL_TOPN:
	  gimple_gen_ic_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_AVERAGE:
	  gimple_gen_average_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_IOR:
	  gimple_gen_ior_profiler (hist, t, 0);
	  break;

	case HIST_TYPE_TIME_PROFILE:
	  {
	    /* The time profiler is not tied to any one statement; emit
	       it on a fresh block split off the entry's successor edge.  */
	    basic_block bb =
	      split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	    gimple_stmt_iterator gsi = gsi_start_bb (bb);

	    gimple_gen_time_profiler (t, 0, gsi);
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
    }
}
234 \f
235
/* Fill the working set information into the profile_info structure.  */

void
get_working_sets (void)
{
  unsigned ws_ix, pctinc, pct;
  gcov_working_set_t *ws_info;

  /* Nothing to do until a counter summary has been read.  */
  if (!profile_info)
    return;

  compute_working_sets (profile_info, gcov_working_sets);

  if (dump_file)
    {
      fprintf (dump_file, "Counter working sets:\n");
      /* Multiply the percentage by 100 to avoid float.  */
      pctinc = 100 * 100 / NUM_GCOV_WORKING_SETS;
      for (ws_ix = 0, pct = pctinc; ws_ix < NUM_GCOV_WORKING_SETS;
	   ws_ix++, pct += pctinc)
	{
	  /* The last entry is pinned at 99.9%, matching the cap used by
	     find_working_set's lookup.  */
	  if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
	    pct = 9990;
	  ws_info = &gcov_working_sets[ws_ix];
	  /* Print out the percentage using int arithmetic to avoid float.  */
	  fprintf (dump_file, "\t\t%u.%02u%%: num counts=%u, min counter="
		   "%"PRId64 "\n",
		   pct / 100, pct - (pct / 100 * 100),
		   ws_info->num_counters,
		   (int64_t)ws_info->min_counter);
	}
    }
}
269
270 /* Given a the desired percentage of the full profile (sum_all from the
271 summary), multiplied by 10 to avoid float in PCT_TIMES_10, returns
272 the corresponding working set information. If an exact match for
273 the percentage isn't found, the closest value is used. */
274
275 gcov_working_set_t *
276 find_working_set (unsigned pct_times_10)
277 {
278 unsigned i;
279 if (!profile_info)
280 return NULL;
281 gcc_assert (pct_times_10 <= 1000);
282 if (pct_times_10 >= 999)
283 return &gcov_working_sets[NUM_GCOV_WORKING_SETS - 1];
284 i = pct_times_10 * NUM_GCOV_WORKING_SETS / 1000;
285 if (!i)
286 return &gcov_working_sets[0];
287 return &gcov_working_sets[i - 1];
288 }
289
/* Computes hybrid profile for all matching entries in da_file.

   CFG_CHECKSUM is the precomputed checksum for the CFG.
   LINENO_CHECKSUM guards against profile data collected from a
   different version of the source.

   Returns the array of counts for the instrumented (non-ignored,
   off-tree) edges, or NULL when no usable counts were found.  */

static gcov_type *
get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
{
  unsigned num_edges = 0;
  basic_block bb;
  gcov_type *counts;

  /* Count the edges to be (possibly) instrumented.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
	  num_edges++;
    }

  /* This also (re)sets the global profile_info via its out parameter.  */
  counts = get_coverage_counts (GCOV_COUNTER_ARCS, num_edges, cfg_checksum,
				lineno_checksum, &profile_info);
  if (!counts)
    return NULL;

  get_working_sets ();

  if (dump_file && profile_info)
    fprintf (dump_file, "Merged %u profiles with maximal count %u.\n",
	     profile_info->runs, (unsigned) profile_info->sum_max);

  return counts;
}
325
326
/* Return true if any non-ignored edge in EDGES has a negative count
   that cannot be explained as a fake edge leaving a block that ends
   in a call (the returns-twice situation tolerated by
   compute_branch_probabilities).  Dumps the offending blocks when
   dump_file is set.  */
static bool
is_edge_inconsistent (vec<edge, va_gc> *edges)
{
  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, edges)
    {
      if (!EDGE_INFO (e)->ignore)
	{
	  /* Negative counts are only acceptable on EDGE_FAKE edges
	     whose source ends with a call.  */
	  if (e->count < 0
	      && (!(e->flags & EDGE_FAKE)
		  || !block_ends_with_call_p (e->src)))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "Edge %i->%i is inconsistent, count%"PRId64,
			   e->src->index, e->dest->index, e->count);
		  dump_bb (dump_file, e->src, 0, TDF_DETAILS);
		  dump_bb (dump_file, e->dest, 0, TDF_DETAILS);
		}
	      return true;
	    }
	}
    }
  return false;
}
354
355 static void
356 correct_negative_edge_counts (void)
357 {
358 basic_block bb;
359 edge e;
360 edge_iterator ei;
361
362 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
363 {
364 FOR_EACH_EDGE (e, ei, bb->succs)
365 {
366 if (e->count < 0)
367 e->count = 0;
368 }
369 }
370 }
371
/* Check consistency.
   Return true if inconsistency is found.

   A profile is flow-consistent when every block count is non-negative
   and matches the sum of its incoming and of its outgoing edge counts
   (the outgoing check is waived for blocks that end in a call and
   have an edge to the exit block).  With dump_file set all violations
   are reported; otherwise the first violation short-circuits.  */
static bool
is_inconsistent (void)
{
  basic_block bb;
  bool inconsistent = false;
  FOR_EACH_BB_FN (bb, cfun)
    {
      inconsistent |= is_edge_inconsistent (bb->preds);
      if (!dump_file && inconsistent)
	return true;
      inconsistent |= is_edge_inconsistent (bb->succs);
      if (!dump_file && inconsistent)
	return true;
      if (bb->count < 0)
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "BB %i count is negative "
		       "%"PRId64,
		       bb->index,
		       bb->count);
	      dump_bb (dump_file, bb, 0, TDF_DETAILS);
	    }
	  inconsistent = true;
	}
      /* Block count must equal the sum of incoming edge counts.  */
      if (bb->count != sum_edge_counts (bb->preds))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "BB %i count does not match sum of incoming edges "
		       "%"PRId64" should be %"PRId64,
		       bb->index,
		       bb->count,
		       sum_edge_counts (bb->preds));
	      dump_bb (dump_file, bb, 0, TDF_DETAILS);
	    }
	  inconsistent = true;
	}
      /* Likewise for outgoing edges, except for call-ending blocks
	 with an exit edge (returns-twice tolerance).  */
      if (bb->count != sum_edge_counts (bb->succs) &&
	  ! (find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)) != NULL
	     && block_ends_with_call_p (bb)))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "BB %i count does not match sum of outgoing edges "
		       "%"PRId64" should be %"PRId64,
		       bb->index,
		       bb->count,
		       sum_edge_counts (bb->succs));
	      dump_bb (dump_file, bb, 0, TDF_DETAILS);
	    }
	  inconsistent = true;
	}
      if (!dump_file && inconsistent)
	return true;
    }

  return inconsistent;
}
433
434 /* Set each basic block count to the sum of its outgoing edge counts */
435 static void
436 set_bb_counts (void)
437 {
438 basic_block bb;
439 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
440 {
441 bb->count = sum_edge_counts (bb->succs);
442 gcc_assert (bb->count >= 0);
443 }
444 }
445
/* Reads profile data and returns total number of edge counts read.

   EXEC_COUNTS is the count array read from the .gcda file, or NULL
   when no usable profile exists (then every instrumented edge gets a
   count of zero).  The entries are consumed in exactly the order the
   instrumented edges are visited here.  Also decrements the per-block
   succ/pred bookkeeping consumed by the graph solver.  */
static int
read_profile_edge_counts (gcov_type *exec_counts)
{
  basic_block bb;
  int num_edges = 0;
  int exec_counts_pos = 0;
  /* For each edge not on the spanning tree, set its execution count from
     the .da file.  */
  /* The first count in the .da file is the number of times that the function
     was entered.  This is the exec_count for block zero.  */

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
    {
      edge e;
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
	  {
	    num_edges++;
	    if (exec_counts)
	      {
		e->count = exec_counts[exec_counts_pos++];
		/* An edge cannot execute more often than the maximum
		   recorded over all merged runs.  */
		if (e->count > profile_info->sum_max)
		  {
		    if (flag_profile_correction)
		      {
			static bool informed = 0;
			if (dump_enabled_p () && !informed)
			  dump_printf_loc (MSG_NOTE, input_location,
					   "corrupted profile info: edge count"
					   " exceeds maximal count\n");
			informed = 1;
		      }
		    else
		      error ("corrupted profile info: edge from %i to %i exceeds maximal count",
			     bb->index, e->dest->index);
		  }
	      }
	    else
	      e->count = 0;

	    EDGE_INFO (e)->count_valid = 1;
	    BB_INFO (bb)->succ_count--;
	    BB_INFO (e->dest)->pred_count--;
	    if (dump_file)
	      {
		fprintf (dump_file, "\nRead edge from %i to %i, count:",
			 bb->index, e->dest->index);
		fprintf (dump_file, "%"PRId64,
			 (int64_t) e->count);
	      }
	  }
    }

  return num_edges;
}
504
505 #define OVERLAP_BASE 10000
506
507 /* Compare the static estimated profile to the actual profile, and
508 return the "degree of overlap" measure between them.
509
510 Degree of overlap is a number between 0 and OVERLAP_BASE. It is
511 the sum of each basic block's minimum relative weights between
512 two profiles. And overlap of OVERLAP_BASE means two profiles are
513 identical. */
514
515 static int
516 compute_frequency_overlap (void)
517 {
518 gcov_type count_total = 0, freq_total = 0;
519 int overlap = 0;
520 basic_block bb;
521
522 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
523 {
524 count_total += bb->count;
525 freq_total += bb->frequency;
526 }
527
528 if (count_total == 0 || freq_total == 0)
529 return 0;
530
531 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
532 overlap += MIN (bb->count * OVERLAP_BASE / count_total,
533 bb->frequency * OVERLAP_BASE / freq_total);
534
535 return overlap;
536 }
537
538 /* Compute the branch probabilities for the various branches.
539 Annotate them accordingly.
540
541 CFG_CHECKSUM is the precomputed checksum for the CFG. */
542
543 static void
544 compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
545 {
546 basic_block bb;
547 int i;
548 int num_edges = 0;
549 int changes;
550 int passes;
551 int hist_br_prob[20];
552 int num_branches;
553 gcov_type *exec_counts = get_exec_counts (cfg_checksum, lineno_checksum);
554 int inconsistent = 0;
555
556 /* Very simple sanity checks so we catch bugs in our profiling code. */
557 if (!profile_info)
558 return;
559
560 if (profile_info->sum_all < profile_info->sum_max)
561 {
562 error ("corrupted profile info: sum_all is smaller than sum_max");
563 exec_counts = NULL;
564 }
565
566 /* Attach extra info block to each bb. */
567 alloc_aux_for_blocks (sizeof (struct bb_profile_info));
568 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
569 {
570 edge e;
571 edge_iterator ei;
572
573 FOR_EACH_EDGE (e, ei, bb->succs)
574 if (!EDGE_INFO (e)->ignore)
575 BB_INFO (bb)->succ_count++;
576 FOR_EACH_EDGE (e, ei, bb->preds)
577 if (!EDGE_INFO (e)->ignore)
578 BB_INFO (bb)->pred_count++;
579 }
580
581 /* Avoid predicting entry on exit nodes. */
582 BB_INFO (EXIT_BLOCK_PTR_FOR_FN (cfun))->succ_count = 2;
583 BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (cfun))->pred_count = 2;
584
585 num_edges = read_profile_edge_counts (exec_counts);
586
587 if (dump_file)
588 fprintf (dump_file, "\n%d edge counts read\n", num_edges);
589
590 /* For every block in the file,
591 - if every exit/entrance edge has a known count, then set the block count
592 - if the block count is known, and every exit/entrance edge but one has
593 a known execution count, then set the count of the remaining edge
594
595 As edge counts are set, decrement the succ/pred count, but don't delete
596 the edge, that way we can easily tell when all edges are known, or only
597 one edge is unknown. */
598
599 /* The order that the basic blocks are iterated through is important.
600 Since the code that finds spanning trees starts with block 0, low numbered
601 edges are put on the spanning tree in preference to high numbered edges.
602 Hence, most instrumented edges are at the end. Graph solving works much
603 faster if we propagate numbers from the end to the start.
604
605 This takes an average of slightly more than 3 passes. */
606
607 changes = 1;
608 passes = 0;
609 while (changes)
610 {
611 passes++;
612 changes = 0;
613 FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), NULL, prev_bb)
614 {
615 struct bb_profile_info *bi = BB_INFO (bb);
616 if (! bi->count_valid)
617 {
618 if (bi->succ_count == 0)
619 {
620 edge e;
621 edge_iterator ei;
622 gcov_type total = 0;
623
624 FOR_EACH_EDGE (e, ei, bb->succs)
625 total += e->count;
626 bb->count = total;
627 bi->count_valid = 1;
628 changes = 1;
629 }
630 else if (bi->pred_count == 0)
631 {
632 edge e;
633 edge_iterator ei;
634 gcov_type total = 0;
635
636 FOR_EACH_EDGE (e, ei, bb->preds)
637 total += e->count;
638 bb->count = total;
639 bi->count_valid = 1;
640 changes = 1;
641 }
642 }
643 if (bi->count_valid)
644 {
645 if (bi->succ_count == 1)
646 {
647 edge e;
648 edge_iterator ei;
649 gcov_type total = 0;
650
651 /* One of the counts will be invalid, but it is zero,
652 so adding it in also doesn't hurt. */
653 FOR_EACH_EDGE (e, ei, bb->succs)
654 total += e->count;
655
656 /* Search for the invalid edge, and set its count. */
657 FOR_EACH_EDGE (e, ei, bb->succs)
658 if (! EDGE_INFO (e)->count_valid && ! EDGE_INFO (e)->ignore)
659 break;
660
661 /* Calculate count for remaining edge by conservation. */
662 total = bb->count - total;
663
664 gcc_assert (e);
665 EDGE_INFO (e)->count_valid = 1;
666 e->count = total;
667 bi->succ_count--;
668
669 BB_INFO (e->dest)->pred_count--;
670 changes = 1;
671 }
672 if (bi->pred_count == 1)
673 {
674 edge e;
675 edge_iterator ei;
676 gcov_type total = 0;
677
678 /* One of the counts will be invalid, but it is zero,
679 so adding it in also doesn't hurt. */
680 FOR_EACH_EDGE (e, ei, bb->preds)
681 total += e->count;
682
683 /* Search for the invalid edge, and set its count. */
684 FOR_EACH_EDGE (e, ei, bb->preds)
685 if (!EDGE_INFO (e)->count_valid && !EDGE_INFO (e)->ignore)
686 break;
687
688 /* Calculate count for remaining edge by conservation. */
689 total = bb->count - total + e->count;
690
691 gcc_assert (e);
692 EDGE_INFO (e)->count_valid = 1;
693 e->count = total;
694 bi->pred_count--;
695
696 BB_INFO (e->src)->succ_count--;
697 changes = 1;
698 }
699 }
700 }
701 }
702 if (dump_file)
703 {
704 int overlap = compute_frequency_overlap ();
705 gimple_dump_cfg (dump_file, dump_flags);
706 fprintf (dump_file, "Static profile overlap: %d.%d%%\n",
707 overlap / (OVERLAP_BASE / 100),
708 overlap % (OVERLAP_BASE / 100));
709 }
710
711 total_num_passes += passes;
712 if (dump_file)
713 fprintf (dump_file, "Graph solving took %d passes.\n\n", passes);
714
715 /* If the graph has been correctly solved, every block will have a
716 succ and pred count of zero. */
717 FOR_EACH_BB_FN (bb, cfun)
718 {
719 gcc_assert (!BB_INFO (bb)->succ_count && !BB_INFO (bb)->pred_count);
720 }
721
722 /* Check for inconsistent basic block counts */
723 inconsistent = is_inconsistent ();
724
725 if (inconsistent)
726 {
727 if (flag_profile_correction)
728 {
729 /* Inconsistency detected. Make it flow-consistent. */
730 static int informed = 0;
731 if (dump_enabled_p () && informed == 0)
732 {
733 informed = 1;
734 dump_printf_loc (MSG_NOTE, input_location,
735 "correcting inconsistent profile data\n");
736 }
737 correct_negative_edge_counts ();
738 /* Set bb counts to the sum of the outgoing edge counts */
739 set_bb_counts ();
740 if (dump_file)
741 fprintf (dump_file, "\nCalling mcf_smooth_cfg\n");
742 mcf_smooth_cfg ();
743 }
744 else
745 error ("corrupted profile info: profile data is not flow-consistent");
746 }
747
748 /* For every edge, calculate its branch probability and add a reg_note
749 to the branch insn to indicate this. */
750
751 for (i = 0; i < 20; i++)
752 hist_br_prob[i] = 0;
753 num_branches = 0;
754
755 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
756 {
757 edge e;
758 edge_iterator ei;
759
760 if (bb->count < 0)
761 {
762 error ("corrupted profile info: number of iterations for basic block %d thought to be %i",
763 bb->index, (int)bb->count);
764 bb->count = 0;
765 }
766 FOR_EACH_EDGE (e, ei, bb->succs)
767 {
768 /* Function may return twice in the cased the called function is
769 setjmp or calls fork, but we can't represent this by extra
770 edge from the entry, since extra edge from the exit is
771 already present. We get negative frequency from the entry
772 point. */
773 if ((e->count < 0
774 && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
775 || (e->count > bb->count
776 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)))
777 {
778 if (block_ends_with_call_p (bb))
779 e->count = e->count < 0 ? 0 : bb->count;
780 }
781 if (e->count < 0 || e->count > bb->count)
782 {
783 error ("corrupted profile info: number of executions for edge %d-%d thought to be %i",
784 e->src->index, e->dest->index,
785 (int)e->count);
786 e->count = bb->count / 2;
787 }
788 }
789 if (bb->count)
790 {
791 FOR_EACH_EDGE (e, ei, bb->succs)
792 e->probability = GCOV_COMPUTE_SCALE (e->count, bb->count);
793 if (bb->index >= NUM_FIXED_BLOCKS
794 && block_ends_with_condjump_p (bb)
795 && EDGE_COUNT (bb->succs) >= 2)
796 {
797 int prob;
798 edge e;
799 int index;
800
801 /* Find the branch edge. It is possible that we do have fake
802 edges here. */
803 FOR_EACH_EDGE (e, ei, bb->succs)
804 if (!(e->flags & (EDGE_FAKE | EDGE_FALLTHRU)))
805 break;
806
807 prob = e->probability;
808 index = prob * 20 / REG_BR_PROB_BASE;
809
810 if (index == 20)
811 index = 19;
812 hist_br_prob[index]++;
813
814 num_branches++;
815 }
816 }
817 /* As a last resort, distribute the probabilities evenly.
818 Use simple heuristics that if there are normal edges,
819 give all abnormals frequency of 0, otherwise distribute the
820 frequency over abnormals (this is the case of noreturn
821 calls). */
822 else if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
823 {
824 int total = 0;
825
826 FOR_EACH_EDGE (e, ei, bb->succs)
827 if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
828 total ++;
829 if (total)
830 {
831 FOR_EACH_EDGE (e, ei, bb->succs)
832 if (!(e->flags & (EDGE_COMPLEX | EDGE_FAKE)))
833 e->probability = REG_BR_PROB_BASE / total;
834 else
835 e->probability = 0;
836 }
837 else
838 {
839 total += EDGE_COUNT (bb->succs);
840 FOR_EACH_EDGE (e, ei, bb->succs)
841 e->probability = REG_BR_PROB_BASE / total;
842 }
843 if (bb->index >= NUM_FIXED_BLOCKS
844 && block_ends_with_condjump_p (bb)
845 && EDGE_COUNT (bb->succs) >= 2)
846 num_branches++;
847 }
848 }
849 counts_to_freqs ();
850 profile_status_for_fn (cfun) = PROFILE_READ;
851 compute_function_frequency ();
852
853 if (dump_file)
854 {
855 fprintf (dump_file, "%d branches\n", num_branches);
856 if (num_branches)
857 for (i = 0; i < 10; i++)
858 fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
859 (hist_br_prob[i] + hist_br_prob[19-i]) * 100 / num_branches,
860 5 * i, 5 * i + 5);
861
862 total_num_branches += num_branches;
863 for (i = 0; i < 20; i++)
864 total_hist_br_prob[i] += hist_br_prob[i];
865
866 fputc ('\n', dump_file);
867 fputc ('\n', dump_file);
868 }
869
870 free_aux_for_blocks ();
871 }
872
/* Load value histograms values whose description is stored in VALUES array
   from .gcda file.

   CFG_CHECKSUM is the precomputed checksum for the CFG.
   LINENO_CHECKSUM guards against stale profile data.  */

static void
compute_value_histograms (histogram_values values, unsigned cfg_checksum,
			  unsigned lineno_checksum)
{
  unsigned i, j, t, any;
  unsigned n_histogram_counters[GCOV_N_VALUE_COUNTERS];
  gcov_type *histogram_counts[GCOV_N_VALUE_COUNTERS];
  gcov_type *act_count[GCOV_N_VALUE_COUNTERS];
  gcov_type *aact_count;
  struct cgraph_node *node;

  /* Total up the number of counters needed per histogram kind.  */
  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    n_histogram_counters[t] = 0;

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      n_histogram_counters[(int) hist->type] += hist->n_counters;
    }

  /* Read the counter arrays for each kind that is actually used.  */
  any = 0;
  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    {
      if (!n_histogram_counters[t])
	{
	  histogram_counts[t] = NULL;
	  continue;
	}

      histogram_counts[t] =
	get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
			     n_histogram_counters[t], cfg_checksum,
			     lineno_checksum, NULL);
      if (histogram_counts[t])
	any = 1;
      /* act_count[t] is a cursor that advances through
	 histogram_counts[t] as histograms consume counters below.  */
      act_count[t] = histogram_counts[t];
    }
  if (!any)
    return;

  for (i = 0; i < values.length (); i++)
    {
      histogram_value hist = values[i];
      gimple stmt = hist->hvalue.stmt;

      t = (int) hist->type;

      aact_count = act_count[t];

      /* Advance the cursor past this histogram's counters.  */
      if (act_count[t])
	act_count[t] += hist->n_counters;

      gimple_add_histogram_value (cfun, stmt, hist);
      hist->hvalue.counters = XNEWVEC (gcov_type, hist->n_counters);
      for (j = 0; j < hist->n_counters; j++)
	if (aact_count)
	  hist->hvalue.counters[j] = aact_count[j];
	else
	  hist->hvalue.counters[j] = 0;

      /* Time profiler counter is not related to any statement,
	 so that we have to read the counter and set the value to
	 the corresponding call graph node.  */
      if (hist->type == HIST_TYPE_TIME_PROFILE)
	{
	  node = cgraph_node::get (hist->fun->decl);
	  node->tp_first_run = hist->hvalue.counters[0];

	  if (dump_file)
	    fprintf (dump_file, "Read tp_first_run: %d\n", node->tp_first_run);
	}
    }

  for (t = 0; t < GCOV_N_VALUE_COUNTERS; t++)
    free (histogram_counts[t]);
}
954
955 /* When passed NULL as file_name, initialize.
956 When passed something else, output the necessary commands to change
957 line to LINE and offset to FILE_NAME. */
958 static void
959 output_location (char const *file_name, int line,
960 gcov_position_t *offset, basic_block bb)
961 {
962 static char const *prev_file_name;
963 static int prev_line;
964 bool name_differs, line_differs;
965
966 if (!file_name)
967 {
968 prev_file_name = NULL;
969 prev_line = -1;
970 return;
971 }
972
973 name_differs = !prev_file_name || filename_cmp (file_name, prev_file_name);
974 line_differs = prev_line != line;
975
976 if (name_differs || line_differs)
977 {
978 if (!*offset)
979 {
980 *offset = gcov_write_tag (GCOV_TAG_LINES);
981 gcov_write_unsigned (bb->index);
982 name_differs = line_differs=true;
983 }
984
985 /* If this is a new source file, then output the
986 file's name to the .bb file. */
987 if (name_differs)
988 {
989 prev_file_name = file_name;
990 gcov_write_unsigned (0);
991 gcov_write_string (prev_file_name);
992 }
993 if (line_differs)
994 {
995 gcov_write_unsigned (line);
996 prev_line = line;
997 }
998 }
999 }
1000
/* Instrument and/or analyze program behavior based on the program's CFG.
1002
1003 This function creates a representation of the control flow graph (of
1004 the function being compiled) that is suitable for the instrumentation
1005 of edges and/or converting measured edge counts to counts on the
1006 complete CFG.
1007
1008 When FLAG_PROFILE_ARCS is nonzero, this function instruments the edges in
1009 the flow graph that are needed to reconstruct the dynamic behavior of the
1010 flow graph. This data is written to the gcno file for gcov.
1011
1012 When FLAG_BRANCH_PROBABILITIES is nonzero, this function reads auxiliary
1013 information from the gcda file containing edge count information from
1014 previous executions of the function being compiled. In this case, the
1015 control flow graph is annotated with actual execution counts by
1016 compute_branch_probabilities().
1017
1018 Main entry point of this file. */
1019
1020 void
1021 branch_prob (void)
1022 {
1023 basic_block bb;
1024 unsigned i;
1025 unsigned num_edges, ignored_edges;
1026 unsigned num_instrumented;
1027 struct edge_list *el;
1028 histogram_values values = histogram_values ();
1029 unsigned cfg_checksum, lineno_checksum;
1030
1031 total_num_times_called++;
1032
1033 flow_call_edges_add (NULL);
1034 add_noreturn_fake_exit_edges ();
1035
1036 /* We can't handle cyclic regions constructed using abnormal edges.
1037 To avoid these we replace every source of abnormal edge by a fake
1038 edge from entry node and every destination by fake edge to exit.
1039 This keeps graph acyclic and our calculation exact for all normal
1040 edges except for exit and entrance ones.
1041
   We also add fake exit edges for each call and asm statement in the
   basic block, since it may not return.  */
1044
1045 FOR_EACH_BB_FN (bb, cfun)
1046 {
1047 int need_exit_edge = 0, need_entry_edge = 0;
1048 int have_exit_edge = 0, have_entry_edge = 0;
1049 edge e;
1050 edge_iterator ei;
1051
1052 /* Functions returning multiple times are not handled by extra edges.
1053 Instead we simply allow negative counts on edges from exit to the
1054 block past call and corresponding probabilities. We can't go
1055 with the extra edges because that would result in flowgraph that
1056 needs to have fake edges outside the spanning tree. */
1057
1058 FOR_EACH_EDGE (e, ei, bb->succs)
1059 {
1060 gimple_stmt_iterator gsi;
1061 gimple last = NULL;
1062
1063 /* It may happen that there are compiler generated statements
1064 without a locus at all. Go through the basic block from the
1065 last to the first statement looking for a locus. */
1066 for (gsi = gsi_last_nondebug_bb (bb);
1067 !gsi_end_p (gsi);
1068 gsi_prev_nondebug (&gsi))
1069 {
1070 last = gsi_stmt (gsi);
1071 if (gimple_has_location (last))
1072 break;
1073 }
1074
1075 /* Edge with goto locus might get wrong coverage info unless
1076 it is the only edge out of BB.
1077 Don't do that when the locuses match, so
1078 if (blah) goto something;
1079 is not computed twice. */
1080 if (last
1081 && gimple_has_location (last)
1082 && LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
1083 && !single_succ_p (bb)
1084 && (LOCATION_FILE (e->goto_locus)
1085 != LOCATION_FILE (gimple_location (last))
1086 || (LOCATION_LINE (e->goto_locus)
1087 != LOCATION_LINE (gimple_location (last)))))
1088 {
1089 basic_block new_bb = split_edge (e);
1090 edge ne = single_succ_edge (new_bb);
1091 ne->goto_locus = e->goto_locus;
1092 }
1093 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1094 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1095 need_exit_edge = 1;
1096 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1097 have_exit_edge = 1;
1098 }
1099 FOR_EACH_EDGE (e, ei, bb->preds)
1100 {
1101 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1102 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1103 need_entry_edge = 1;
1104 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1105 have_entry_edge = 1;
1106 }
1107
1108 if (need_exit_edge && !have_exit_edge)
1109 {
1110 if (dump_file)
1111 fprintf (dump_file, "Adding fake exit edge to bb %i\n",
1112 bb->index);
1113 make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
1114 }
1115 if (need_entry_edge && !have_entry_edge)
1116 {
1117 if (dump_file)
1118 fprintf (dump_file, "Adding fake entry edge to bb %i\n",
1119 bb->index);
1120 make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb, EDGE_FAKE);
1121 /* Avoid bbs that have both fake entry edge and also some
1122 exit edge. One of those edges wouldn't be added to the
1123 spanning tree, but we can't instrument any of them. */
1124 if (have_exit_edge || need_exit_edge)
1125 {
1126 gimple_stmt_iterator gsi;
1127 gimple first;
1128
1129 gsi = gsi_start_nondebug_after_labels_bb (bb);
1130 gcc_checking_assert (!gsi_end_p (gsi));
1131 first = gsi_stmt (gsi);
1132 /* Don't split the bbs containing __builtin_setjmp_receiver
1133 or ABNORMAL_DISPATCHER calls. These are very
1134 special and don't expect anything to be inserted before
1135 them. */
1136 if (is_gimple_call (first)
1137 && (gimple_call_builtin_p (first, BUILT_IN_SETJMP_RECEIVER)
1138 || (gimple_call_flags (first) & ECF_RETURNS_TWICE)
1139 || (gimple_call_internal_p (first)
1140 && (gimple_call_internal_fn (first)
1141 == IFN_ABNORMAL_DISPATCHER))))
1142 continue;
1143
1144 if (dump_file)
1145 fprintf (dump_file, "Splitting bb %i after labels\n",
1146 bb->index);
1147 split_block_after_labels (bb);
1148 }
1149 }
1150 }
1151
1152 el = create_edge_list ();
1153 num_edges = NUM_EDGES (el);
1154 alloc_aux_for_edges (sizeof (struct edge_profile_info));
1155
1156 /* The basic blocks are expected to be numbered sequentially. */
1157 compact_blocks ();
1158
1159 ignored_edges = 0;
1160 for (i = 0 ; i < num_edges ; i++)
1161 {
1162 edge e = INDEX_EDGE (el, i);
1163 e->count = 0;
1164
1165 /* Mark edges we've replaced by fake edges above as ignored. */
1166 if ((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL))
1167 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1168 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1169 {
1170 EDGE_INFO (e)->ignore = 1;
1171 ignored_edges++;
1172 }
1173 }
1174
1175 /* Create spanning tree from basic block graph, mark each edge that is
1176 on the spanning tree. We insert as many abnormal and critical edges
1177 as possible to minimize number of edge splits necessary. */
1178
1179 find_spanning_tree (el);
1180
1181 /* Fake edges that are not on the tree will not be instrumented, so
1182 mark them ignored. */
1183 for (num_instrumented = i = 0; i < num_edges; i++)
1184 {
1185 edge e = INDEX_EDGE (el, i);
1186 struct edge_profile_info *inf = EDGE_INFO (e);
1187
1188 if (inf->ignore || inf->on_tree)
1189 /*NOP*/;
1190 else if (e->flags & EDGE_FAKE)
1191 {
1192 inf->ignore = 1;
1193 ignored_edges++;
1194 }
1195 else
1196 num_instrumented++;
1197 }
1198
1199 total_num_blocks += n_basic_blocks_for_fn (cfun);
1200 if (dump_file)
1201 fprintf (dump_file, "%d basic blocks\n", n_basic_blocks_for_fn (cfun));
1202
1203 total_num_edges += num_edges;
1204 if (dump_file)
1205 fprintf (dump_file, "%d edges\n", num_edges);
1206
1207 total_num_edges_ignored += ignored_edges;
1208 if (dump_file)
1209 fprintf (dump_file, "%d ignored edges\n", ignored_edges);
1210
1211 total_num_edges_instrumented += num_instrumented;
1212 if (dump_file)
1213 fprintf (dump_file, "%d instrumentation edges\n", num_instrumented);
1214
  /* Compute two different checksums.  Note that we want to compute
     the checksum in only one place, since it depends on the shape
     of the control flow which can change during
     various transformations.  */
1219 cfg_checksum = coverage_compute_cfg_checksum (cfun);
1220 lineno_checksum = coverage_compute_lineno_checksum ();
1221
1222 /* Write the data from which gcov can reconstruct the basic block
1223 graph and function line numbers (the gcno file). */
1224 if (coverage_begin_function (lineno_checksum, cfg_checksum))
1225 {
1226 gcov_position_t offset;
1227
1228 /* Basic block flags */
1229 offset = gcov_write_tag (GCOV_TAG_BLOCKS);
1230 for (i = 0; i != (unsigned) (n_basic_blocks_for_fn (cfun)); i++)
1231 gcov_write_unsigned (0);
1232 gcov_write_length (offset);
1233
1234 /* Arcs */
1235 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
1236 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1237 {
1238 edge e;
1239 edge_iterator ei;
1240
1241 offset = gcov_write_tag (GCOV_TAG_ARCS);
1242 gcov_write_unsigned (bb->index);
1243
1244 FOR_EACH_EDGE (e, ei, bb->succs)
1245 {
1246 struct edge_profile_info *i = EDGE_INFO (e);
1247 if (!i->ignore)
1248 {
1249 unsigned flag_bits = 0;
1250
1251 if (i->on_tree)
1252 flag_bits |= GCOV_ARC_ON_TREE;
1253 if (e->flags & EDGE_FAKE)
1254 flag_bits |= GCOV_ARC_FAKE;
1255 if (e->flags & EDGE_FALLTHRU)
1256 flag_bits |= GCOV_ARC_FALLTHROUGH;
1257 /* On trees we don't have fallthru flags, but we can
1258 recompute them from CFG shape. */
1259 if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)
1260 && e->src->next_bb == e->dest)
1261 flag_bits |= GCOV_ARC_FALLTHROUGH;
1262
1263 gcov_write_unsigned (e->dest->index);
1264 gcov_write_unsigned (flag_bits);
1265 }
1266 }
1267
1268 gcov_write_length (offset);
1269 }
1270
1271 /* Line numbers. */
1272 /* Initialize the output. */
1273 output_location (NULL, 0, NULL, NULL);
1274
1275 FOR_EACH_BB_FN (bb, cfun)
1276 {
1277 gimple_stmt_iterator gsi;
1278 gcov_position_t offset = 0;
1279
1280 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb)
1281 {
1282 expanded_location curr_location =
1283 expand_location (DECL_SOURCE_LOCATION (current_function_decl));
1284 output_location (curr_location.file, curr_location.line,
1285 &offset, bb);
1286 }
1287
1288 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1289 {
1290 gimple stmt = gsi_stmt (gsi);
1291 if (gimple_has_location (stmt))
1292 output_location (gimple_filename (stmt), gimple_lineno (stmt),
1293 &offset, bb);
1294 }
1295
1296 /* Notice GOTO expressions eliminated while constructing the CFG. */
1297 if (single_succ_p (bb)
1298 && LOCATION_LOCUS (single_succ_edge (bb)->goto_locus)
1299 != UNKNOWN_LOCATION)
1300 {
1301 expanded_location curr_location
1302 = expand_location (single_succ_edge (bb)->goto_locus);
1303 output_location (curr_location.file, curr_location.line,
1304 &offset, bb);
1305 }
1306
1307 if (offset)
1308 {
1309 /* A file of NULL indicates the end of run. */
1310 gcov_write_unsigned (0);
1311 gcov_write_string (NULL);
1312 gcov_write_length (offset);
1313 }
1314 }
1315 }
1316
1317 if (flag_profile_values)
1318 gimple_find_values_to_profile (&values);
1319
1320 if (flag_branch_probabilities)
1321 {
1322 compute_branch_probabilities (cfg_checksum, lineno_checksum);
1323 if (flag_profile_values)
1324 compute_value_histograms (values, cfg_checksum, lineno_checksum);
1325 }
1326
1327 remove_fake_edges ();
1328
1329 /* For each edge not on the spanning tree, add counting code. */
1330 if (profile_arc_flag
1331 && coverage_counter_alloc (GCOV_COUNTER_ARCS, num_instrumented))
1332 {
1333 unsigned n_instrumented;
1334
1335 gimple_init_edge_profiler ();
1336
1337 n_instrumented = instrument_edges (el);
1338
1339 gcc_assert (n_instrumented == num_instrumented);
1340
1341 if (flag_profile_values)
1342 instrument_values (values);
1343
1344 /* Commit changes done by instrumentation. */
1345 gsi_commit_edge_inserts ();
1346 }
1347
1348 free_aux_for_edges ();
1349
1350 values.release ();
1351 free_edge_list (el);
1352 coverage_end_function (lineno_checksum, cfg_checksum);
1353 }
1354 \f
1355 /* Union find algorithm implementation for the basic blocks using
1356 aux fields. */
1357
1358 static basic_block
1359 find_group (basic_block bb)
1360 {
1361 basic_block group = bb, bb1;
1362
1363 while ((basic_block) group->aux != group)
1364 group = (basic_block) group->aux;
1365
1366 /* Compress path. */
1367 while ((basic_block) bb->aux != group)
1368 {
1369 bb1 = (basic_block) bb->aux;
1370 bb->aux = (void *) group;
1371 bb = bb1;
1372 }
1373 return group;
1374 }
1375
1376 static void
1377 union_groups (basic_block bb1, basic_block bb2)
1378 {
1379 basic_block bb1g = find_group (bb1);
1380 basic_block bb2g = find_group (bb2);
1381
1382 /* ??? I don't have a place for the rank field. OK. Lets go w/o it,
1383 this code is unlikely going to be performance problem anyway. */
1384 gcc_assert (bb1g != bb2g);
1385
1386 bb1g->aux = bb2g;
1387 }
1388 \f
/* This function searches all of the edges in the program flow graph, and puts
   as many bad edges as possible onto the spanning tree.  Bad edges include
   abnormal edges, which can't be instrumented at the moment.  Since it is
   possible for fake edges to form a cycle, we will have to develop some
   better way in the future.  Also put critical edges to the tree, since they
   are more expensive to instrument.  */
1395
1396 static void
1397 find_spanning_tree (struct edge_list *el)
1398 {
1399 int i;
1400 int num_edges = NUM_EDGES (el);
1401 basic_block bb;
1402
1403 /* We use aux field for standard union-find algorithm. */
1404 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
1405 bb->aux = bb;
1406
1407 /* Add fake edge exit to entry we can't instrument. */
1408 union_groups (EXIT_BLOCK_PTR_FOR_FN (cfun), ENTRY_BLOCK_PTR_FOR_FN (cfun));
1409
1410 /* First add all abnormal edges to the tree unless they form a cycle. Also
1411 add all edges to the exit block to avoid inserting profiling code behind
1412 setting return value from function. */
1413 for (i = 0; i < num_edges; i++)
1414 {
1415 edge e = INDEX_EDGE (el, i);
1416 if (((e->flags & (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_FAKE))
1417 || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1418 && !EDGE_INFO (e)->ignore
1419 && (find_group (e->src) != find_group (e->dest)))
1420 {
1421 if (dump_file)
1422 fprintf (dump_file, "Abnormal edge %d to %d put to tree\n",
1423 e->src->index, e->dest->index);
1424 EDGE_INFO (e)->on_tree = 1;
1425 union_groups (e->src, e->dest);
1426 }
1427 }
1428
1429 /* Now insert all critical edges to the tree unless they form a cycle. */
1430 for (i = 0; i < num_edges; i++)
1431 {
1432 edge e = INDEX_EDGE (el, i);
1433 if (EDGE_CRITICAL_P (e) && !EDGE_INFO (e)->ignore
1434 && find_group (e->src) != find_group (e->dest))
1435 {
1436 if (dump_file)
1437 fprintf (dump_file, "Critical edge %d to %d put to tree\n",
1438 e->src->index, e->dest->index);
1439 EDGE_INFO (e)->on_tree = 1;
1440 union_groups (e->src, e->dest);
1441 }
1442 }
1443
1444 /* And now the rest. */
1445 for (i = 0; i < num_edges; i++)
1446 {
1447 edge e = INDEX_EDGE (el, i);
1448 if (!EDGE_INFO (e)->ignore
1449 && find_group (e->src) != find_group (e->dest))
1450 {
1451 if (dump_file)
1452 fprintf (dump_file, "Normal edge %d to %d put to tree\n",
1453 e->src->index, e->dest->index);
1454 EDGE_INFO (e)->on_tree = 1;
1455 union_groups (e->src, e->dest);
1456 }
1457 }
1458
1459 clear_aux_for_blocks ();
1460 }
1461 \f
1462 /* Perform file-level initialization for branch-prob processing. */
1463
1464 void
1465 init_branch_prob (void)
1466 {
1467 int i;
1468
1469 total_num_blocks = 0;
1470 total_num_edges = 0;
1471 total_num_edges_ignored = 0;
1472 total_num_edges_instrumented = 0;
1473 total_num_blocks_created = 0;
1474 total_num_passes = 0;
1475 total_num_times_called = 0;
1476 total_num_branches = 0;
1477 for (i = 0; i < 20; i++)
1478 total_hist_br_prob[i] = 0;
1479 }
1480
1481 /* Performs file-level cleanup after branch-prob processing
1482 is completed. */
1483
1484 void
1485 end_branch_prob (void)
1486 {
1487 if (dump_file)
1488 {
1489 fprintf (dump_file, "\n");
1490 fprintf (dump_file, "Total number of blocks: %d\n",
1491 total_num_blocks);
1492 fprintf (dump_file, "Total number of edges: %d\n", total_num_edges);
1493 fprintf (dump_file, "Total number of ignored edges: %d\n",
1494 total_num_edges_ignored);
1495 fprintf (dump_file, "Total number of instrumented edges: %d\n",
1496 total_num_edges_instrumented);
1497 fprintf (dump_file, "Total number of blocks created: %d\n",
1498 total_num_blocks_created);
1499 fprintf (dump_file, "Total number of graph solution passes: %d\n",
1500 total_num_passes);
1501 if (total_num_times_called != 0)
1502 fprintf (dump_file, "Average number of graph solution passes: %d\n",
1503 (total_num_passes + (total_num_times_called >> 1))
1504 / total_num_times_called);
1505 fprintf (dump_file, "Total number of branches: %d\n",
1506 total_num_branches);
1507 if (total_num_branches)
1508 {
1509 int i;
1510
1511 for (i = 0; i < 10; i++)
1512 fprintf (dump_file, "%d%% branches in range %d-%d%%\n",
1513 (total_hist_br_prob[i] + total_hist_br_prob[19-i]) * 100
1514 / total_num_branches, 5*i, 5*i+5);
1515 }
1516 }
1517 }