1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and
 21 analyze it.  Other modules should not transform the data structures directly
 22 but should use the provided abstractions instead.  The file is supposed to be
23 ordered bottom-up and should not contain any code dependent on a
24 particular intermediate language (RTL or trees).
25
26 Available functionality:
27 - Initialization/deallocation
28 init_flow, clear_edges
29 - Low level basic block manipulation
30 alloc_block, expunge_block
31 - Edge manipulation
32 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
33 - Low level edge redirection (without updating instruction chain)
34 redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
35 - Dumping and debugging
36 dump_flow_info, debug_flow_info, dump_edge_info
37 - Allocation of AUX fields for basic blocks
38 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
39 - clear_bb_flags
40 - Consistency checking
41 verify_flow_info
42 - Dumping and debugging
43 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n
44
45 TODO: Document these "Available functionality" functions in the files
46 that implement them.
47 */
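/* Illustrative sketch only (not part of the original file): a minimal example
   of how a pass might use the low level entry points listed above, assuming
   bb_a, bb_b and bb_c are existing basic blocks in cfun and that the caller
   keeps any IL-specific bookkeeping (jumps, PHIs) consistent itself.

     edge e = make_edge (bb_a, bb_b, EDGE_FALLTHRU);   // add a CFG edge
     if (e)                                            // NULL if it existed
       e->probability = profile_probability::always ();
     redirect_edge_succ (e, bb_c);   // low level: only the edge vectors move
     remove_edge (e);                // drop the edge again

   The higher level cfghooks (redirect_edge_and_branch etc.) are normally
   preferred because they also update the instruction stream.  */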
48 \f
49 #include "config.h"
50 #include "system.h"
51 #include "coretypes.h"
52 #include "backend.h"
53 #include "hard-reg-set.h"
54 #include "tree.h"
55 #include "cfghooks.h"
56 #include "df.h"
57 #include "cfganal.h"
58 #include "cfgloop.h" /* FIXME: For struct loop. */
59 #include "dumpfile.h"
60
61 \f
62
63 /* Called once at initialization time. */
64
65 void
66 init_flow (struct function *the_fun)
67 {
68 if (!the_fun->cfg)
69 the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
70 n_edges_for_fn (the_fun) = 0;
71 the_fun->cfg->count_max = profile_count::uninitialized ();
72 ENTRY_BLOCK_PTR_FOR_FN (the_fun)
73 = alloc_block ();
74 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
75 EXIT_BLOCK_PTR_FOR_FN (the_fun)
76 = alloc_block ();
77 EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
78 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
79 = EXIT_BLOCK_PTR_FOR_FN (the_fun);
80 EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
81 = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
82 }
83 \f
84 /* Helper function for remove_edge and clear_edges. Frees edge structure
85 without actually removing it from the pred/succ arrays. */
86
87 static void
88 free_edge (function *fn, edge e)
89 {
90 n_edges_for_fn (fn)--;
91 ggc_free (e);
92 }
93
94 /* Free the memory associated with the edge structures. */
95
96 void
97 clear_edges (struct function *fn)
98 {
99 basic_block bb;
100 edge e;
101 edge_iterator ei;
102
103 FOR_EACH_BB_FN (bb, fn)
104 {
105 FOR_EACH_EDGE (e, ei, bb->succs)
106 free_edge (fn, e);
107 vec_safe_truncate (bb->succs, 0);
108 vec_safe_truncate (bb->preds, 0);
109 }
110
111 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
112 free_edge (fn, e);
113 vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
114 vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);
115
116 gcc_assert (!n_edges_for_fn (fn));
117 }
118 \f
119 /* Allocate memory for basic_block. */
120
121 basic_block
122 alloc_block (void)
123 {
124 basic_block bb;
125 bb = ggc_cleared_alloc<basic_block_def> ();
126 bb->count = profile_count::uninitialized ();
127 return bb;
128 }
129
130 /* Link block B to chain after AFTER. */
131 void
132 link_block (basic_block b, basic_block after)
133 {
134 b->next_bb = after->next_bb;
135 b->prev_bb = after;
136 after->next_bb = b;
137 b->next_bb->prev_bb = b;
138 }
139
140 /* Unlink block B from chain. */
141 void
142 unlink_block (basic_block b)
143 {
144 b->next_bb->prev_bb = b->prev_bb;
145 b->prev_bb->next_bb = b->next_bb;
146 b->prev_bb = NULL;
147 b->next_bb = NULL;
148 }
149
150 /* Sequentially order blocks and compact the arrays. */
151 void
152 compact_blocks (void)
153 {
154 int i;
155
156 SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
157 SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
158
159 if (df)
160 df_compact_blocks ();
161 else
162 {
163 basic_block bb;
164
165 i = NUM_FIXED_BLOCKS;
166 FOR_EACH_BB_FN (bb, cfun)
167 {
168 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
169 bb->index = i;
170 i++;
171 }
172 gcc_assert (i == n_basic_blocks_for_fn (cfun));
173
174 for (; i < last_basic_block_for_fn (cfun); i++)
175 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
176 }
177 last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
178 }
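/* Illustrative note (not part of the original file): after blocks have been
   removed, the basic block array may contain holes, e.g. with
   last_basic_block_for_fn (cfun) == 6:

     index:  0      1     2    3     4    5
     block:  ENTRY  EXIT  bb2  NULL  bb4  NULL

   compact_blocks renumbers bb4 to index 3, clears the now-unused slots and
   sets last_basic_block_for_fn == n_basic_blocks_for_fn (here 4).  */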
179
180 /* Remove block B from the basic block array. */
181
182 void
183 expunge_block (basic_block b)
184 {
185 unlink_block (b);
186 SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
187 n_basic_blocks_for_fn (cfun)--;
 188 /* We should be able to ggc_free here, but currently we cannot:
 189 dead SSA_NAMEs are left pointing to dead statements, which in turn
 190 point to dead basic blocks, and that makes the garbage collector crash.
 191 We would need to release all dead SSA_NAMEs and, at the same time,
 192 consistently clear the BB pointer of dead statements. */
193 }
194 \f
195 /* Connect E to E->src. */
196
197 static inline void
198 connect_src (edge e)
199 {
200 vec_safe_push (e->src->succs, e);
201 df_mark_solutions_dirty ();
202 }
203
204 /* Connect E to E->dest. */
205
206 static inline void
207 connect_dest (edge e)
208 {
209 basic_block dest = e->dest;
210 vec_safe_push (dest->preds, e);
211 e->dest_idx = EDGE_COUNT (dest->preds) - 1;
212 df_mark_solutions_dirty ();
213 }
214
215 /* Disconnect edge E from E->src. */
216
217 static inline void
218 disconnect_src (edge e)
219 {
220 basic_block src = e->src;
221 edge_iterator ei;
222 edge tmp;
223
224 for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
225 {
226 if (tmp == e)
227 {
228 src->succs->unordered_remove (ei.index);
229 df_mark_solutions_dirty ();
230 return;
231 }
232 else
233 ei_next (&ei);
234 }
235
236 gcc_unreachable ();
237 }
238
239 /* Disconnect edge E from E->dest. */
240
241 static inline void
242 disconnect_dest (edge e)
243 {
244 basic_block dest = e->dest;
245 unsigned int dest_idx = e->dest_idx;
246
247 dest->preds->unordered_remove (dest_idx);
248
249 /* If we removed an edge in the middle of the edge vector, we need
250 to update dest_idx of the edge that moved into the "hole". */
251 if (dest_idx < EDGE_COUNT (dest->preds))
252 EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
253 df_mark_solutions_dirty ();
254 }
255
256 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly
257 created edge. Use this only if you are sure that this edge can't
258 possibly already exist. */
259
260 edge
261 unchecked_make_edge (basic_block src, basic_block dst, int flags)
262 {
263 edge e;
264 e = ggc_cleared_alloc<edge_def> ();
265 n_edges_for_fn (cfun)++;
266
267 e->probability = profile_probability::uninitialized ();
268 e->src = src;
269 e->dest = dst;
270 e->flags = flags;
271
272 connect_src (e);
273 connect_dest (e);
274
275 execute_on_growing_pred (e);
276 return e;
277 }
278
 279 /* Create an edge connecting SRC and DST with FLAGS, optionally using the
 280 edge cache EDGE_CACHE.  Return the new edge, or NULL if it already exists. */
281
282 edge
283 cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
284 {
285 if (edge_cache == NULL
286 || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
287 || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
288 return make_edge (src, dst, flags);
289
290 /* Does the requested edge already exist? */
291 if (! bitmap_bit_p (edge_cache, dst->index))
292 {
293 /* The edge does not exist. Create one and update the
294 cache. */
295 bitmap_set_bit (edge_cache, dst->index);
296 return unchecked_make_edge (src, dst, flags);
297 }
298
299 /* At this point, we know that the requested edge exists. Adjust
300 flags if necessary. */
301 if (flags)
302 {
303 edge e = find_edge (src, dst);
304 e->flags |= flags;
305 }
306
307 return NULL;
308 }
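/* Illustrative sketch only (not part of the original file): a caller that
   creates many edges out of one block can keep a per-source sbitmap, indexed
   by destination block index, to avoid repeated find_edge lookups.  The names
   below (edge_cache, src, dests) are made up for the example.

     sbitmap edge_cache = sbitmap_alloc (last_basic_block_for_fn (cfun));
     bitmap_clear (edge_cache);
     // ... set the bits for SRC's existing successors ...
     for (unsigned i = 0; i < dests.length (); i++)
       cached_make_edge (edge_cache, src, dests[i], 0);
     sbitmap_free (edge_cache);

   The cache only answers "does an edge from SRC to this index exist"; it must
   be cleared or rebuilt whenever the source block changes.  */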
309
 310 /* Create an edge connecting SRC and DEST with flags FLAGS.  Return the newly
 311 created edge, or NULL if such an edge already exists. */
312
313 edge
314 make_edge (basic_block src, basic_block dest, int flags)
315 {
316 edge e = find_edge (src, dest);
317
318 /* Make sure we don't add duplicate edges. */
319 if (e)
320 {
321 e->flags |= flags;
322 return NULL;
323 }
324
325 return unchecked_make_edge (src, dest, flags);
326 }
327
 328 /* Create an edge connecting SRC to DEST and set its probability to
 329 "always", since it is known to be the single edge leaving SRC. */
330
331 edge
332 make_single_succ_edge (basic_block src, basic_block dest, int flags)
333 {
334 edge e = make_edge (src, dest, flags);
335
336 e->probability = profile_probability::always ();
337 return e;
338 }
339
 340 /* Remove edge E from the flow graph. */
341
342 void
343 remove_edge_raw (edge e)
344 {
345 remove_predictions_associated_with_edge (e);
346 execute_on_shrinking_pred (e);
347
348 disconnect_src (e);
349 disconnect_dest (e);
350
351 free_edge (cfun, e);
352 }
353
354 /* Redirect an edge's successor from one block to another. */
355
356 void
357 redirect_edge_succ (edge e, basic_block new_succ)
358 {
359 execute_on_shrinking_pred (e);
360
361 disconnect_dest (e);
362
363 e->dest = new_succ;
364
365 /* Reconnect the edge to the new successor block. */
366 connect_dest (e);
367
368 execute_on_growing_pred (e);
369 }
370
371 /* Redirect an edge's predecessor from one block to another. */
372
373 void
374 redirect_edge_pred (edge e, basic_block new_pred)
375 {
376 disconnect_src (e);
377
378 e->src = new_pred;
379
380 /* Reconnect the edge to the new predecessor block. */
381 connect_src (e);
382 }
383
384 /* Clear all basic block flags that do not have to be preserved. */
385 void
386 clear_bb_flags (void)
387 {
388 basic_block bb;
389
390 FOR_ALL_BB_FN (bb, cfun)
391 bb->flags &= BB_FLAGS_TO_PRESERVE;
392 }
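/* Illustrative sketch only (not part of the original file): a common pattern
   is to mark blocks with BB_VISITED during a walk and rely on clear_bb_flags
   (or explicit clearing) to reset the marks afterwards.

     basic_block bb;
     FOR_EACH_BB_FN (bb, cfun)
       if (!(bb->flags & BB_VISITED))
         {
           bb->flags |= BB_VISITED;
           // ... process BB ...
         }
     clear_bb_flags ();   // BB_VISITED is not in BB_FLAGS_TO_PRESERVE

   Passes must not assume such scratch flags survive across other passes.  */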
393 \f
 394 /* Check the consistency of profile information.  We can't do that
 395 in verify_flow_info, as the counts may become invalid for incompletely
 396 solved graphs, for later elimination of conditionals, or because of
 397 roundoff errors.  It is still practical to have inconsistencies reported
 398 when debugging simple testcases. */
399 static void
400 check_bb_profile (basic_block bb, FILE * file, int indent)
401 {
402 edge e;
403 edge_iterator ei;
404 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
405 char *s_indent = (char *) alloca ((size_t) indent + 1);
406 memset ((void *) s_indent, ' ', (size_t) indent);
407 s_indent[indent] = '\0';
408
409 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
410 return;
411
412 if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
413 {
414 bool found = false;
415 profile_probability sum = profile_probability::never ();
416 int isum = 0;
417
418 FOR_EACH_EDGE (e, ei, bb->succs)
419 {
420 if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
421 found = true;
422 sum += e->probability;
423 if (e->probability.initialized_p ())
424 isum += e->probability.to_reg_br_prob_base ();
425 }
 426 /* Only report mismatches for non-EH control flow.  If there are only EH
 427 edges, the BB ends with a noreturn call and the control flow may
 428 simply terminate there. */
429 if (found)
430 {
431 if (sum.differs_from_p (profile_probability::always ()))
432 {
433 fprintf (file,
434 ";; %sInvalid sum of outgoing probabilities ",
435 s_indent);
436 sum.dump (file);
437 fprintf (file, "\n");
438 }
 439 /* Probabilities are capped at 100%, so the previous test will never
 440 fire when the sum of probabilities is too large. */
441 else if (isum > REG_BR_PROB_BASE + 100)
442 {
443 fprintf (file,
444 ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
445 s_indent, isum * 100.0 / REG_BR_PROB_BASE);
446 }
447 }
448 }
449 if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
450 {
451 profile_count sum = profile_count::zero ();
452 FOR_EACH_EDGE (e, ei, bb->preds)
453 sum += e->count ();
454 if (sum.differs_from_p (bb->count))
455 {
456 fprintf (file, ";; %sInvalid sum of incoming counts ",
457 s_indent);
458 sum.dump (file);
459 fprintf (file, ", should be ");
460 bb->count.dump (file);
461 fprintf (file, "\n");
462 }
463 }
464 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
465 {
466 /* Warn about inconsistencies in the partitioning that are
467 currently caused by profile insanities created via optimization. */
468 if (!probably_never_executed_bb_p (fun, bb))
469 fprintf (file, ";; %sBlock in cold partition with hot count\n",
470 s_indent);
471 FOR_EACH_EDGE (e, ei, bb->preds)
472 {
473 if (!probably_never_executed_edge_p (fun, e))
474 fprintf (file,
475 ";; %sBlock in cold partition with incoming hot edge\n",
476 s_indent);
477 }
478 }
479 }
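/* Illustrative example (not part of the original file) of the two checks
   above: for a block with count 100 and two non-EH successors whose
   probabilities are 60% and 30%, the outgoing sum is 90% and the first
   mismatch is reported; if the block's incoming edges carry counts 40 and 50,
   the incoming sum 90 differs from the block count 100 and the second
   mismatch is reported as well.  */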
480 \f
481 void
482 dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
483 {
484 basic_block side = (do_succ ? e->dest : e->src);
485 bool do_details = false;
486
487 if ((flags & TDF_DETAILS) != 0
488 && (flags & TDF_SLIM) == 0)
489 do_details = true;
490
491 if (side->index == ENTRY_BLOCK)
492 fputs (" ENTRY", file);
493 else if (side->index == EXIT_BLOCK)
494 fputs (" EXIT", file);
495 else
496 fprintf (file, " %d", side->index);
497
498 if (e->probability.initialized_p () && do_details)
499 {
500 fprintf (file, " [");
501 e->probability.dump (file);
502 fprintf (file, "] ");
503 }
504
505 if (e->count ().initialized_p () && do_details)
506 {
507 fputs (" count:", file);
508 e->count ().dump (file);
509 }
510
511 if (e->flags && do_details)
512 {
513 static const char * const bitnames[] =
514 {
515 #define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
516 #include "cfg-flags.def"
517 NULL
518 #undef DEF_EDGE_FLAG
519 };
520 bool comma = false;
521 int i, flags = e->flags;
522
523 gcc_assert (e->flags <= EDGE_ALL_FLAGS);
524 fputs (" (", file);
525 for (i = 0; flags; i++)
526 if (flags & (1 << i))
527 {
528 flags &= ~(1 << i);
529
530 if (comma)
531 fputc (',', file);
532 fputs (bitnames[i], file);
533 comma = true;
534 }
535
536 fputc (')', file);
537 }
538 }
539
540 DEBUG_FUNCTION void
541 debug (edge_def &ref)
542 {
 543 /* FIXME (crowl): Is this desirable? */
544 dump_edge_info (stderr, &ref, 0, false);
545 dump_edge_info (stderr, &ref, 0, true);
546 }
547
548 DEBUG_FUNCTION void
549 debug (edge_def *ptr)
550 {
551 if (ptr)
552 debug (*ptr);
553 else
554 fprintf (stderr, "<nil>\n");
555 }
556
557 static void
558 debug_slim (edge e)
559 {
560 fprintf (stderr, "<edge 0x%p (%d -> %d)>", (void *) e,
561 e->src->index, e->dest->index);
562 }
563
564 DEFINE_DEBUG_VEC (edge)
565 DEFINE_DEBUG_HASH_SET (edge)
566 \f
567 /* Simple routines to easily allocate AUX fields of basic blocks. */
568
569 static struct obstack block_aux_obstack;
570 static void *first_block_aux_obj = 0;
571 static struct obstack edge_aux_obstack;
572 static void *first_edge_aux_obj = 0;
573
 574 /* Allocate a memory block of SIZE as BB->aux.  The obstack must
 575 first be initialized by alloc_aux_for_blocks. */
576
577 static void
578 alloc_aux_for_block (basic_block bb, int size)
579 {
580 /* Verify that aux field is clear. */
581 gcc_assert (!bb->aux && first_block_aux_obj);
582 bb->aux = obstack_alloc (&block_aux_obstack, size);
583 memset (bb->aux, 0, size);
584 }
585
586 /* Initialize the block_aux_obstack and if SIZE is nonzero, call
587 alloc_aux_for_block for each basic block. */
588
589 void
590 alloc_aux_for_blocks (int size)
591 {
592 static int initialized;
593
594 if (!initialized)
595 {
596 gcc_obstack_init (&block_aux_obstack);
597 initialized = 1;
598 }
599 else
600 /* Check whether AUX data are still allocated. */
601 gcc_assert (!first_block_aux_obj);
602
603 first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
604 if (size)
605 {
606 basic_block bb;
607
608 FOR_ALL_BB_FN (bb, cfun)
609 alloc_aux_for_block (bb, size);
610 }
611 }
612
613 /* Clear AUX pointers of all blocks. */
614
615 void
616 clear_aux_for_blocks (void)
617 {
618 basic_block bb;
619
620 FOR_ALL_BB_FN (bb, cfun)
621 bb->aux = NULL;
622 }
623
624 /* Free data allocated in block_aux_obstack and clear AUX pointers
625 of all blocks. */
626
627 void
628 free_aux_for_blocks (void)
629 {
630 gcc_assert (first_block_aux_obj);
631 obstack_free (&block_aux_obstack, first_block_aux_obj);
632 first_block_aux_obj = NULL;
633
634 clear_aux_for_blocks ();
635 }
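/* Illustrative sketch only (not part of the original file): typical lifetime
   of per-block AUX data in a pass.  The struct name (my_bb_info) is made up.

     struct my_bb_info { int dfs_order; bool reachable; };

     alloc_aux_for_blocks (sizeof (struct my_bb_info));
     basic_block bb;
     FOR_EACH_BB_FN (bb, cfun)
       ((struct my_bb_info *) bb->aux)->dfs_order = -1;
     // ... use the data ...
     free_aux_for_blocks ();   // also clears every bb->aux

   All AUX memory comes from one obstack, so individual entries cannot be
   freed separately.  */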
636
 637 /* Allocate a memory block of SIZE as E->aux.  The obstack must
 638 first be initialized by alloc_aux_for_edges. */
639
640 void
641 alloc_aux_for_edge (edge e, int size)
642 {
643 /* Verify that aux field is clear. */
644 gcc_assert (!e->aux && first_edge_aux_obj);
645 e->aux = obstack_alloc (&edge_aux_obstack, size);
646 memset (e->aux, 0, size);
647 }
648
 649 /* Initialize the edge_aux_obstack and if SIZE is nonzero, call
 650 alloc_aux_for_edge for each edge. */
651
652 void
653 alloc_aux_for_edges (int size)
654 {
655 static int initialized;
656
657 if (!initialized)
658 {
659 gcc_obstack_init (&edge_aux_obstack);
660 initialized = 1;
661 }
662 else
663 /* Check whether AUX data are still allocated. */
664 gcc_assert (!first_edge_aux_obj);
665
666 first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
667 if (size)
668 {
669 basic_block bb;
670
671 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
672 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
673 {
674 edge e;
675 edge_iterator ei;
676
677 FOR_EACH_EDGE (e, ei, bb->succs)
678 alloc_aux_for_edge (e, size);
679 }
680 }
681 }
682
683 /* Clear AUX pointers of all edges. */
684
685 void
686 clear_aux_for_edges (void)
687 {
688 basic_block bb;
689 edge e;
690
691 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
692 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
693 {
694 edge_iterator ei;
695 FOR_EACH_EDGE (e, ei, bb->succs)
696 e->aux = NULL;
697 }
698 }
699
700 /* Free data allocated in edge_aux_obstack and clear AUX pointers
701 of all edges. */
702
703 void
704 free_aux_for_edges (void)
705 {
706 gcc_assert (first_edge_aux_obj);
707 obstack_free (&edge_aux_obstack, first_edge_aux_obj);
708 first_edge_aux_obj = NULL;
709
710 clear_aux_for_edges ();
711 }
712
713 DEBUG_FUNCTION void
714 debug_bb (basic_block bb)
715 {
716 dump_bb (stderr, bb, 0, dump_flags);
717 }
718
719 DEBUG_FUNCTION basic_block
720 debug_bb_n (int n)
721 {
722 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
723 debug_bb (bb);
724 return bb;
725 }
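/* Illustrative note (not part of the original file): the DEBUG_FUNCTION
   routines above are meant to be called by hand from the debugger, e.g.

     (gdb) call debug_bb_n (5)
     (gdb) call debug_bb (bb)
     (gdb) call debug (*e)

   assuming `bb' and `e' are a basic_block and an edge visible in the
   current frame.  */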
726
 727 /* Dump CFG-related information about basic block BB to OUTF.
 728 If DO_HEADER is true, dump things that appear before the instructions
 729 contained in BB.  If DO_FOOTER is true, dump things that appear after.
 730 FLAGS are the TDF_* masks as documented in dumpfile.h.
 731 NB: With TDF_DETAILS, it is assumed that cfun is available, so
 732 that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE. */
733
734 void
735 dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
736 bool do_header, bool do_footer)
737 {
738 edge_iterator ei;
739 edge e;
740 static const char * const bb_bitnames[] =
741 {
742 #define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
743 #include "cfg-flags.def"
744 NULL
745 #undef DEF_BASIC_BLOCK_FLAG
746 };
747 const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
748 bool first;
749 char *s_indent = (char *) alloca ((size_t) indent + 1);
750 memset ((void *) s_indent, ' ', (size_t) indent);
751 s_indent[indent] = '\0';
752
753 gcc_assert (bb->flags <= BB_ALL_FLAGS);
754
755 if (do_header)
756 {
757 unsigned i;
758
759 fputs (";; ", outf);
760 fprintf (outf, "%sbasic block %d, loop depth %d",
761 s_indent, bb->index, bb_loop_depth (bb));
762 if (flags & TDF_DETAILS)
763 {
764 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
765 if (bb->count.initialized_p ())
766 {
767 fputs (", count ", outf);
768 bb->count.dump (outf);
769 }
770 if (maybe_hot_bb_p (fun, bb))
771 fputs (", maybe hot", outf);
772 if (probably_never_executed_bb_p (fun, bb))
773 fputs (", probably never executed", outf);
774 }
775 fputc ('\n', outf);
776
777 if (flags & TDF_DETAILS)
778 {
779 check_bb_profile (bb, outf, indent);
780 fputs (";; ", outf);
781 fprintf (outf, "%s prev block ", s_indent);
782 if (bb->prev_bb)
783 fprintf (outf, "%d", bb->prev_bb->index);
784 else
785 fprintf (outf, "(nil)");
786 fprintf (outf, ", next block ");
787 if (bb->next_bb)
788 fprintf (outf, "%d", bb->next_bb->index);
789 else
790 fprintf (outf, "(nil)");
791
792 fputs (", flags:", outf);
793 first = true;
794 for (i = 0; i < n_bitnames; i++)
795 if (bb->flags & (1 << i))
796 {
797 if (first)
798 fputs (" (", outf);
799 else
800 fputs (", ", outf);
801 first = false;
802 fputs (bb_bitnames[i], outf);
803 }
804 if (!first)
805 fputc (')', outf);
806 fputc ('\n', outf);
807 }
808
809 fputs (";; ", outf);
810 fprintf (outf, "%s pred: ", s_indent);
811 first = true;
812 FOR_EACH_EDGE (e, ei, bb->preds)
813 {
814 if (! first)
815 {
816 fputs (";; ", outf);
817 fprintf (outf, "%s ", s_indent);
818 }
819 first = false;
820 dump_edge_info (outf, e, flags, 0);
821 fputc ('\n', outf);
822 }
823 if (first)
824 fputc ('\n', outf);
825 }
826
827 if (do_footer)
828 {
829 fputs (";; ", outf);
830 fprintf (outf, "%s succ: ", s_indent);
831 first = true;
832 FOR_EACH_EDGE (e, ei, bb->succs)
833 {
834 if (! first)
835 {
836 fputs (";; ", outf);
837 fprintf (outf, "%s ", s_indent);
838 }
839 first = false;
840 dump_edge_info (outf, e, flags, 1);
841 fputc ('\n', outf);
842 }
843 if (first)
844 fputc ('\n', outf);
845 }
846 }
847
 848 /* Dump a brief description of the CFG to FILE. */
849
850 void
851 brief_dump_cfg (FILE *file, dump_flags_t flags)
852 {
853 basic_block bb;
854
855 FOR_EACH_BB_FN (bb, cfun)
856 {
857 dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
858 }
859 }
860
 861 /* An edge with execution count COUNT, originally destined for BB, has
 862 been proved to leave the block via TAKEN_EDGE.  Update the profile of
 863 BB so that the edge can be redirected to the destination of TAKEN_EDGE.
 864
 865 This function may leave the profile inconsistent in the case the
 866 count (or frequency) of TAKEN_EDGE is believed to be lower than
 867 COUNT. */
868 void
869 update_bb_profile_for_threading (basic_block bb,
870 profile_count count, edge taken_edge)
871 {
872 edge c;
873 profile_probability prob;
874 edge_iterator ei;
875
876 if (bb->count < count)
877 {
878 if (dump_file)
 879 fprintf (dump_file, "bb %i count became negative after threading\n",
 880 bb->index);
881 }
882 bb->count -= count;
883
884 /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
885 Watch for overflows. */
886 if (bb->count.nonzero_p ())
887 prob = count.probability_in (bb->count);
888 else
889 prob = profile_probability::never ();
890 if (prob > taken_edge->probability)
891 {
892 if (dump_file)
893 {
894 fprintf (dump_file, "Jump threading proved probability of edge "
895 "%i->%i too small (it is ",
896 taken_edge->src->index, taken_edge->dest->index);
897 taken_edge->probability.dump (dump_file);
898 fprintf (dump_file, " should be ");
899 prob.dump (dump_file);
900 fprintf (dump_file, ")\n");
901 }
902 prob = taken_edge->probability.apply_scale (6, 8);
903 }
904
905 /* Now rescale the probabilities. */
906 taken_edge->probability -= prob;
907 prob = prob.invert ();
908 if (prob == profile_probability::never ())
909 {
910 if (dump_file)
 911 fprintf (dump_file, "Edge probabilities of bb %i have been reset; "
 912 "count of block should end up being 0, but it is non-zero\n",
913 bb->index);
914 EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
915 ei = ei_start (bb->succs);
916 ei_next (&ei);
917 for (; (c = ei_safe_edge (ei)); ei_next (&ei))
918 c->probability = profile_probability::guessed_never ();
919 }
920 else if (!(prob == profile_probability::always ()))
921 {
922 FOR_EACH_EDGE (c, ei, bb->succs)
923 c->probability /= prob;
924 }
925
926 gcc_assert (bb == taken_edge->src);
927 }
928
 929 /* Multiply the counts of all basic blocks in array BBS of length NBBS
 930 by NUM/DEN, in profile_count arithmetic.  The scaling is only applied
 931 when NUM is zero or DEN is known to be nonzero. */
932 void
933 scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
934 profile_count num, profile_count den)
935 {
936 int i;
937 if (num == profile_count::zero () || den.nonzero_p ())
938 for (i = 0; i < nbbs; i++)
939 bbs[i]->count = bbs[i]->count.apply_scale (num, den);
940 }
941
 942 /* Scale the counts of all basic blocks in array BBS of length NBBS
 943 by the probability P, i.e. multiply each block's count by P in
 944 profile_count arithmetic. */
945 void
946 scale_bbs_frequencies (basic_block *bbs, int nbbs,
947 profile_probability p)
948 {
949 int i;
950
951 for (i = 0; i < nbbs; i++)
952 bbs[i]->count = bbs[i]->count.apply_probability (p);
953 }
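/* Illustrative sketch only (not part of the original file): scaling the
   counts of a whole loop body, assuming `loop' is a struct loop * the caller
   already has, e.g. when a transformation expects the body to execute half
   as often.

     basic_block *body = get_loop_body (loop);
     scale_bbs_frequencies (body, loop->num_nodes,
                            profile_probability::always ().apply_scale (1, 2));
     free (body);

   The profile_count based variant above is for scale factors expressed as a
   ratio of two counts (NUM/DEN).  */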
954
955 /* Helper types for hash tables. */
956
957 struct htab_bb_copy_original_entry
958 {
959 /* Block we are attaching info to. */
960 int index1;
 961 /* Index of the original or of the copy (depending on the hash table). */
962 int index2;
963 };
964
965 struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
966 {
967 static inline hashval_t hash (const htab_bb_copy_original_entry *);
968 static inline bool equal (const htab_bb_copy_original_entry *existing,
969 const htab_bb_copy_original_entry * candidate);
970 };
971
972 inline hashval_t
973 bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
974 {
975 return data->index1;
976 }
977
978 inline bool
979 bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
980 const htab_bb_copy_original_entry *data2)
981 {
982 return data->index1 == data2->index1;
983 }
984
985 /* Data structures used to maintain mapping between basic blocks and
986 copies. */
987 static hash_table<bb_copy_hasher> *bb_original;
988 static hash_table<bb_copy_hasher> *bb_copy;
989
990 /* And between loops and copies. */
991 static hash_table<bb_copy_hasher> *loop_copy;
992 static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;
993
 994 /* Initialize the data structures to maintain the mapping between blocks
 995 and their copies. */
996 void
997 initialize_original_copy_tables (void)
998 {
999 original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
1000 ("original_copy");
1001 bb_original = new hash_table<bb_copy_hasher> (10);
1002 bb_copy = new hash_table<bb_copy_hasher> (10);
1003 loop_copy = new hash_table<bb_copy_hasher> (10);
1004 }
1005
 1006 /* Reset the data structures that maintain the mapping between blocks
 1007 and their copies. */
1008
1009 void
1010 reset_original_copy_tables (void)
1011 {
1012 gcc_assert (original_copy_bb_pool);
1013 bb_original->empty ();
1014 bb_copy->empty ();
1015 loop_copy->empty ();
1016 }
1017
 1018 /* Free the data structures that maintain the mapping between blocks
 1019 and their copies. */
1020 void
1021 free_original_copy_tables (void)
1022 {
1023 gcc_assert (original_copy_bb_pool);
1024 delete bb_copy;
1025 bb_copy = NULL;
1026 delete bb_original;
1027 bb_original = NULL;
1028 delete loop_copy;
1029 loop_copy = NULL;
1030 delete original_copy_bb_pool;
1031 original_copy_bb_pool = NULL;
1032 }
1033
1034 /* Return true iff we have had a call to initialize_original_copy_tables
1035 without a corresponding call to free_original_copy_tables. */
1036
1037 bool
1038 original_copy_tables_initialized_p (void)
1039 {
1040 return original_copy_bb_pool != NULL;
1041 }
1042
1043 /* Removes the value associated with OBJ from table TAB. */
1044
1045 static void
1046 copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
1047 {
1048 htab_bb_copy_original_entry **slot;
1049 struct htab_bb_copy_original_entry key, *elt;
1050
1051 if (!original_copy_bb_pool)
1052 return;
1053
1054 key.index1 = obj;
1055 slot = tab->find_slot (&key, NO_INSERT);
1056 if (!slot)
1057 return;
1058
1059 elt = *slot;
1060 tab->clear_slot (slot);
1061 original_copy_bb_pool->remove (elt);
1062 }
1063
1064 /* Sets the value associated with OBJ in table TAB to VAL.
1065 Do nothing when data structures are not initialized. */
1066
1067 static void
1068 copy_original_table_set (hash_table<bb_copy_hasher> *tab,
1069 unsigned obj, unsigned val)
1070 {
1071 struct htab_bb_copy_original_entry **slot;
1072 struct htab_bb_copy_original_entry key;
1073
1074 if (!original_copy_bb_pool)
1075 return;
1076
1077 key.index1 = obj;
1078 slot = tab->find_slot (&key, INSERT);
1079 if (!*slot)
1080 {
1081 *slot = original_copy_bb_pool->allocate ();
1082 (*slot)->index1 = obj;
1083 }
1084 (*slot)->index2 = val;
1085 }
1086
 1087 /* Record ORIGINAL as the original basic block of BB.  Do nothing when the
 1088 data structures are not initialized so passes not needing this don't care. */
1089 void
1090 set_bb_original (basic_block bb, basic_block original)
1091 {
1092 copy_original_table_set (bb_original, bb->index, original->index);
1093 }
1094
1095 /* Get the original basic block. */
1096 basic_block
1097 get_bb_original (basic_block bb)
1098 {
1099 struct htab_bb_copy_original_entry *entry;
1100 struct htab_bb_copy_original_entry key;
1101
1102 gcc_assert (original_copy_bb_pool);
1103
1104 key.index1 = bb->index;
1105 entry = bb_original->find (&key);
1106 if (entry)
1107 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1108 else
1109 return NULL;
1110 }
1111
 1112 /* Record COPY as the copy of basic block BB.  Do nothing when the data
 1113 structures are not initialized so passes not needing this don't care. */
1114 void
1115 set_bb_copy (basic_block bb, basic_block copy)
1116 {
1117 copy_original_table_set (bb_copy, bb->index, copy->index);
1118 }
1119
1120 /* Get the copy of basic block. */
1121 basic_block
1122 get_bb_copy (basic_block bb)
1123 {
1124 struct htab_bb_copy_original_entry *entry;
1125 struct htab_bb_copy_original_entry key;
1126
1127 gcc_assert (original_copy_bb_pool);
1128
1129 key.index1 = bb->index;
1130 entry = bb_copy->find (&key);
1131 if (entry)
1132 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1133 else
1134 return NULL;
1135 }
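/* Illustrative sketch only (not part of the original file): a pass that
   duplicates blocks can use these tables to find the copy of a block later,
   e.g. when redirecting edges into the duplicated region.  Here new_bb is
   assumed to be a freshly created duplicate of old_bb.

     initialize_original_copy_tables ();
     set_bb_original (new_bb, old_bb);
     set_bb_copy (old_bb, new_bb);
     // ...
     basic_block copy = get_bb_copy (old_bb);   // == new_bb
     free_original_copy_tables ();

   The getters assert that initialize_original_copy_tables has been called,
   while the setters silently do nothing when it has not.  */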
1136
1137 /* Set copy for LOOP to COPY. Do nothing when data structures are not
1138 initialized so passes not needing this don't need to care. */
1139
1140 void
1141 set_loop_copy (struct loop *loop, struct loop *copy)
1142 {
1143 if (!copy)
1144 copy_original_table_clear (loop_copy, loop->num);
1145 else
1146 copy_original_table_set (loop_copy, loop->num, copy->num);
1147 }
1148
1149 /* Get the copy of LOOP. */
1150
1151 struct loop *
1152 get_loop_copy (struct loop *loop)
1153 {
1154 struct htab_bb_copy_original_entry *entry;
1155 struct htab_bb_copy_original_entry key;
1156
1157 gcc_assert (original_copy_bb_pool);
1158
1159 key.index1 = loop->num;
1160 entry = loop_copy->find (&key);
1161 if (entry)
1162 return get_loop (cfun, entry->index2);
1163 else
1164 return NULL;
1165 }