gcc/cfg.c
1 /* Control flow graph manipulation code for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This file contains low level functions to manipulate the CFG and
21 analyze it. Other modules should not transform the data structures
22 directly; they should use the abstractions provided here. The file is supposed to be
23 ordered bottom-up and should not contain any code dependent on a
24 particular intermediate language (RTL or trees).
25
26 Available functionality:
27 - Initialization/deallocation
28 init_flow, clear_edges
29 - Low level basic block manipulation
30 alloc_block, expunge_block
31 - Edge manipulation
32 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
33 - Low level edge redirection (without updating instruction chain)
34 redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
35 - Dumping and debugging
36 dump_flow_info, debug_flow_info, dump_edge_info
37 - Allocation of AUX fields for basic blocks
38 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
39 - clear_bb_flags
40 - Consistency checking
41 verify_flow_info
42 - Dumping and debugging
43 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n
44
45 TODO: Document these "Available functionality" functions in the files
46 that implement them.
47 */
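/* As a rough usage sketch (hypothetical caller code, not taken from any
   real pass), the edge primitives listed above compose like this for
   blocks BB_A, BB_B and BB_C the caller already has:

     edge e = make_edge (bb_a, bb_b, EDGE_FALLTHRU);
     if (e == NULL)
       e = find_edge (bb_a, bb_b);
     gcc_assert (e->flags & EDGE_FALLTHRU);
     redirect_edge_succ (e, bb_c);
     remove_edge (e);

   make_edge returns NULL when the edge already exists (after OR-ing in the
   requested flags), hence the find_edge fallback.  Most passes should go
   through the higher-level cfghooks wrappers rather than call these
   low-level routines directly.  */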
48 \f
49 #include "config.h"
50 #include "system.h"
51 #include "coretypes.h"
52 #include "obstack.h"
53 #include "ggc.h"
54 #include "hash-table.h"
55 #include "alloc-pool.h"
56 #include "hash-set.h"
57 #include "machmode.h"
58 #include "vec.h"
59 #include "double-int.h"
60 #include "input.h"
61 #include "alias.h"
62 #include "symtab.h"
63 #include "options.h"
64 #include "wide-int.h"
65 #include "inchash.h"
66 #include "tree.h"
67 #include "predict.h"
 69 #include "hashtab.h"
 72 #include "tm.h"
 73 #include "hard-reg-set.h"
75 #include "function.h"
76 #include "dominance.h"
77 #include "cfg.h"
78 #include "cfganal.h"
79 #include "basic-block.h"
80 #include "df.h"
81 #include "cfgloop.h" /* FIXME: For struct loop. */
82 #include "dumpfile.h"
83
84 \f
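/* Rounding integer division: for nonnegative operands, RDIV (X, Y) yields
   X/Y rounded to the nearest integer, e.g. RDIV (7, 2) == 4 while 7 / 2 == 3.
   It is used throughout this file to keep rescaled counts and frequencies
   from systematically drifting downwards through truncation.  */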
85 #define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
86
87 /* Called once at initialization time. */
88
89 void
90 init_flow (struct function *the_fun)
91 {
92 if (!the_fun->cfg)
93 the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
94 n_edges_for_fn (the_fun) = 0;
95 ENTRY_BLOCK_PTR_FOR_FN (the_fun)
96 = ggc_cleared_alloc<basic_block_def> ();
97 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
98 EXIT_BLOCK_PTR_FOR_FN (the_fun)
99 = ggc_cleared_alloc<basic_block_def> ();
100 EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
101 ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
102 = EXIT_BLOCK_PTR_FOR_FN (the_fun);
103 EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
104 = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
105 }
106 \f
107 /* Helper function for remove_edge and clear_edges. Frees edge structure
108 without actually removing it from the pred/succ arrays. */
109
110 static void
111 free_edge (edge e)
112 {
113 n_edges_for_fn (cfun)--;
114 ggc_free (e);
115 }
116
117 /* Free the memory associated with the edge structures. */
118
119 void
120 clear_edges (void)
121 {
122 basic_block bb;
123 edge e;
124 edge_iterator ei;
125
126 FOR_EACH_BB_FN (bb, cfun)
127 {
128 FOR_EACH_EDGE (e, ei, bb->succs)
129 free_edge (e);
130 vec_safe_truncate (bb->succs, 0);
131 vec_safe_truncate (bb->preds, 0);
132 }
133
134 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
135 free_edge (e);
136 vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds, 0);
137 vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs, 0);
138
139 gcc_assert (!n_edges_for_fn (cfun));
140 }
141 \f
142 /* Allocate memory for basic_block. */
143
144 basic_block
145 alloc_block (void)
146 {
147 basic_block bb;
148 bb = ggc_cleared_alloc<basic_block_def> ();
149 return bb;
150 }
151
152 /* Link block B to chain after AFTER. */
153 void
154 link_block (basic_block b, basic_block after)
155 {
156 b->next_bb = after->next_bb;
157 b->prev_bb = after;
158 after->next_bb = b;
159 b->next_bb->prev_bb = b;
160 }
161
162 /* Unlink block B from chain. */
163 void
164 unlink_block (basic_block b)
165 {
166 b->next_bb->prev_bb = b->prev_bb;
167 b->prev_bb->next_bb = b->next_bb;
168 b->prev_bb = NULL;
169 b->next_bb = NULL;
170 }
171
172 /* Sequentially order blocks and compact the arrays. */
173 void
174 compact_blocks (void)
175 {
176 int i;
177
178 SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
179 SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));
180
181 if (df)
182 df_compact_blocks ();
183 else
184 {
185 basic_block bb;
186
187 i = NUM_FIXED_BLOCKS;
188 FOR_EACH_BB_FN (bb, cfun)
189 {
190 SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
191 bb->index = i;
192 i++;
193 }
194 gcc_assert (i == n_basic_blocks_for_fn (cfun));
195
196 for (; i < last_basic_block_for_fn (cfun); i++)
197 SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
198 }
199 last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
200 }
201
202 /* Remove block B from the basic block array. */
203
204 void
205 expunge_block (basic_block b)
206 {
207 unlink_block (b);
208 SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
209 n_basic_blocks_for_fn (cfun)--;
 210 /* We should be able to ggc_free here, but we cannot:
 211 the dead SSA_NAMEs are left pointing to dead statements that in turn point
 212 to dead basic blocks, which makes the garbage collector crash.
 213 We should release all dead SSA_NAMEs and at the same time consistently
 214 clear out the BB pointer of dead statements. */
215 }
216 \f
217 /* Connect E to E->src. */
218
219 static inline void
220 connect_src (edge e)
221 {
222 vec_safe_push (e->src->succs, e);
223 df_mark_solutions_dirty ();
224 }
225
226 /* Connect E to E->dest. */
227
228 static inline void
229 connect_dest (edge e)
230 {
231 basic_block dest = e->dest;
232 vec_safe_push (dest->preds, e);
233 e->dest_idx = EDGE_COUNT (dest->preds) - 1;
234 df_mark_solutions_dirty ();
235 }
236
237 /* Disconnect edge E from E->src. */
238
239 static inline void
240 disconnect_src (edge e)
241 {
242 basic_block src = e->src;
243 edge_iterator ei;
244 edge tmp;
245
246 for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
247 {
248 if (tmp == e)
249 {
250 src->succs->unordered_remove (ei.index);
251 df_mark_solutions_dirty ();
252 return;
253 }
254 else
255 ei_next (&ei);
256 }
257
258 gcc_unreachable ();
259 }
260
261 /* Disconnect edge E from E->dest. */
262
263 static inline void
264 disconnect_dest (edge e)
265 {
266 basic_block dest = e->dest;
267 unsigned int dest_idx = e->dest_idx;
268
269 dest->preds->unordered_remove (dest_idx);
270
271 /* If we removed an edge in the middle of the edge vector, we need
272 to update dest_idx of the edge that moved into the "hole". */
273 if (dest_idx < EDGE_COUNT (dest->preds))
274 EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
275 df_mark_solutions_dirty ();
276 }
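/* For instance (indices purely illustrative): if DEST has preds [A, B, C]
   with dest_idx 0, 1 and 2, removing the edge from B makes unordered_remove
   move C into slot 1, so C's dest_idx must be updated from 2 to 1, which is
   exactly what the fix-up above does.  */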
277
278 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly
279 created edge. Use this only if you are sure that this edge can't
280 possibly already exist. */
281
282 edge
283 unchecked_make_edge (basic_block src, basic_block dst, int flags)
284 {
285 edge e;
286 e = ggc_cleared_alloc<edge_def> ();
287 n_edges_for_fn (cfun)++;
288
289 e->src = src;
290 e->dest = dst;
291 e->flags = flags;
292
293 connect_src (e);
294 connect_dest (e);
295
296 execute_on_growing_pred (e);
297 return e;
298 }
299
 300 /* Create an edge connecting SRC and DST with FLAGS, optionally using the
 301 edge cache EDGE_CACHE. Return the new edge, or NULL if it already exists. */
302
303 edge
304 cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
305 {
306 if (edge_cache == NULL
307 || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
308 || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
309 return make_edge (src, dst, flags);
310
311 /* Does the requested edge already exist? */
312 if (! bitmap_bit_p (edge_cache, dst->index))
313 {
314 /* The edge does not exist. Create one and update the
315 cache. */
316 bitmap_set_bit (edge_cache, dst->index);
317 return unchecked_make_edge (src, dst, flags);
318 }
319
320 /* At this point, we know that the requested edge exists. Adjust
321 flags if necessary. */
322 if (flags)
323 {
324 edge e = find_edge (src, dst);
325 e->flags |= flags;
326 }
327
328 return NULL;
329 }
330
331 /* Create an edge connecting SRC and DEST with flags FLAGS. Return newly
 332 created edge, or NULL if one already exists. */
333
334 edge
335 make_edge (basic_block src, basic_block dest, int flags)
336 {
337 edge e = find_edge (src, dest);
338
339 /* Make sure we don't add duplicate edges. */
340 if (e)
341 {
342 e->flags |= flags;
343 return NULL;
344 }
345
346 return unchecked_make_edge (src, dest, flags);
347 }
348
 349 /* Create an edge connecting SRC to DEST and set its probability and count
 350 knowing that it is the single edge leaving SRC. */
351
352 edge
353 make_single_succ_edge (basic_block src, basic_block dest, int flags)
354 {
355 edge e = make_edge (src, dest, flags);
356
357 e->probability = REG_BR_PROB_BASE;
358 e->count = src->count;
359 return e;
360 }
361
362 /* This function will remove an edge from the flow graph. */
363
364 void
365 remove_edge_raw (edge e)
366 {
367 remove_predictions_associated_with_edge (e);
368 execute_on_shrinking_pred (e);
369
370 disconnect_src (e);
371 disconnect_dest (e);
372
373 free_edge (e);
374 }
375
376 /* Redirect an edge's successor from one block to another. */
377
378 void
379 redirect_edge_succ (edge e, basic_block new_succ)
380 {
381 execute_on_shrinking_pred (e);
382
383 disconnect_dest (e);
384
385 e->dest = new_succ;
386
387 /* Reconnect the edge to the new successor block. */
388 connect_dest (e);
389
390 execute_on_growing_pred (e);
391 }
392
393 /* Redirect an edge's predecessor from one block to another. */
394
395 void
396 redirect_edge_pred (edge e, basic_block new_pred)
397 {
398 disconnect_src (e);
399
400 e->src = new_pred;
401
402 /* Reconnect the edge to the new predecessor block. */
403 connect_src (e);
404 }
405
406 /* Clear all basic block flags that do not have to be preserved. */
407 void
408 clear_bb_flags (void)
409 {
410 basic_block bb;
411
412 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, next_bb)
413 bb->flags &= BB_FLAGS_TO_PRESERVE;
414 }
415 \f
416 /* Check the consistency of profile information. We can't do that
 417 in verify_flow_info, as the counts may become invalid due to incompletely
 418 solved graphs, later elimination of conditionals, or roundoff errors.
419 It is still practical to have them reported for debugging of simple
420 testcases. */
421 static void
422 check_bb_profile (basic_block bb, FILE * file, int indent, int flags)
423 {
424 edge e;
425 int sum = 0;
426 gcov_type lsum;
427 edge_iterator ei;
428 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
429 char *s_indent = (char *) alloca ((size_t) indent + 1);
430 memset ((void *) s_indent, ' ', (size_t) indent);
431 s_indent[indent] = '\0';
432
433 if (profile_status_for_fn (fun) == PROFILE_ABSENT)
434 return;
435
436 if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
437 {
438 FOR_EACH_EDGE (e, ei, bb->succs)
439 sum += e->probability;
440 if (EDGE_COUNT (bb->succs) && abs (sum - REG_BR_PROB_BASE) > 100)
441 fprintf (file, "%s%sInvalid sum of outgoing probabilities %.1f%%\n",
442 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
443 sum * 100.0 / REG_BR_PROB_BASE);
444 lsum = 0;
445 FOR_EACH_EDGE (e, ei, bb->succs)
446 lsum += e->count;
447 if (EDGE_COUNT (bb->succs)
448 && (lsum - bb->count > 100 || lsum - bb->count < -100))
449 fprintf (file, "%s%sInvalid sum of outgoing counts %i, should be %i\n",
450 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
451 (int) lsum, (int) bb->count);
452 }
453 if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
454 {
455 sum = 0;
456 FOR_EACH_EDGE (e, ei, bb->preds)
457 sum += EDGE_FREQUENCY (e);
458 if (abs (sum - bb->frequency) > 100)
459 fprintf (file,
460 "%s%sInvalid sum of incoming frequencies %i, should be %i\n",
461 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
462 sum, bb->frequency);
463 lsum = 0;
464 FOR_EACH_EDGE (e, ei, bb->preds)
465 lsum += e->count;
466 if (lsum - bb->count > 100 || lsum - bb->count < -100)
467 fprintf (file, "%s%sInvalid sum of incoming counts %i, should be %i\n",
468 (flags & TDF_COMMENT) ? ";; " : "", s_indent,
469 (int) lsum, (int) bb->count);
470 }
471 if (BB_PARTITION (bb) == BB_COLD_PARTITION)
472 {
473 /* Warn about inconsistencies in the partitioning that are
474 currently caused by profile insanities created via optimization. */
475 if (!probably_never_executed_bb_p (fun, bb))
476 fprintf (file, "%s%sBlock in cold partition with hot count\n",
477 (flags & TDF_COMMENT) ? ";; " : "", s_indent);
478 FOR_EACH_EDGE (e, ei, bb->preds)
479 {
480 if (!probably_never_executed_edge_p (fun, e))
481 fprintf (file,
482 "%s%sBlock in cold partition with incoming hot edge\n",
483 (flags & TDF_COMMENT) ? ";; " : "", s_indent);
484 }
485 }
486 }
487 \f
488 void
489 dump_edge_info (FILE *file, edge e, int flags, int do_succ)
490 {
491 basic_block side = (do_succ ? e->dest : e->src);
492 bool do_details = false;
493
494 if ((flags & TDF_DETAILS) != 0
495 && (flags & TDF_SLIM) == 0)
496 do_details = true;
497
498 if (side->index == ENTRY_BLOCK)
499 fputs (" ENTRY", file);
500 else if (side->index == EXIT_BLOCK)
501 fputs (" EXIT", file);
502 else
503 fprintf (file, " %d", side->index);
504
505 if (e->probability && do_details)
506 fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE);
507
508 if (e->count && do_details)
509 {
510 fputs (" count:", file);
511 fprintf (file, "%" PRId64, e->count);
512 }
513
514 if (e->flags && do_details)
515 {
516 static const char * const bitnames[] =
517 {
518 #define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
519 #include "cfg-flags.def"
520 NULL
521 #undef DEF_EDGE_FLAG
522 };
523 bool comma = false;
524 int i, flags = e->flags;
525
526 gcc_assert (e->flags <= EDGE_ALL_FLAGS);
527 fputs (" (", file);
528 for (i = 0; flags; i++)
529 if (flags & (1 << i))
530 {
531 flags &= ~(1 << i);
532
533 if (comma)
534 fputc (',', file);
535 fputs (bitnames[i], file);
536 comma = true;
537 }
538
539 fputc (')', file);
540 }
541 }
542
543 DEBUG_FUNCTION void
544 debug (edge_def &ref)
545 {
 546 /* FIXME (crowl): Is this desirable? */
547 dump_edge_info (stderr, &ref, 0, false);
548 dump_edge_info (stderr, &ref, 0, true);
549 }
550
551 DEBUG_FUNCTION void
552 debug (edge_def *ptr)
553 {
554 if (ptr)
555 debug (*ptr);
556 else
557 fprintf (stderr, "<nil>\n");
558 }
559 \f
560 /* Simple routines to easily allocate AUX fields of basic blocks. */
561
562 static struct obstack block_aux_obstack;
563 static void *first_block_aux_obj = 0;
564 static struct obstack edge_aux_obstack;
565 static void *first_edge_aux_obj = 0;
566
567 /* Allocate a memory block of SIZE as BB->aux. The obstack must
568 be first initialized by alloc_aux_for_blocks. */
569
570 static void
571 alloc_aux_for_block (basic_block bb, int size)
572 {
573 /* Verify that aux field is clear. */
574 gcc_assert (!bb->aux && first_block_aux_obj);
575 bb->aux = obstack_alloc (&block_aux_obstack, size);
576 memset (bb->aux, 0, size);
577 }
578
579 /* Initialize the block_aux_obstack and if SIZE is nonzero, call
580 alloc_aux_for_block for each basic block. */
581
582 void
583 alloc_aux_for_blocks (int size)
584 {
585 static int initialized;
586
587 if (!initialized)
588 {
589 gcc_obstack_init (&block_aux_obstack);
590 initialized = 1;
591 }
592 else
593 /* Check whether AUX data are still allocated. */
594 gcc_assert (!first_block_aux_obj);
595
596 first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
597 if (size)
598 {
599 basic_block bb;
600
601 FOR_ALL_BB_FN (bb, cfun)
602 alloc_aux_for_block (bb, size);
603 }
604 }
605
606 /* Clear AUX pointers of all blocks. */
607
608 void
609 clear_aux_for_blocks (void)
610 {
611 basic_block bb;
612
613 FOR_ALL_BB_FN (bb, cfun)
614 bb->aux = NULL;
615 }
616
617 /* Free data allocated in block_aux_obstack and clear AUX pointers
618 of all blocks. */
619
620 void
621 free_aux_for_blocks (void)
622 {
623 gcc_assert (first_block_aux_obj);
624 obstack_free (&block_aux_obstack, first_block_aux_obj);
625 first_block_aux_obj = NULL;
626
627 clear_aux_for_blocks ();
628 }
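/* A minimal sketch of the intended life cycle of the block AUX data,
   assuming a hypothetical pass that wants one int of scratch space per
   basic block:

     basic_block bb;
     alloc_aux_for_blocks (sizeof (int));
     FOR_EACH_BB_FN (bb, cfun)
       *(int *) bb->aux = bb->index;
     ...
     free_aux_for_blocks ();

   Calling alloc_aux_for_blocks (0) only prepares the obstack without giving
   any block storage; the same pattern applies to the edge AUX routines
   below.  */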
629
 630 /* Allocate a memory block of SIZE as E->aux. The obstack must
631 be first initialized by alloc_aux_for_edges. */
632
633 void
634 alloc_aux_for_edge (edge e, int size)
635 {
636 /* Verify that aux field is clear. */
637 gcc_assert (!e->aux && first_edge_aux_obj);
638 e->aux = obstack_alloc (&edge_aux_obstack, size);
639 memset (e->aux, 0, size);
640 }
641
642 /* Initialize the edge_aux_obstack and if SIZE is nonzero, call
 643 alloc_aux_for_edge for each edge. */
644
645 void
646 alloc_aux_for_edges (int size)
647 {
648 static int initialized;
649
650 if (!initialized)
651 {
652 gcc_obstack_init (&edge_aux_obstack);
653 initialized = 1;
654 }
655 else
656 /* Check whether AUX data are still allocated. */
657 gcc_assert (!first_edge_aux_obj);
658
659 first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
660 if (size)
661 {
662 basic_block bb;
663
664 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
665 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
666 {
667 edge e;
668 edge_iterator ei;
669
670 FOR_EACH_EDGE (e, ei, bb->succs)
671 alloc_aux_for_edge (e, size);
672 }
673 }
674 }
675
676 /* Clear AUX pointers of all edges. */
677
678 void
679 clear_aux_for_edges (void)
680 {
681 basic_block bb;
682 edge e;
683
684 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
685 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
686 {
687 edge_iterator ei;
688 FOR_EACH_EDGE (e, ei, bb->succs)
689 e->aux = NULL;
690 }
691 }
692
693 /* Free data allocated in edge_aux_obstack and clear AUX pointers
694 of all edges. */
695
696 void
697 free_aux_for_edges (void)
698 {
699 gcc_assert (first_edge_aux_obj);
700 obstack_free (&edge_aux_obstack, first_edge_aux_obj);
701 first_edge_aux_obj = NULL;
702
703 clear_aux_for_edges ();
704 }
705
706 DEBUG_FUNCTION void
707 debug_bb (basic_block bb)
708 {
709 dump_bb (stderr, bb, 0, dump_flags);
710 }
711
712 DEBUG_FUNCTION basic_block
713 debug_bb_n (int n)
714 {
715 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
716 debug_bb (bb);
717 return bb;
718 }
719
720 /* Dumps cfg related information about basic block BB to OUTF.
721 If HEADER is true, dump things that appear before the instructions
722 contained in BB. If FOOTER is true, dump things that appear after.
723 Flags are the TDF_* masks as documented in dumpfile.h.
724 NB: With TDF_DETAILS, it is assumed that cfun is available, so
725 that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE. */
726
727 void
728 dump_bb_info (FILE *outf, basic_block bb, int indent, int flags,
729 bool do_header, bool do_footer)
730 {
731 edge_iterator ei;
732 edge e;
733 static const char * const bb_bitnames[] =
734 {
735 #define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
736 #include "cfg-flags.def"
737 NULL
738 #undef DEF_BASIC_BLOCK_FLAG
739 };
740 const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
741 bool first;
742 char *s_indent = (char *) alloca ((size_t) indent + 1);
743 memset ((void *) s_indent, ' ', (size_t) indent);
744 s_indent[indent] = '\0';
745
746 gcc_assert (bb->flags <= BB_ALL_FLAGS);
747
748 if (do_header)
749 {
750 unsigned i;
751
752 if (flags & TDF_COMMENT)
753 fputs (";; ", outf);
754 fprintf (outf, "%sbasic block %d, loop depth %d",
755 s_indent, bb->index, bb_loop_depth (bb));
756 if (flags & TDF_DETAILS)
757 {
758 struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
759 fprintf (outf, ", count " "%" PRId64,
760 (int64_t) bb->count);
761 fprintf (outf, ", freq %i", bb->frequency);
762 if (maybe_hot_bb_p (fun, bb))
763 fputs (", maybe hot", outf);
764 if (probably_never_executed_bb_p (fun, bb))
765 fputs (", probably never executed", outf);
766 }
767 fputc ('\n', outf);
768
769 if (flags & TDF_DETAILS)
770 {
771 check_bb_profile (bb, outf, indent, flags);
772 if (flags & TDF_COMMENT)
773 fputs (";; ", outf);
774 fprintf (outf, "%s prev block ", s_indent);
775 if (bb->prev_bb)
776 fprintf (outf, "%d", bb->prev_bb->index);
777 else
778 fprintf (outf, "(nil)");
779 fprintf (outf, ", next block ");
780 if (bb->next_bb)
781 fprintf (outf, "%d", bb->next_bb->index);
782 else
783 fprintf (outf, "(nil)");
784
785 fputs (", flags:", outf);
786 first = true;
787 for (i = 0; i < n_bitnames; i++)
788 if (bb->flags & (1 << i))
789 {
790 if (first)
791 fputs (" (", outf);
792 else
793 fputs (", ", outf);
794 first = false;
795 fputs (bb_bitnames[i], outf);
796 }
797 if (!first)
798 fputc (')', outf);
799 fputc ('\n', outf);
800 }
801
802 if (flags & TDF_COMMENT)
803 fputs (";; ", outf);
804 fprintf (outf, "%s pred: ", s_indent);
805 first = true;
806 FOR_EACH_EDGE (e, ei, bb->preds)
807 {
808 if (! first)
809 {
810 if (flags & TDF_COMMENT)
811 fputs (";; ", outf);
812 fprintf (outf, "%s ", s_indent);
813 }
814 first = false;
815 dump_edge_info (outf, e, flags, 0);
816 fputc ('\n', outf);
817 }
818 if (first)
819 fputc ('\n', outf);
820 }
821
822 if (do_footer)
823 {
824 if (flags & TDF_COMMENT)
825 fputs (";; ", outf);
826 fprintf (outf, "%s succ: ", s_indent);
827 first = true;
828 FOR_EACH_EDGE (e, ei, bb->succs)
829 {
830 if (! first)
831 {
832 if (flags & TDF_COMMENT)
833 fputs (";; ", outf);
834 fprintf (outf, "%s ", s_indent);
835 }
836 first = false;
837 dump_edge_info (outf, e, flags, 1);
838 fputc ('\n', outf);
839 }
840 if (first)
841 fputc ('\n', outf);
842 }
843 }
844
 845 /* Dumps a brief description of the CFG to FILE. */
846
847 void
848 brief_dump_cfg (FILE *file, int flags)
849 {
850 basic_block bb;
851
852 FOR_EACH_BB_FN (bb, cfun)
853 {
854 dump_bb_info (file, bb, 0,
855 flags & (TDF_COMMENT | TDF_DETAILS),
856 true, true);
857 }
858 }
859
 860 /* An edge originally destined for BB, with frequency EDGE_FREQUENCY and count
 861 COUNT, has been proved to leave the block through TAKEN_EDGE. Update the
 862 profile of BB so that such an edge can be redirected to the destination of TAKEN_EDGE.
 863
 864 This function may leave the profile inconsistent in case the TAKEN_EDGE
 865 frequency or count is believed to be lower than EDGE_FREQUENCY or COUNT
 866 respectively. */
867 void
868 update_bb_profile_for_threading (basic_block bb, int edge_frequency,
869 gcov_type count, edge taken_edge)
870 {
871 edge c;
872 int prob;
873 edge_iterator ei;
874
875 bb->count -= count;
876 if (bb->count < 0)
877 {
878 if (dump_file)
879 fprintf (dump_file, "bb %i count became negative after threading",
880 bb->index);
881 bb->count = 0;
882 }
883
884 /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
885 Watch for overflows. */
886 if (bb->frequency)
887 prob = GCOV_COMPUTE_SCALE (edge_frequency, bb->frequency);
888 else
889 prob = 0;
890 if (prob > taken_edge->probability)
891 {
892 if (dump_file)
893 fprintf (dump_file, "Jump threading proved probability of edge "
894 "%i->%i too small (it is %i, should be %i).\n",
895 taken_edge->src->index, taken_edge->dest->index,
896 taken_edge->probability, prob);
897 prob = taken_edge->probability;
898 }
899
900 /* Now rescale the probabilities. */
901 taken_edge->probability -= prob;
902 prob = REG_BR_PROB_BASE - prob;
903 bb->frequency -= edge_frequency;
904 if (bb->frequency < 0)
905 bb->frequency = 0;
906 if (prob <= 0)
907 {
908 if (dump_file)
 909 fprintf (dump_file, "Edge frequencies of bb %i have been reset, "
910 "frequency of block should end up being 0, it is %i\n",
911 bb->index, bb->frequency);
912 EDGE_SUCC (bb, 0)->probability = REG_BR_PROB_BASE;
913 ei = ei_start (bb->succs);
914 ei_next (&ei);
915 for (; (c = ei_safe_edge (ei)); ei_next (&ei))
916 c->probability = 0;
917 }
918 else if (prob != REG_BR_PROB_BASE)
919 {
920 int scale = RDIV (65536 * REG_BR_PROB_BASE, prob);
921
922 FOR_EACH_EDGE (c, ei, bb->succs)
923 {
924 /* Protect from overflow due to additional scaling. */
925 if (c->probability > prob)
926 c->probability = REG_BR_PROB_BASE;
927 else
928 {
929 c->probability = RDIV (c->probability * scale, 65536);
930 if (c->probability > REG_BR_PROB_BASE)
931 c->probability = REG_BR_PROB_BASE;
932 }
933 }
934 }
935
936 gcc_assert (bb == taken_edge->src);
937 taken_edge->count -= count;
938 if (taken_edge->count < 0)
939 {
940 if (dump_file)
941 fprintf (dump_file, "edge %i->%i count became negative after threading",
942 taken_edge->src->index, taken_edge->dest->index);
943 taken_edge->count = 0;
944 }
945 }
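/* Illustrative numbers (made up, not from a real testcase): if BB has
   frequency 1000 and successor probabilities 2500 and 7500 out of
   REG_BR_PROB_BASE == 10000, and the threaded edge carried EDGE_FREQUENCY
   400 through the 2500 successor, PROB is first computed as 4000 and then
   clamped to 2500.  The remaining successor probabilities are then rescaled
   by REG_BR_PROB_BASE / (REG_BR_PROB_BASE - 2500), leaving the two
   successors at roughly 0 and 10000.  */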
946
947 /* Multiply all frequencies of basic blocks in array BBS of length NBBS
948 by NUM/DEN, in int arithmetic. May lose some accuracy. */
949 void
950 scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den)
951 {
952 int i;
953 edge e;
954 if (num < 0)
955 num = 0;
956
 957 /* Scale NUM and DEN to avoid overflows. Frequencies are on the order of
 958 10^4; if we make DEN <= 10^3, we can afford to upscale by 100
 959 and still safely fit in an int during calculations. */
960 if (den > 1000)
961 {
962 if (num > 1000000)
963 return;
964
965 num = RDIV (1000 * num, den);
966 den = 1000;
967 }
968 if (num > 100 * den)
969 return;
970
971 for (i = 0; i < nbbs; i++)
972 {
973 edge_iterator ei;
974 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
975 /* Make sure the frequencies do not grow over BB_FREQ_MAX. */
976 if (bbs[i]->frequency > BB_FREQ_MAX)
977 bbs[i]->frequency = BB_FREQ_MAX;
978 bbs[i]->count = RDIV (bbs[i]->count * num, den);
979 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
980 e->count = RDIV (e->count * num, den);
981 }
982 }
983
 984 /* Numbers smaller than this value are safe to multiply without risking
 985 64-bit overflow. */
986 #define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))
987
988 /* Multiply all frequencies of basic blocks in array BBS of length NBBS
 989 by NUM/DEN, in gcov_type arithmetic. More accurate than the previous
 990 function, but considerably slower. */
991 void
992 scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
993 gcov_type den)
994 {
995 int i;
996 edge e;
997 gcov_type fraction = RDIV (num * 65536, den);
998
999 gcc_assert (fraction >= 0);
1000
1001 if (num < MAX_SAFE_MULTIPLIER)
1002 for (i = 0; i < nbbs; i++)
1003 {
1004 edge_iterator ei;
1005 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
1006 if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
1007 bbs[i]->count = RDIV (bbs[i]->count * num, den);
1008 else
1009 bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
1010 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
1011 if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
1012 e->count = RDIV (e->count * num, den);
1013 else
1014 e->count = RDIV (e->count * fraction, 65536);
1015 }
1016 else
1017 for (i = 0; i < nbbs; i++)
1018 {
1019 edge_iterator ei;
1020 if (sizeof (gcov_type) > sizeof (int))
1021 bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
1022 else
1023 bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536);
1024 bbs[i]->count = RDIV (bbs[i]->count * fraction, 65536);
1025 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
1026 e->count = RDIV (e->count * fraction, 65536);
1027 }
1028 }
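/* In both loops above, FRACTION is NUM/DEN in 16.16 fixed point, so
   multiplying by FRACTION and dividing by 65536 approximates multiplication
   by NUM/DEN while keeping intermediate products within gcov_type range.
   For example, NUM = 3 and DEN = 4 give FRACTION == 49152, and
   RDIV (1000 * 49152, 65536) == 750, i.e. 1000 * 3 / 4.  */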
1029
1030 /* Helper types for hash tables. */
1031
1032 struct htab_bb_copy_original_entry
1033 {
1034 /* Block we are attaching info to. */
1035 int index1;
 1036 /* Index of the original or the copy (depending on the hash table). */
1037 int index2;
1038 };
1039
1040 struct bb_copy_hasher : typed_noop_remove <htab_bb_copy_original_entry>
1041 {
1042 typedef htab_bb_copy_original_entry *value_type;
1043 typedef htab_bb_copy_original_entry *compare_type;
1044 static inline hashval_t hash (const htab_bb_copy_original_entry *);
1045 static inline bool equal (const htab_bb_copy_original_entry *existing,
1046 const htab_bb_copy_original_entry * candidate);
1047 };
1048
1049 inline hashval_t
1050 bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
1051 {
1052 return data->index1;
1053 }
1054
1055 inline bool
1056 bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
1057 const htab_bb_copy_original_entry *data2)
1058 {
1059 return data->index1 == data2->index1;
1060 }
1061
1062 /* Data structures used to maintain mapping between basic blocks and
1063 copies. */
1064 static hash_table<bb_copy_hasher> *bb_original;
1065 static hash_table<bb_copy_hasher> *bb_copy;
1066
1067 /* And between loops and copies. */
1068 static hash_table<bb_copy_hasher> *loop_copy;
1069 static alloc_pool original_copy_bb_pool;
1070
1071
1072 /* Initialize the data structures to maintain mapping between blocks
 1073 and their copies. */
1074 void
1075 initialize_original_copy_tables (void)
1076 {
1077 gcc_assert (!original_copy_bb_pool);
1078 original_copy_bb_pool
1079 = create_alloc_pool ("original_copy",
1080 sizeof (struct htab_bb_copy_original_entry), 10);
1081 bb_original = new hash_table<bb_copy_hasher> (10);
1082 bb_copy = new hash_table<bb_copy_hasher> (10);
1083 loop_copy = new hash_table<bb_copy_hasher> (10);
1084 }
1085
1086 /* Free the data structures to maintain mapping between blocks and
 1087 their copies. */
1088 void
1089 free_original_copy_tables (void)
1090 {
1091 gcc_assert (original_copy_bb_pool);
1092 delete bb_copy;
1093 bb_copy = NULL;
1094 delete bb_original;
 1095 bb_original = NULL;
1096 delete loop_copy;
1097 loop_copy = NULL;
1098 free_alloc_pool (original_copy_bb_pool);
1099 original_copy_bb_pool = NULL;
1100 }
1101
1102 /* Removes the value associated with OBJ from table TAB. */
1103
1104 static void
1105 copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
1106 {
1107 htab_bb_copy_original_entry **slot;
1108 struct htab_bb_copy_original_entry key, *elt;
1109
1110 if (!original_copy_bb_pool)
1111 return;
1112
1113 key.index1 = obj;
1114 slot = tab->find_slot (&key, NO_INSERT);
1115 if (!slot)
1116 return;
1117
1118 elt = *slot;
1119 tab->clear_slot (slot);
1120 pool_free (original_copy_bb_pool, elt);
1121 }
1122
1123 /* Sets the value associated with OBJ in table TAB to VAL.
1124 Do nothing when data structures are not initialized. */
1125
1126 static void
1127 copy_original_table_set (hash_table<bb_copy_hasher> *tab,
1128 unsigned obj, unsigned val)
1129 {
1130 struct htab_bb_copy_original_entry **slot;
1131 struct htab_bb_copy_original_entry key;
1132
1133 if (!original_copy_bb_pool)
1134 return;
1135
1136 key.index1 = obj;
1137 slot = tab->find_slot (&key, INSERT);
1138 if (!*slot)
1139 {
1140 *slot = (struct htab_bb_copy_original_entry *)
1141 pool_alloc (original_copy_bb_pool);
1142 (*slot)->index1 = obj;
1143 }
1144 (*slot)->index2 = val;
1145 }
1146
1147 /* Set original for basic block. Do nothing when data structures are not
1148 initialized so passes not needing this don't need to care. */
1149 void
1150 set_bb_original (basic_block bb, basic_block original)
1151 {
1152 copy_original_table_set (bb_original, bb->index, original->index);
1153 }
1154
1155 /* Get the original basic block. */
1156 basic_block
1157 get_bb_original (basic_block bb)
1158 {
1159 struct htab_bb_copy_original_entry *entry;
1160 struct htab_bb_copy_original_entry key;
1161
1162 gcc_assert (original_copy_bb_pool);
1163
1164 key.index1 = bb->index;
1165 entry = bb_original->find (&key);
1166 if (entry)
1167 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1168 else
1169 return NULL;
1170 }
1171
1172 /* Set copy for basic block. Do nothing when data structures are not
1173 initialized so passes not needing this don't need to care. */
1174 void
1175 set_bb_copy (basic_block bb, basic_block copy)
1176 {
1177 copy_original_table_set (bb_copy, bb->index, copy->index);
1178 }
1179
1180 /* Get the copy of basic block. */
1181 basic_block
1182 get_bb_copy (basic_block bb)
1183 {
1184 struct htab_bb_copy_original_entry *entry;
1185 struct htab_bb_copy_original_entry key;
1186
1187 gcc_assert (original_copy_bb_pool);
1188
1189 key.index1 = bb->index;
1190 entry = bb_copy->find (&key);
1191 if (entry)
1192 return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
1193 else
1194 return NULL;
1195 }
1196
1197 /* Set copy for LOOP to COPY. Do nothing when data structures are not
1198 initialized so passes not needing this don't need to care. */
1199
1200 void
1201 set_loop_copy (struct loop *loop, struct loop *copy)
1202 {
1203 if (!copy)
1204 copy_original_table_clear (loop_copy, loop->num);
1205 else
1206 copy_original_table_set (loop_copy, loop->num, copy->num);
1207 }
1208
1209 /* Get the copy of LOOP. */
1210
1211 struct loop *
1212 get_loop_copy (struct loop *loop)
1213 {
1214 struct htab_bb_copy_original_entry *entry;
1215 struct htab_bb_copy_original_entry key;
1216
1217 gcc_assert (original_copy_bb_pool);
1218
1219 key.index1 = loop->num;
1220 entry = loop_copy->find (&key);
1221 if (entry)
1222 return get_loop (cfun, entry->index2);
1223 else
1224 return NULL;
1225 }
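/* A small usage sketch (hypothetical pass code, not from GCC itself) of the
   original/copy tables above, where NEW_BB is a block the pass has copied
   from BB by whatever means:

     initialize_original_copy_tables ();
     set_bb_original (new_bb, bb);
     set_bb_copy (bb, new_bb);
     gcc_assert (get_bb_original (new_bb) == bb);
     gcc_assert (get_bb_copy (bb) == new_bb);
     free_original_copy_tables ();

   Higher-level helpers such as duplicate_block in cfghooks.c record this
   mapping themselves, so most passes only need the initialize/free pair
   plus the get_* accessors.  */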