/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  Other modules should not transform the data structure
   directly; they should go through the abstractions provided here
   instead.  The file is supposed to be ordered bottom-up and should not
   contain any code dependent on a particular intermediate language
   (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
       - Low level edge redirection (without updating instruction chain)
	 redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   A short usage sketch of the edge primitives follows below.

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */
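
/* The fragment below is an illustrative sketch only (hence kept under
   "#if 0"), showing how a pass might combine the edge primitives listed
   above.  The function name and the blocks A, B and C are made up for the
   example; nothing here is part of the API itself.  */
#if 0
static void
example_rewire_edges (basic_block a, basic_block b, basic_block c)
{
  /* make_edge refuses to create duplicates; it returns NULL and merely
     ORs in the flags when the edge already exists.  */
  edge e = make_edge (a, b, EDGE_FALLTHRU);
  if (!e)
    e = find_edge (a, b);

  /* Point the edge at C instead of B.  Only the CFG is updated; the
     instruction chain is left alone.  */
  redirect_edge_succ (e, c);

  /* Drop the edge again, updating the pred/succ vectors of A and C.  */
  remove_edge (e);
}
#endif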
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "hard-reg-set.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "cfganal.h"
#include "cfgloop.h" /* FIXME: For struct loop.  */
#include "dumpfile.h"

\f

/* Called once at initialization time.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}
\f
/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (function *fn, edge e)
{
  n_edges_for_fn (fn)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (struct function *fn)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (fn, e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
    free_edge (fn, e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);

  gcc_assert (!n_edges_for_fn (fn));
}
\f
/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  bb->count = profile_count::uninitialized ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we cannot yet.  Dead SSA_NAMEs
     are left pointing to dead statements, which in turn point to dead basic
     blocks, and that makes the garbage collector die.  We should release all
     dead SSA_NAMEs and at the same time consistently clear out the BB
     pointer of dead statements.  */
}
\f
/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->count = profile_count::uninitialized ();
  e->probability = profile_probability::uninitialized ();
  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using
   edge cache CACHE.  Return the new edge, or NULL if it already exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return the
   newly created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}

/* Create an edge connecting SRC to DEST and set its probability knowing
   that it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = profile_probability::always ();
  e->count = src->count;
  return e;
}

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (cfun, e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->flags &= BB_FLAGS_TO_PRESERVE;
}
\f
/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, later elimination of conditionals, or roundoff errors.
   It is still practical to have them reported for debugging of simple
   testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent)
{
  edge e;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      bool found = false;
      profile_probability sum = profile_probability::never ();
      int isum = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
	    found = true;
	  sum += e->probability;
	  if (e->probability.initialized_p ())
	    isum += e->probability.to_reg_br_prob_base ();
	}
      /* Only report mismatches for non-EH control flow.  If there are only
	 EH edges it means that the BB ends with a noreturn call.  Here the
	 control flow may just terminate.  */
      if (found)
	{
	  if (sum.differs_from_p (profile_probability::always ()))
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities ",
		       s_indent);
	      sum.dump (file);
	      fprintf (file, "\n");
	    }
	  /* Probabilities cap at 100%, so the previous test will never
	     fire if the sum of probabilities is too large.  */
	  else if (isum > REG_BR_PROB_BASE + 100)
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
		       s_indent, isum * 100.0 / REG_BR_PROB_BASE);
	    }
	  profile_count lsum = profile_count::zero ();
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    lsum += e->count;
	  if (EDGE_COUNT (bb->succs) && lsum.differs_from_p (bb->count))
	    {
	      fprintf (file, ";; %sInvalid sum of outgoing counts ",
		       s_indent);
	      lsum.dump (file);
	      fprintf (file, ", should be ");
	      bb->count.dump (file);
	      fprintf (file, "\n");
	    }
	}
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      int sum = 0;
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += EDGE_FREQUENCY (e);
      if (abs (sum - bb->frequency) > 100)
	fprintf (file,
		 ";; %sInvalid sum of incoming frequencies %i, should be %i\n",
		 s_indent, sum, bb->frequency);
      profile_count lsum = profile_count::zero ();
      FOR_EACH_EDGE (e, ei, bb->preds)
	lsum += e->count;
      if (lsum.differs_from_p (bb->count))
	{
	  fprintf (file, ";; %sInvalid sum of incoming counts ",
		   s_indent);
	  lsum.dump (file);
	  fprintf (file, ", should be ");
	  bb->count.dump (file);
	  fprintf (file, "\n");
	}
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
	 currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
	fprintf (file, ";; %sBlock in cold partition with hot count\n",
		 s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (!probably_never_executed_edge_p (fun, e))
	    fprintf (file,
		     ";; %sBlock in cold partition with incoming hot edge\n",
		     s_indent);
	}
    }
}
\f
void
dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;

  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability.initialized_p () && do_details)
    {
      fprintf (file, " [");
      e->probability.dump (file);
      fprintf (file, "] ");
    }

  if (e->count.initialized_p () && do_details)
    {
      fputs (" count:", file);
      e->count.dump (file);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
\f
/* Simple routines to easily allocate AUX fields of basic blocks.  */

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}
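
/* A minimal illustrative sketch (kept under "#if 0", not part of the
   original interface) of the intended pairing of alloc_aux_for_blocks and
   free_aux_for_blocks: a pass allocates per-block scratch data, uses
   bb->aux while it runs, and releases everything at the end.  The struct
   and function names are made up for the example.  */
#if 0
struct example_block_info
{
  int visited;
};

static void
example_use_block_aux (void)
{
  basic_block bb;

  /* Hang one zero-initialized example_block_info off every bb->aux.  */
  alloc_aux_for_blocks (sizeof (struct example_block_info));

  FOR_EACH_BB_FN (bb, cfun)
    ((struct example_block_info *) bb->aux)->visited = 1;

  /* Free the obstack and clear every bb->aux pointer again.  */
  free_aux_for_blocks ();
}
#endif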

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  if (bb->count.initialized_p ())
	    {
	      fputs (", count ", outf);
	      bb->count.dump (outf);
	    }
	  fprintf (outf, ", freq %i", bb->frequency);
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent);
	  fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      fputs (";; ", outf);
      fprintf (outf, "%s pred: ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      fputs (";; ", outf);
      fprintf (outf, "%s succ: ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, dump_flags_t flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
    }
}

/* An edge whose original destination was BB, carrying frequency
   EDGE_FREQUENCY and count COUNT, has been proved to leave the block via
   TAKEN_EDGE.  Update the profile of BB so that the edge can be redirected
   to the destination of TAKEN_EDGE.

   This function may leave the profile inconsistent in the case that
   TAKEN_EDGE's frequency or count is believed to be lower than
   EDGE_FREQUENCY or COUNT respectively.  */
void
update_bb_profile_for_threading (basic_block bb, int edge_frequency,
				 profile_count count, edge taken_edge)
{
  edge c;
  profile_probability prob;
  edge_iterator ei;

  if (bb->count < count)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
    }
  bb->count -= count;

  bb->frequency -= edge_frequency;
  if (bb->frequency < 0)
    bb->frequency = 0;

  /* Compute the probability of TAKEN_EDGE being reached via the threaded
     edge.  Watch for overflows.  */
  if (bb->frequency)
    /* FIXME: We should get edge frequency as count.  */
    prob = profile_probability::probability_in_gcov_type
		 (edge_frequency, bb->frequency);
  else
    prob = profile_probability::never ();
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Jump threading proved probability of edge "
		   "%i->%i too small (it is ",
		   taken_edge->src->index, taken_edge->dest->index);
	  taken_edge->probability.dump (dump_file);
	  fprintf (dump_file, " should be ");
	  prob.dump (dump_file);
	  fprintf (dump_file, ")\n");
	}
      prob = taken_edge->probability.apply_scale (6, 8);
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = prob.invert ();
  if (prob == profile_probability::never ())
    {
      if (dump_file)
	fprintf (dump_file, "Edge frequencies of bb %i has been reset, "
		 "frequency of block should end up being 0, it is %i\n",
		 bb->index, bb->frequency);
      EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = profile_probability::guessed_never ();
    }
  else if (!(prob == profile_probability::always ()))
    {
      FOR_EACH_EDGE (c, ei, bb->succs)
	c->probability /= prob;
    }

  gcc_assert (bb == taken_edge->src);
  if (dump_file && taken_edge->count < count)
    fprintf (dump_file, "edge %i->%i count became negative after threading",
	     taken_edge->src->index, taken_edge->dest->index);
  taken_edge->count -= count;
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in int arithmetic.  May lose some accuracy.  */
void
scale_bbs_frequencies_int (basic_block *bbs, int nbbs, int num, int den)
{
  int i;
  edge e;
  if (num < 0)
    num = 0;

  /* Scale NUM and DEN to avoid overflows.  Frequencies are on the order of
     10^4; if we make DEN <= 10^3, we can afford to upscale by 100 and still
     safely fit in an int during the calculations.  */
  if (den > 1000)
    {
      if (num > 1000000)
	return;

      num = RDIV (1000 * num, den);
      den = 1000;
    }
  if (num > 100 * den)
    return;

  for (i = 0; i < nbbs; i++)
    {
      edge_iterator ei;
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
      /* Make sure the frequencies do not grow over BB_FREQ_MAX.  */
      if (bbs[i]->frequency > BB_FREQ_MAX)
	bbs[i]->frequency = BB_FREQ_MAX;
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	e->count = e->count.apply_scale (num, den);
    }
}

/* Numbers smaller than this value are safe to multiply without getting
   a 64-bit overflow.  */
#define MAX_SAFE_MULTIPLIER (1 << (sizeof (int64_t) * 4 - 1))

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in gcov_type arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_gcov_type (basic_block *bbs, int nbbs, gcov_type num,
				 gcov_type den)
{
  int i;
  edge e;
  gcov_type fraction = RDIV (num * 65536, den);

  gcc_assert (fraction >= 0);

  if (num < MAX_SAFE_MULTIPLIER)
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	  bbs[i]->count = bbs[i]->count.apply_scale (num, den);
	else
	  bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  if (bbs[i]->count <= MAX_SAFE_MULTIPLIER)
	    e->count = e->count.apply_scale (num, den);
	  else
	    e->count = e->count.apply_scale (fraction, 65536);
      }
  else
    for (i = 0; i < nbbs; i++)
      {
	edge_iterator ei;
	if (sizeof (gcov_type) > sizeof (int))
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * num, den);
	else
	  bbs[i]->frequency = RDIV (bbs[i]->frequency * fraction, 65536);
	bbs[i]->count = bbs[i]->count.apply_scale (fraction, 65536);
	FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	  e->count = e->count.apply_scale (fraction, 65536);
      }
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in profile_count arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
				     profile_count num, profile_count den)
{
  int i;
  edge e;

  for (i = 0; i < nbbs; i++)
    {
      edge_iterator ei;
      bbs[i]->frequency = RDIV (bbs[i]->frequency * num.to_gcov_type (),
				den.to_gcov_type ());
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	e->count = e->count.apply_scale (num, den);
    }
}

/* Scale all frequencies and counts of basic blocks in array BBS of length
   NBBS by probability P.  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;
  edge e;

  for (i = 0; i < nbbs; i++)
    {
      edge_iterator ei;
      bbs[i]->frequency = p.apply (bbs[i]->frequency);
      bbs[i]->count = bbs[i]->count.apply_probability (p);
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	e->count = e->count.apply_probability (p);
    }
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable).  */
  int index2;
};

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
		       const htab_bb_copy_original_entry *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;

/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy");
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}
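
/* A minimal illustrative sketch (kept under "#if 0", not part of the
   original interface) of how the copy tables are typically used: record
   the mapping when a block is duplicated, query it later, and tear the
   tables down when the pass is done.  BB and COPY are assumed to be
   supplied by whatever duplication mechanism the pass uses.  */
#if 0
static void
example_track_copies (basic_block bb, basic_block copy)
{
  initialize_original_copy_tables ();

  /* Record that COPY was made from BB, then retrieve the mapping in
     either direction later on.  */
  set_bb_original (copy, bb);
  set_bb_copy (bb, copy);
  gcc_assert (get_bb_original (copy) == bb);
  gcc_assert (get_bb_copy (bb) == copy);

  free_original_copy_tables ();
}
#endif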

/* Reset the data structures to maintain mapping between blocks and
   their copies.  */

void
reset_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  bb_original->empty ();
  bb_copy->empty ();
  loop_copy->empty ();
}

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}

/* Return true iff we have had a call to initialize_original_copy_tables
   without a corresponding call to free_original_copy_tables.  */

bool
original_copy_tables_initialized_p (void)
{
  return original_copy_bb_pool != NULL;
}

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  original_copy_bb_pool->remove (elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, INSERT);
  if (!*slot)
    {
      *slot = original_copy_bb_pool->allocate ();
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}