/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low-level functions to manipulate and analyze the
   CFG that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
         delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
         insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
         purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
         fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to quickly determine the basic block for an insn
         compute_bb_for_insn, update_bb_for_insn, set_block_for_insn,
     - Edge redirection with updating and optimizing of insn chain
         block_label, tidy_fallthru_edge, force_nonfallthru */
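
/* A quick illustration of the edge-insertion API above.  This sketch is
   purely illustrative (PATTERN and E are hypothetical) and is not code
   used by GCC itself:

       insert_insn_on_edge (pattern, e);
       ...
       commit_edge_insertions ();

   commit_edge_insertions flushes all pending edge insertions at once and
   may split edges and create new basic blocks in the process.  */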
\f
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "bb-reorder.h"
#include "regs.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "rtl-error.h"
#include "tm_p.h"
#include "obstack.h"
#include "insn-attr.h"
#include "insn-config.h"
#include "expr.h"
#include "target.h"
#include "common/common-target.h"
#include "cfgloop.h"
#include "ggc.h"
#include "tree-pass.h"
#include "df.h"

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx cfg_layout_function_footer;
static GTY(()) rtx cfg_layout_function_header;

static rtx skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static rtx label_for_bb (basic_block);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static int can_delete_note_p (const_rtx);
static int can_delete_label_p (const_rtx);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static int rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, int);
static int rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
\f
/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static int
can_delete_note_p (const_rtx note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static int
can_delete_label_p (const_rtx label)
{
  return (!LABEL_PRESERVE_P (label)
          /* User-declared labels must be preserved.  */
          && LABEL_NAME (label) == 0
          && !in_expr_list_p (forced_labels, label));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx insn)
{
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as
         they might be referenced via variables, the constant pool, etc.
         Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (insn))
        {
          const char *name = LABEL_NAME (insn);
          basic_block bb = BLOCK_FOR_INSN (insn);
          rtx bb_note = NEXT_INSN (insn);

          really_delete = false;
          PUT_CODE (insn, NOTE);
          NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
          NOTE_DELETED_LABEL_NAME (insn) = name;

          /* If the note following the label starts a basic block, and the
             label is a member of the same basic block, interchange the two.  */
          if (bb_note != NULL_RTX
              && NOTE_INSN_BASIC_BLOCK_P (bb_note)
              && bb != NULL
              && bb == BLOCK_FOR_INSN (bb_note))
            {
              reorder_insns_nobb (insn, insn, bb_note);
              BB_HEAD (bb) = bb_note;
              if (BB_END (bb) == bb_note)
                BB_END (bb) = insn;
            }
        }

      remove_node_from_expr_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!INSN_DELETED_P (insn));
      if (INSN_P (insn))
        df_insn_delete (insn);
      remove_insn (insn);
      INSN_DELETED_P (insn) = 1;
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
          && LABEL_P (JUMP_LABEL (insn)))
        LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
              = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
             && LABEL_P (XEXP (note, 0)))
        {
          LABEL_NUSES (XEXP (note, 0))--;
          remove_note (insn, note);
        }
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
         && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (JUMP_TABLE_DATA_P (insn))
    {
      rtx pat = PATTERN (insn);
      int diff_vec_p = GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC;
      int len = XVECLEN (pat, diff_vec_p);
      int i;

      for (i = 0; i < len; i++)
        {
          rtx label = XEXP (XVECEXP (pat, diff_vec_p, i), 0);

          /* When deleting code in bulk (e.g. removing many unreachable
             blocks) we can delete a label that's a target of the vector
             before deleting the vector itself.  */
          if (!NOTE_P (label))
            LABEL_NUSES (label)--;
        }
    }
}

/* Like delete_insn but also purge dead edges from BB.  */

void
delete_insn_and_edges (rtx insn)
{
  bool purge = false;

  if (INSN_P (insn)
      && BLOCK_FOR_INSN (insn)
      && BB_END (BLOCK_FOR_INSN (insn)) == insn)
    purge = true;
  delete_insn (insn);
  if (purge)
    purge_dead_edges (BLOCK_FOR_INSN (insn));
}

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to
   NULL for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx finish, bool clear_bb)
{
  rtx prev, current;

  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  current = finish;
  while (1)
    {
      prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (current))
        ;
      else
        delete_insn (current);

      if (clear_bb && !INSN_DELETED_P (current))
        set_block_for_insn (current, NULL);

      if (current == start)
        break;
      current = prev;
    }
}
\f
/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction:
   it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain, and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block we should be put
   after.  */

basic_block
create_basic_block_structure (rtx head, rtx end, rtx bb_note, basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx after;

      if (LABEL_P (head))
        after = head;
      else
        {
          after = PREV_INSN (head);
          head = bb_note;
        }

      if (after != bb_note && NEXT_INSN (after) != bb_note)
        reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
        head = end = bb_note
          = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
        {
          bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
          if (head == end)
            end = bb_note;
        }
      else
        {
          bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
          head = bb_note;
          if (!end)
            end = head;
        }

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK (bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD and
   END and place it in the BB chain after block AFTER.  END can be NULL to
   create a new empty basic block before HEAD.  Both END and HEAD can be
   NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx head = (rtx) headp, end = (rtx) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block >= basic_block_info->length ())
    {
      size_t new_size = last_basic_block + (last_basic_block + 3) / 4;
      vec_safe_grow_cleared (basic_block_info, new_size);
    }

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}
\f
/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx insn, end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}
\f
/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      rtx end = BB_END (bb);
      rtx insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
        {
          BLOCK_FOR_INSN (insn) = bb;
          if (insn == end)
            break;
        }
    }
}

/* Release the basic_block_for_insn array.  */

unsigned int
free_bb_for_insn (void)
{
  rtx insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
  return 0;
}

static unsigned int
rest_of_pass_free_cfg (void)
{
#ifdef DELAY_SLOTS
  /* The resource.c machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }
#endif

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  false, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute () { return rest_of_pass_free_cfg (); }

}; // class pass_free_cfg

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the RTX after which to emit code at the entry of the function.  */
rtx
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
          BB_HEAD (ENTRY_BLOCK_PTR->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */
void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx begin, rtx end, basic_block bb)
{
  rtx insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

\f
/* Like active_insn_p, except keep the return value clobber around
   even after reload.  */

static bool
flow_active_insn_p (const_rtx insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.  */
  if (GET_CODE (PATTERN (insn)) == CLOBBER
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx insn;

  if (bb == EXIT_BLOCK_PTR || bb == ENTRY_BLOCK_PTR
      || !single_succ_p (bb))
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
          || (JUMP_P (insn) && simplejump_p (insn))
          || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
        return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
        return false;
    }

  return true;
}

/* Return nonzero if we can reach TARGET from SRC by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx insn = BB_END (src);
  rtx insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return nonzero if we could reach TARGET from SRC by falling through,
   if the target were made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR)
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR
        && e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}
\f
/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx
bb_note (basic_block bb)
{
  rtx note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return note;
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx
first_insn_after_basic_block_note (basic_block block)
{
  rtx insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL_RTX;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Creates a new basic block just after basic block BB by splitting
   everything after the specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx insn = (rtx) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
        {
          rtx next = insn;

          insn = PREV_INSN (insn);

          /* If the block contains only debug insns, insn would have
             been NULL in a non-debug compilation, and then we'd end
             up emitting a DELETED note.  For -fcompare-debug
             stability, emit the note too.  */
          if (insn != BB_END (bb)
              && DEBUG_INSN_P (next)
              && DEBUG_INSN_P (BB_END (bb)))
            {
              while (next != BB_END (bb) && DEBUG_INSN_P (next))
                next = NEXT_INSN (next);

              if (next == BB_END (bb))
                emit_note_after (NOTE_INSN_DELETED, next);
            }
        }
      else
        insn = get_last_insn ();
    }

  /* We probably should check the type of the insn so that we do not create
     an inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx insn, end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && INSN_LOCATION (insn) == goto_locus)
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
        insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
          && INSN_LOCATION (insn) == goto_locus)
        return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  rtx b_head = BB_HEAD (b), b_end = BB_END (b), a_end = BB_END (a);
  rtx del_first = NULL_RTX, del_last = NULL_RTX;
  rtx b_debug_start = b_end, b_debug_end = b_end;
  bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
  int b_empty = 0;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
             a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
         in particular at the end of a function.  */
      if (b_head == b_end)
        b_empty = 1;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
        b_empty = 1;
      if (! del_last)
        del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
        if (!NOTE_P (prev)
            || NOTE_INSN_BASIC_BLOCK_P (prev)
            || prev == BB_HEAD (a))
          break;

      del_first = a_end;

#ifdef HAVE_cc0
      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (only_sets_cc0_p (prev))
        {
          rtx tmp = prev;

          prev = prev_nonnote_insn (prev);
          if (!prev)
            prev = BB_HEAD (a);
          del_first = tmp;
        }
#endif

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL_RTX : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* When not optimizing the CFG and the edge is the only place in RTL which
     holds some unique locus, emit a nop with that locus in between.  */
  if (!optimize)
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL_RTX;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
         and the debug insns that make up B after the debug insns,
         bringing the debug insns into A while keeping the notes after
         the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
        reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
                            b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  /* If B was a forwarder block, propagate the locus on the edge.  */
  if (forwarder_p
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
          && single_succ (a) == b
          && single_pred_p (b)
          && a != b
          /* Must be a simple edge.  */
          && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
          && a->next_bb == b
          && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
          /* If the jump insn has side effects,
             we can't kill the edge.  */
          && (!JUMP_P (BB_END (a))
              || (reload_completed
                  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
\f
/* Return the label in the head of basic block BLOCK.  Create one if it
   doesn't exist.  */

rtx
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR)
    return NULL_RTX;

  if (!LABEL_P (BB_HEAD (block)))
    {
      BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
    }

  return BB_HEAD (block);
}
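
/* As a usage sketch (hypothetical blocks SRC and TARGET; illustrative
   only, not code used elsewhere in GCC), a caller retargeting a simple
   jump would typically pair block_label with redirect_jump:

       rtx label = block_label (target);
       if (label != NULL_RTX && redirect_jump (BB_END (src), label, 0))
         df_set_bb_dirty (src);

   block_label returns NULL_RTX only for the exit block, which has no
   insns to label.  */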

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump completely.
   This can apply only if all edges now point to the same block.  The
   parameters and return values are equivalent to redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx insn = BB_END (src), kill_from;
  rtx set;
  int fallthru = 0;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
         edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
          && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing a branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* In case we zap a conditional jump, we'll need to kill
     the cc0 setter too.  */
  kill_from = insn;
#ifdef HAVE_cc0
  if (reg_mentioned_p (cc0_rtx, PATTERN (insn))
      && only_sets_cc0_p (PREV_INSN (insn)))
    kill_from = PREV_INSN (insn);
#endif

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
        fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = 1;

      /* Selectively unlink the whole insn chain.  */
      if (in_cfglayout)
        {
          rtx insn = BB_FOOTER (src);

          delete_insn_chain (kill_from, BB_END (src), false);

          /* Remove barriers but keep jumptables.  */
          while (insn)
            {
              if (BARRIER_P (insn))
                {
                  if (PREV_INSN (insn))
                    NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
                  else
                    BB_FOOTER (src) = NEXT_INSN (insn);
                  if (NEXT_INSN (insn))
                    PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
                }
              if (LABEL_P (insn))
                break;
              insn = NEXT_INSN (insn);
            }
        }
      else
        delete_insn_chain (kill_from, PREV_INSN (BB_HEAD (target)),
                           false);
    }

  /* If this already is a simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
        return NULL;
      if (dump_file)
        fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
                 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (insn, block_label (target), 0))
        {
          gcc_assert (target == EXIT_BLOCK_PTR);
          return NULL;
        }
    }

  /* Cannot do anything for the target exit block.  */
  else if (target == EXIT_BLOCK_PTR)
    return NULL;

  /* Or replace a possibly complicated jump insn by a simple jump insn.  */
  else
    {
      rtx target_label = block_label (target);
      rtx barrier, label, table;

      emit_jump_insn_after_noloc (gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
        fprintf (dump_file, "Replacing insn %i by jump %i\n",
                 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (kill_from, insn, false);

      /* Recognize a tablejump that we are converting to a
         simple jump and remove its associated CODE_LABEL
         and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
        delete_insn_chain (label, table, false);

      barrier = next_nonnote_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
        emit_barrier_after (BB_END (src));
      else
        {
          if (barrier != NEXT_INSN (BB_END (src)))
            {
              /* Move the jump before the barrier so that the notes
                 which were originally, or were created, before the jump
                 table are inside the basic block.  */
              rtx new_insn = BB_END (src);

              update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
                                        PREV_INSN (barrier), src);

              NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
              PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

              NEXT_INSN (new_insn) = barrier;
              NEXT_INSN (PREV_INSN (barrier)) = new_insn;

              PREV_INSN (new_insn) = PREV_INSN (barrier);
              PREV_INSN (barrier) = new_insn;
            }
        }
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = REG_BR_PROB_BASE;
  e->count = src->count;

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this
   only when it originally jumped to OLD_LABEL.  Return true if this
   worked or if the original target wasn't OLD_LABEL; return false if
   redirection doesn't work.  */

static bool
patch_jump_insn (rtx insn, rtx old_label, basic_block new_bb)
{
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &tmp))
    {
      rtvec vec;
      int j;
      rtx new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR)
        return false;
      if (GET_CODE (PATTERN (tmp)) == ADDR_VEC)
        vec = XVEC (PATTERN (tmp), 0);
      else
        vec = XVEC (PATTERN (tmp), 1);

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
        if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
          {
            RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
            --LABEL_NUSES (old_label);
            ++LABEL_NUSES (new_label);
          }

      /* Handle casesi dispatch insns.  */
      if ((tmp = single_set (insn)) != NULL
          && SET_DEST (tmp) == pc_rtx
          && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
          && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF
          && XEXP (XEXP (SET_SRC (tmp), 2), 0) == old_label)
        {
          XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
                                                       new_label);
          --LABEL_NUSES (old_label);
          ++LABEL_NUSES (new_label);
        }
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx new_label, note;

      if (new_bb == EXIT_BLOCK_PTR)
        return false;
      new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
        {
          rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
          gcc_assert (GET_CODE (old_ref) == LABEL_REF);
          if (XEXP (old_ref, 0) == old_label)
            {
              ASM_OPERANDS_LABEL (tmp, i)
                = gen_rtx_LABEL_REF (Pmode, new_label);
              --LABEL_NUSES (old_label);
              ++LABEL_NUSES (new_label);
            }
        }

      if (JUMP_LABEL (insn) == old_label)
        {
          JUMP_LABEL (insn) = new_label;
          note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
          if (note)
            remove_note (insn, note);
        }
      else
        {
          note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
          if (note)
            remove_note (insn, note);
          if (JUMP_LABEL (insn) != new_label
              && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
            add_reg_note (insn, REG_LABEL_TARGET, new_label);
        }
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
             != NULL_RTX)
        XEXP (note, 0) = new_label;
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
         one basic block to the other in case only one computed_jump is
         available.  */
      if (computed_jump_p (insn)
          /* A return instruction can't be redirected.  */
          || returnjump_p (insn))
        return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
        {
          /* If the insn doesn't go where we think, we're confused.  */
          gcc_assert (JUMP_LABEL (insn) == old_label);

          /* If the substitution doesn't succeed, die.  This can happen
             if the back end emitted unrecognizable instructions or if
             target is exit block on some arches.  */
          if (!redirect_jump (insn, block_label (new_bb), 0))
            {
              gcc_assert (new_bb == EXIT_BLOCK_PTR);
              return false;
            }
        }
    }
  return true;
}

/* Redirect edge representing branch of (un)conditional jump or tablejump.
   Return NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx insn = BB_END (src);

  /* We can only redirect non-fallthru edges of a jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (insn, old_label, target))
        return NULL;
    }
  else
    /* When expanding, this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (insn, old_label, target))
        return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
             e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  rtx note;

  if (e->src == ENTRY_BLOCK_PTR || e->dest == EXIT_BLOCK_PTR)
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure the reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      note = find_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
      if (JUMP_P (BB_END (e->src))
          && !note)
        add_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from the jump at the end of
         src if it exists, and if no other successors are
         still crossing.  */
      note = find_reg_note (BB_END (e->src), REG_CROSSING_JUMP, NULL_RTX);
      if (note)
        {
          bool has_crossing_succ = false;
          edge e2;
          edge_iterator ei;
          FOR_EACH_EDGE (e2, ei, e->src->succs)
            {
              has_crossing_succ |= (e2->flags & EDGE_CROSSING);
              if (has_crossing_succ)
                break;
            }
          if (!has_crossing_succ)
            remove_note (BB_END (e->src), note);
        }
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
         Note that force_nonfallthru will do any necessary partition
         boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
          && BB_PARTITION (bb) != BB_PARTITION (e->dest)
          && e->dest != EXIT_BLOCK_PTR)
        force_nonfallthru (e);
      else
        fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do that at
   the expense of adding new instructions or reordering basic blocks.

   The function can also be called with the edge destination already equal
   to TARGET; then it should try the simplifications and do nothing if none
   are possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.
   We still return NULL if E already pointed to TARGET and we didn't manage
   to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
              || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    BB_FOOTER (bb) = unlink_insn_chain (barrier, barrier);
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* If the last instruction is a conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect the fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR && e->dest != EXIT_BLOCK_PTR
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (BB_END (e->src), block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
        {
          int prob = XINT (note, 0);

          b->probability = prob;
          /* Update this to use GCOV_COMPUTE_SCALE.  */
          b->count = e->count * prob / REG_BR_PROB_BASE;
          e->probability -= e->probability;
          e->count -= b->count;
          if (e->probability < 0)
            e->probability = 0;
          if (e->count < 0)
            e->count = 0;
        }
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - a fallthru edge to the same block as an
         abnormal edge.
         We can't redirect the abnormal edge, but we can still split the
         fallthru one and create a separate abnormal edge to the original
         destination.  This allows bb-reorder to make such an edge
         non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR)
        {
          /* We can't redirect the entry block.  Create an empty block
             at the start of the function which we use to add the new
             jump.  */
          edge tmp;
          edge_iterator ei;
          bool found = false;

          basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL, ENTRY_BLOCK_PTR);

          /* Change the existing edge's source to be the new block, and add
             a new edge from the entry block to the new block.  */
          e->src = bb;
          for (ei = ei_start (ENTRY_BLOCK_PTR->succs); (tmp = ei_safe_edge (ei)); )
            {
              if (tmp == e)
                {
                  ENTRY_BLOCK_PTR->succs->unordered_remove (ei.index);
                  found = true;
                  break;
                }
              else
                ei_next (&ei);
            }

          gcc_assert (found);

          vec_safe_push (bb->succs, e);
          make_single_succ_edge (ENTRY_BLOCK_PTR, bb, EDGE_FALLTHRU);
        }
    }

  /* If e->src ends with an asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
        {
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
            {
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
              XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
              LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
              adjust_jump_target = true;
            }
          if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
            asm_goto_edge = true;
        }
      if (adjust_jump_target)
        {
          rtx insn = BB_END (e->src), note;
          rtx old_label = BB_HEAD (e->dest);
          rtx new_label = BB_HEAD (target);

          if (JUMP_LABEL (insn) == old_label)
            {
              JUMP_LABEL (insn) = new_label;
              note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
              if (note)
                remove_note (insn, note);
            }
          else
            {
              note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
              if (note)
                remove_note (insn, note);
              if (JUMP_LABEL (insn) != new_label
                  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
                add_reg_note (insn, REG_LABEL_TARGET, new_label);
            }
          while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
                 != NULL_RTX)
            XEXP (note, 0) = new_label;
        }
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      gcov_type count = e->count;
      int probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
         by searching forward from there.  Otherwise start searching
         forward from the last instruction of the old block.  */
      if (!tablejump_p (BB_END (e->src), NULL, &note))
        note = BB_END (e->src);
      note = NEXT_INSN (note);

      jump_block = create_basic_block (note, NULL, e->src);
      jump_block->count = count;
      jump_block->frequency = EDGE_FREQUENCY (e);

      /* Make sure the new block ends up in the correct hot/cold section.  */

      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;
      new_edge->count = count;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = REG_BR_PROB_BASE;

      /* If e->src was previously region crossing, it no longer is
         and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If the asm goto has any label refs to target's label,
         also add an edge from the asm goto bb to target.  */
      if (asm_goto_edge)
        {
          new_edge->probability /= 2;
          new_edge->count /= 2;
          jump_block->count /= 2;
          jump_block->frequency /= 2;
          new_edge = make_edge (new_edge->src, target,
                                e->flags & ~EDGE_FALLTHRU);
          new_edge->probability = probability - probability / 2;
          new_edge->count = count - count / 2;
        }

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR)
    {
      if (jump_label == ret_rtx)
        {
#ifdef HAVE_return
          emit_jump_insn_after_setloc (gen_return (), BB_END (jump_block), loc);
#else
          gcc_unreachable ();
#endif
        }
      else
        {
          gcc_assert (jump_label == simple_return_rtx);
#ifdef HAVE_simple_return
          emit_jump_insn_after_setloc (gen_simple_return (),
                                       BB_END (jump_block), loc);
#else
          gcc_unreachable ();
#endif
        }
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx label = block_label (target);
      emit_jump_insn_after_setloc (gen_jump (label), BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
   (and possibly create a new basic block) to make the edge non-fallthru.
   Return the newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect the edge even at the expense of creating a new jump insn or
   basic block.  Return the new basic block if created, NULL otherwise.
   The conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect the newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
          || single_succ_p (b)))
    {
#ifdef HAVE_cc0
      /* If this was a conditional jump, we need to also delete
         the insn that set cc0.  */
      if (any_condjump_p (q) && only_sets_cc0_p (PREV_INSN (q)))
        q = PREV_INSN (q);
#endif

      q = PREV_INSN (q);
    }

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
\f
/* Should move basic block BB after basic block AFTER.  NIY (not
   implemented yet).  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
                      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR, next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
        return bb;
    }
  /* Return the bb before EXIT_BLOCK_PTR.  */
  return bb->prev_bb;
}

/* Split a (typically critical) edge.  Return the new block.
   The edge must not be abnormal.

   ??? The code generally expects to be called on critical edges.
   The case of a block ending in an unconditional jump to a
   block with multiple predecessors is not handled optimally.  */

1803 static basic_block
1804 rtl_split_edge (edge edge_in)
1805 {
1806 basic_block bb, new_bb;
1807 rtx before;
1808
1809 /* Abnormal edges cannot be split. */
1810 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1811
1812 /* We are going to place the new block in front of edge destination.
1813 Avoid existence of fallthru predecessors. */
1814 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1815 {
1816 edge e = find_fallthru_edge (edge_in->dest->preds);
1817
1818 if (e)
1819 force_nonfallthru (e);
1820 }
1821
1822 /* Create the basic block note. */
1823 if (edge_in->dest != EXIT_BLOCK_PTR)
1824 before = BB_HEAD (edge_in->dest);
1825 else
1826 before = NULL_RTX;
1827
1828 /* If this is a fall through edge to the exit block, the blocks might be
1829 not adjacent, and the right place is after the source. */
1830 if ((edge_in->flags & EDGE_FALLTHRU) && edge_in->dest == EXIT_BLOCK_PTR)
1831 {
1832 before = NEXT_INSN (BB_END (edge_in->src));
1833 bb = create_basic_block (before, NULL, edge_in->src);
1834 BB_COPY_PARTITION (bb, edge_in->src);
1835 }
1836 else
1837 {
1838 if (edge_in->src == ENTRY_BLOCK_PTR)
1839 {
1840 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1841 BB_COPY_PARTITION (bb, edge_in->dest);
1842 }
1843 else
1844 {
1845 basic_block after = edge_in->dest->prev_bb;
1846 /* If this is post-bb reordering, and the edge crosses a partition
1847 boundary, the new block needs to be inserted in the bb chain
1848 at the end of the src partition (since we put the new bb into
1849 that partition, see below). Otherwise we may end up creating
1850 an extra partition crossing in the chain, which is illegal.
1851 It can't go after the src, because src may have a fall-through
1852 to a different block. */
1853 if (crtl->bb_reorder_complete
1854 && (edge_in->flags & EDGE_CROSSING))
1855 {
1856 after = last_bb_in_partition (edge_in->src);
1857 before = NEXT_INSN (BB_END (after));
1858 /* The instruction following the last bb in partition should
1859 be a barrier, since it cannot end in a fall-through. */
1860 gcc_checking_assert (BARRIER_P (before));
1861 before = NEXT_INSN (before);
1862 }
1863 bb = create_basic_block (before, NULL, after);
1864 /* Put the split bb into the src partition, to avoid creating
1865 a situation where a cold bb dominates a hot bb, in the case
1866 where src is cold and dest is hot. The src will dominate
1867 the new bb (whereas it might not have dominated dest). */
1868 BB_COPY_PARTITION (bb, edge_in->src);
1869 }
1870 }
1871
1872 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1873
1874 /* Can't allow a region crossing edge to be fallthrough. */
1875 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1876 && edge_in->dest != EXIT_BLOCK_PTR)
1877 {
1878 new_bb = force_nonfallthru (single_succ_edge (bb));
1879 gcc_assert (!new_bb);
1880 }
1881
1882 /* For non-fallthru edges, we must adjust the predecessor's
1883 jump instruction to target our new block. */
1884 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1885 {
1886 edge redirected = redirect_edge_and_branch (edge_in, bb);
1887 gcc_assert (redirected);
1888 }
1889 else
1890 {
1891 if (edge_in->src != ENTRY_BLOCK_PTR)
1892 {
1893 /* For an asm goto, even splitting of a fallthru edge might
1894 need insn patching, as other labels might point to the
1895 old label. */
1896 rtx last = BB_END (edge_in->src);
1897 if (last
1898 && JUMP_P (last)
1899 && edge_in->dest != EXIT_BLOCK_PTR
1900 && extract_asm_operands (PATTERN (last)) != NULL_RTX
1901 && patch_jump_insn (last, before, bb))
1902 df_set_bb_dirty (edge_in->src);
1903 }
1904 redirect_edge_succ (edge_in, bb);
1905 }
1906
1907 return bb;
1908 }
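
/* Note that rtl_split_edge is not called directly; passes reach it
   through the split_edge cfghook, e.g. (a minimal sketch):

     basic_block new_bb = split_edge (e);

   The new block is placed on E and normally falls through to E's old
   destination, which is what commit_one_edge_insertion below relies on. */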
1909
1910 /* Queue instructions for insertion on an edge between two basic blocks.
1911 The new instructions and basic blocks (if any) will not appear in the
1912 CFG until commit_edge_insertions is called. */
1913
1914 void
1915 insert_insn_on_edge (rtx pattern, edge e)
1916 {
1917 /* We cannot insert instructions on an abnormal critical edge.
1918 It will be easier to find the culprit if we die now. */
1919 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1920
1921 if (e->insns.r == NULL_RTX)
1922 start_sequence ();
1923 else
1924 push_to_sequence (e->insns.r);
1925
1926 emit_insn (pattern);
1927
1928 e->insns.r = get_insns ();
1929 end_sequence ();
1930 }
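
/* A minimal usage sketch (REG_A and REG_B are hypothetical pseudos, not
   names from this file): a pass that wants to materialize a copy on edge
   E queues it now and commits all queued insertions later:

     insert_insn_on_edge (gen_move_insn (reg_a, reg_b), e);
     ...
     commit_edge_insertions ();

   The insns stay buffered in E->insns.r until the commit, which may
   split E and create a new basic block to hold them. */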
1931
1932 /* Update the CFG for the instructions queued on edge E. */
1933
1934 void
1935 commit_one_edge_insertion (edge e)
1936 {
1937 rtx before = NULL_RTX, after = NULL_RTX, insns, tmp, last;
1938 basic_block bb;
1939
1940 /* Pull the insns off the edge now since the edge might go away. */
1941 insns = e->insns.r;
1942 e->insns.r = NULL_RTX;
1943
1944 /* Figure out where to put these insns. If the destination has
1945 one predecessor, insert there. Except for the exit block. */
1946 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR)
1947 {
1948 bb = e->dest;
1949
1950 /* Get the location correct wrt a code label, and "nice" wrt
1951 a basic block note, and before everything else. */
1952 tmp = BB_HEAD (bb);
1953 if (LABEL_P (tmp))
1954 tmp = NEXT_INSN (tmp);
1955 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1956 tmp = NEXT_INSN (tmp);
1957 if (tmp == BB_HEAD (bb))
1958 before = tmp;
1959 else if (tmp)
1960 after = PREV_INSN (tmp);
1961 else
1962 after = get_last_insn ();
1963 }
1964
1965 /* If the source has one successor and the edge is not abnormal,
1966 insert there. Except for the entry block.
1967 Don't do this if the predecessor ends in a jump other than an
1968 unconditional simple jump. E.g. for an asm goto that points all
1969 its labels at the fallthru basic block, we can't insert instructions
1970 before the asm goto, as the asm goto can have various side effects,
1971 and we can't emit instructions after the asm goto, as it must end
1972 the basic block. */
1973 else if ((e->flags & EDGE_ABNORMAL) == 0
1974 && single_succ_p (e->src)
1975 && e->src != ENTRY_BLOCK_PTR
1976 && (!JUMP_P (BB_END (e->src))
1977 || simplejump_p (BB_END (e->src))))
1978 {
1979 bb = e->src;
1980
1981 /* It is possible to have a non-simple jump here. Consider a target
1982 where some forms of unconditional jumps clobber a register. This
1983 happens on the fr30 for example.
1984
1985 We know this block has a single successor, so we can just emit
1986 the queued insns before the jump. */
1987 if (JUMP_P (BB_END (bb)))
1988 before = BB_END (bb);
1989 else
1990 {
1991 /* We'd better be fallthru, or we've lost track of what's what. */
1992 gcc_assert (e->flags & EDGE_FALLTHRU);
1993
1994 after = BB_END (bb);
1995 }
1996 }
1997
1998 /* Otherwise we must split the edge. */
1999 else
2000 {
2001 bb = split_edge (e);
2002
2003 /* If E crossed a partition boundary, we needed to make bb end in
2004 a region-crossing jump, even though it was originally fallthru. */
2005 if (JUMP_P (BB_END (bb)))
2006 before = BB_END (bb);
2007 else
2008 after = BB_END (bb);
2009 }
2010
2011 /* Now that we've found the spot, do the insertion. */
2012 if (before)
2013 {
2014 emit_insn_before_noloc (insns, before, bb);
2015 last = prev_nonnote_insn (before);
2016 }
2017 else
2018 last = emit_insn_after_noloc (insns, after, bb);
2019
2020 if (returnjump_p (last))
2021 {
2022 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2023 This is not currently a problem because this only happens
2024 for the (single) epilogue, which already has a fallthru edge
2025 to EXIT. */
2026
2027 e = single_succ_edge (bb);
2028 gcc_assert (e->dest == EXIT_BLOCK_PTR
2029 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2030
2031 e->flags &= ~EDGE_FALLTHRU;
2032 emit_barrier_after (last);
2033
2034 if (before)
2035 delete_insn (before);
2036 }
2037 else
2038 gcc_assert (!JUMP_P (last));
2039 }
2040
2041 /* Update the CFG for all queued instructions. */
2042
2043 void
2044 commit_edge_insertions (void)
2045 {
2046 basic_block bb;
2047
2048 /* Optimization passes that invoke this routine can cause hot blocks
2049 previously reached by both hot and cold blocks to become dominated only
2050 by cold blocks. This will cause the verification below to fail,
2051 and lead to now-cold code in the hot section. In some cases this
2052 may only be visible after newly unreachable blocks are deleted,
2053 which will be done by fixup_partitions. */
2054 fixup_partitions ();
2055
2056 #ifdef ENABLE_CHECKING
2057 verify_flow_info ();
2058 #endif
2059
2060 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
2061 {
2062 edge e;
2063 edge_iterator ei;
2064
2065 FOR_EACH_EDGE (e, ei, bb->succs)
2066 if (e->insns.r)
2067 commit_one_edge_insertion (e);
2068 }
2069 }
2070 \f
2071
2072 /* Print out RTL-specific basic block information (live information
2073 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2074 documented in dumpfile.h. */
2075
2076 static void
2077 rtl_dump_bb (FILE *outf, basic_block bb, int indent, int flags)
2078 {
2079 rtx insn;
2080 rtx last;
2081 char *s_indent;
2082
2083 s_indent = (char *) alloca ((size_t) indent + 1);
2084 memset (s_indent, ' ', (size_t) indent);
2085 s_indent[indent] = '\0';
2086
2087 if (df && (flags & TDF_DETAILS))
2088 {
2089 df_dump_top (bb, outf);
2090 putc ('\n', outf);
2091 }
2092
2093 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK)
2094 for (insn = BB_HEAD (bb), last = NEXT_INSN (BB_END (bb)); insn != last;
2095 insn = NEXT_INSN (insn))
2096 {
2097 if (flags & TDF_DETAILS)
2098 df_dump_insn_top (insn, outf);
2099 if (! (flags & TDF_SLIM))
2100 print_rtl_single (outf, insn);
2101 else
2102 dump_insn_slim (outf, insn);
2103 if (flags & TDF_DETAILS)
2104 df_dump_insn_bottom (insn, outf);
2105 }
2106
2107 if (df && (flags & TDF_DETAILS))
2108 {
2109 df_dump_bottom (bb, outf);
2110 putc ('\n', outf);
2111 }
2112
2113 }
2114 \f
2115 /* Like dump_function_to_file, but for RTL. Print out dataflow information
2116 for the start of each basic block. FLAGS are the TDF_* masks documented
2117 in dumpfile.h. */
2118
2119 void
2120 print_rtl_with_bb (FILE *outf, const_rtx rtx_first, int flags)
2121 {
2122 const_rtx tmp_rtx;
2123 if (rtx_first == 0)
2124 fprintf (outf, "(nil)\n");
2125 else
2126 {
2127 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2128 int max_uid = get_max_uid ();
2129 basic_block *start = XCNEWVEC (basic_block, max_uid);
2130 basic_block *end = XCNEWVEC (basic_block, max_uid);
2131 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2132 basic_block bb;
2133
2134 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2135 insns, but the CFG is not maintained so the basic block info
2136 is not reliable. Therefore it's omitted from the dumps. */
2137 if (! (cfun->curr_properties & PROP_cfg))
2138 flags &= ~TDF_BLOCKS;
2139
2140 if (df)
2141 df_dump_start (outf);
2142
2143 if (flags & TDF_BLOCKS)
2144 {
2145 FOR_EACH_BB_REVERSE (bb)
2146 {
2147 rtx x;
2148
2149 start[INSN_UID (BB_HEAD (bb))] = bb;
2150 end[INSN_UID (BB_END (bb))] = bb;
2151 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2152 {
2153 enum bb_state state = IN_MULTIPLE_BB;
2154
2155 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2156 state = IN_ONE_BB;
2157 in_bb_p[INSN_UID (x)] = state;
2158
2159 if (x == BB_END (bb))
2160 break;
2161 }
2162 }
2163 }
2164
2165 for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
2166 {
2167 if (flags & TDF_BLOCKS)
2168 {
2169 bb = start[INSN_UID (tmp_rtx)];
2170 if (bb != NULL)
2171 {
2172 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, true, false);
2173 if (df && (flags & TDF_DETAILS))
2174 df_dump_top (bb, outf);
2175 }
2176
2177 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2178 && !NOTE_P (tmp_rtx)
2179 && !BARRIER_P (tmp_rtx))
2180 fprintf (outf, ";; Insn is not within a basic block\n");
2181 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2182 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2183 }
2184
2185 if (flags & TDF_DETAILS)
2186 df_dump_insn_top (tmp_rtx, outf);
2187 if (! (flags & TDF_SLIM))
2188 print_rtl_single (outf, tmp_rtx);
2189 else
2190 dump_insn_slim (outf, tmp_rtx);
2191 if (flags & TDF_DETAILS)
2192 df_dump_insn_bottom (tmp_rtx, outf);
2193
2194 if (flags & TDF_BLOCKS)
2195 {
2196 bb = end[INSN_UID (tmp_rtx)];
2197 if (bb != NULL)
2198 {
2199 dump_bb_info (outf, bb, 0, dump_flags | TDF_COMMENT, false, true);
2200 if (df && (flags & TDF_DETAILS))
2201 df_dump_bottom (bb, outf);
2202 putc ('\n', outf);
2203 }
2204 }
2205 }
2206
2207 free (start);
2208 free (end);
2209 free (in_bb_p);
2210 }
2211 }
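
/* For example, the whole function can be dumped with block annotations
   from a debugger via

     print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);

   cf. the similar call in rtl_verify_edges below. */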
2212 \f
2213 /* Update the branch probability of BB if a REG_BR_PROB is present. */
2214
2215 void
2216 update_br_prob_note (basic_block bb)
2217 {
2218 rtx note;
2219 if (!JUMP_P (BB_END (bb)))
2220 return;
2221 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2222 if (!note || XINT (note, 0) == BRANCH_EDGE (bb)->probability)
2223 return;
2224 XINT (note, 0) = BRANCH_EDGE (bb)->probability;
2225 }
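
/* Usage sketch: a pass that changes BRANCH_EDGE (bb)->probability keeps
   the insn stream consistent by calling

     update_br_prob_note (bb);

   afterwards, as fixup_reorder_chain below does after inverting a jump. */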
2226
2227 /* Get the last insn associated with block BB (including any barriers and
2228 tablejumps that follow BB). */
2229 rtx
2230 get_last_bb_insn (basic_block bb)
2231 {
2232 rtx tmp;
2233 rtx end = BB_END (bb);
2234
2235 /* Include any jump table following the basic block. */
2236 if (tablejump_p (end, NULL, &tmp))
2237 end = tmp;
2238
2239 /* Include any barriers that may follow the basic block. */
2240 tmp = next_nonnote_insn_bb (end);
2241 while (tmp && BARRIER_P (tmp))
2242 {
2243 end = tmp;
2244 tmp = next_nonnote_insn_bb (end);
2245 }
2246
2247 return end;
2248 }
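
/* A hedged sketch of the intended use (mirroring rtl_delete_block
   elsewhere in this file): remove a block together with the barriers
   and jump table trailing it by deleting the insns from its head
   through get_last_bb_insn:

     rtx end = get_last_bb_insn (bb);
     delete_insn_chain (BB_HEAD (bb), end, true);
*/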
2249
2250 /* Sanity check partition hotness to ensure that basic blocks in
2251 the cold partition don't dominate basic blocks in the hot partition.
2252 If FLAG_ONLY is true, report violations as errors. Otherwise
2253 re-mark the dominated blocks as cold, since this is run after
2254 cfg optimizations that may make hot blocks previously reached
2255 by both hot and cold blocks now only reachable along cold paths. */
2256
2257 static vec<basic_block>
2258 find_partition_fixes (bool flag_only)
2259 {
2260 basic_block bb;
2261 vec<basic_block> bbs_in_cold_partition = vNULL;
2262 vec<basic_block> bbs_to_fix = vNULL;
2263
2264 /* Callers check this. */
2265 gcc_checking_assert (crtl->has_bb_partition);
2266
2267 FOR_EACH_BB (bb)
2268 if ((BB_PARTITION (bb) == BB_COLD_PARTITION))
2269 bbs_in_cold_partition.safe_push (bb);
2270
2271 if (bbs_in_cold_partition.is_empty ())
2272 return vNULL;
2273
2274 bool dom_calculated_here = !dom_info_available_p (CDI_DOMINATORS);
2275
2276 if (dom_calculated_here)
2277 calculate_dominance_info (CDI_DOMINATORS);
2278
2279 while (! bbs_in_cold_partition.is_empty ())
2280 {
2281 bb = bbs_in_cold_partition.pop ();
2282 /* Any blocks dominated by a block in the cold section
2283 must also be cold. */
2284 basic_block son;
2285 for (son = first_dom_son (CDI_DOMINATORS, bb);
2286 son;
2287 son = next_dom_son (CDI_DOMINATORS, son))
2288 {
2289 /* If son is not yet cold, then mark it cold here and
2290 enqueue it for further processing. */
2291 if ((BB_PARTITION (son) != BB_COLD_PARTITION))
2292 {
2293 if (flag_only)
2294 error ("non-cold basic block %d dominated "
2295 "by a block in the cold partition (%d)", son->index, bb->index);
2296 else
2297 BB_SET_PARTITION (son, BB_COLD_PARTITION);
2298 bbs_to_fix.safe_push (son);
2299 bbs_in_cold_partition.safe_push (son);
2300 }
2301 }
2302 }
2303
2304 if (dom_calculated_here)
2305 free_dominance_info (CDI_DOMINATORS);
2306
2307 return bbs_to_fix;
2308 }
2309
2310 /* Perform cleanup on the hot/cold bb partitioning after optimization
2311 passes that modify the cfg. */
2312
2313 void
2314 fixup_partitions (void)
2315 {
2316 basic_block bb;
2317
2318 if (!crtl->has_bb_partition)
2319 return;
2320
2321 /* Delete any blocks that became unreachable and weren't
2322 already cleaned up, for example during edge forwarding
2323 and convert_jumps_to_returns. This will expose more
2324 opportunities for fixing the partition boundaries here.
2325 Also, the calculation of the dominance graph during verification
2326 will assert if there are unreachable nodes. */
2327 delete_unreachable_blocks ();
2328
2329 /* If there are partitions, do a sanity check on them: A basic block in
2330 a cold partition cannot dominate a basic block in a hot partition.
2331 Fixup any that now violate this requirement, as a result of edge
2332 forwarding and unreachable block deletion. */
2333 vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2334
2335 /* Do the partition fixup after all necessary blocks have been converted to
2336 cold, so that we update the region crossings in the minimum number of
2337 places, which can require forcing edges to be non-fallthru. */
2338 while (! bbs_to_fix.is_empty ())
2339 {
2340 bb = bbs_to_fix.pop ();
2341 fixup_new_cold_bb (bb);
2342 }
2343 }
2344
2345 /* Verify, in the basic block chain, that there is at most one switch
2346 between hot/cold partitions. This condition will not be true until
2347 after reorder_basic_blocks is called. */
2348
2349 static int
2350 verify_hot_cold_block_grouping (void)
2351 {
2352 basic_block bb;
2353 int err = 0;
2354 bool switched_sections = false;
2355 int current_partition = BB_UNPARTITIONED;
2356
2357 /* Even after bb reordering is complete, we go into cfglayout mode
2358 again (in compgoto). Ensure we don't call this before going back
2359 into linearized RTL, by which time any layout fixes will have been committed. */
2360 if (!crtl->bb_reorder_complete
2361 || current_ir_type () != IR_RTL_CFGRTL)
2362 return err;
2363
2364 FOR_EACH_BB (bb)
2365 {
2366 if (current_partition != BB_UNPARTITIONED
2367 && BB_PARTITION (bb) != current_partition)
2368 {
2369 if (switched_sections)
2370 {
2371 error ("multiple hot/cold transitions found (bb %i)",
2372 bb->index);
2373 err = 1;
2374 }
2375 else
2376 switched_sections = true;
2377
2378 if (!crtl->has_bb_partition)
2379 error ("partition found but function partition flag not set");
2380 }
2381 current_partition = BB_PARTITION (bb);
2382 }
2383
2384 return err;
2385 }
2386 \f
2387
2388 /* Perform several checks on the edges out of each block, such as
2389 the consistency of the branch probabilities, the correctness
2390 of hot/cold partition crossing edges, and the number of expected
2391 successor edges. Also verify that the dominance relationship
2392 between hot/cold blocks is sane. */
2393
2394 static int
2395 rtl_verify_edges (void)
2396 {
2397 int err = 0;
2398 basic_block bb;
2399
2400 FOR_EACH_BB_REVERSE (bb)
2401 {
2402 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2403 int n_eh = 0, n_abnormal = 0;
2404 edge e, fallthru = NULL;
2405 edge_iterator ei;
2406 rtx note;
2407 bool has_crossing_edge = false;
2408
2409 if (JUMP_P (BB_END (bb))
2410 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2411 && EDGE_COUNT (bb->succs) >= 2
2412 && any_condjump_p (BB_END (bb)))
2413 {
2414 if (XINT (note, 0) != BRANCH_EDGE (bb)->probability
2415 && profile_status != PROFILE_ABSENT)
2416 {
2417 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2418 XINT (note, 0), BRANCH_EDGE (bb)->probability);
2419 err = 1;
2420 }
2421 }
2422
2423 FOR_EACH_EDGE (e, ei, bb->succs)
2424 {
2425 bool is_crossing;
2426
2427 if (e->flags & EDGE_FALLTHRU)
2428 n_fallthru++, fallthru = e;
2429
2430 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2431 && e->src != ENTRY_BLOCK_PTR
2432 && e->dest != EXIT_BLOCK_PTR);
2433 has_crossing_edge |= is_crossing;
2434 if (e->flags & EDGE_CROSSING)
2435 {
2436 if (!is_crossing)
2437 {
2438 error ("EDGE_CROSSING incorrectly set across same section");
2439 err = 1;
2440 }
2441 if (e->flags & EDGE_FALLTHRU)
2442 {
2443 error ("fallthru edge crosses section boundary in bb %i",
2444 e->src->index);
2445 err = 1;
2446 }
2447 if (e->flags & EDGE_EH)
2448 {
2449 error ("EH edge crosses section boundary in bb %i",
2450 e->src->index);
2451 err = 1;
2452 }
2453 if (JUMP_P (BB_END (bb))
2454 && !find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX))
2455 {
2456 error ("No region crossing jump at section boundary in bb %i",
2457 bb->index);
2458 err = 1;
2459 }
2460 }
2461 else if (is_crossing)
2462 {
2463 error ("EDGE_CROSSING missing across section boundary");
2464 err = 1;
2465 }
2466
2467 if ((e->flags & ~(EDGE_DFS_BACK
2468 | EDGE_CAN_FALLTHRU
2469 | EDGE_IRREDUCIBLE_LOOP
2470 | EDGE_LOOP_EXIT
2471 | EDGE_CROSSING
2472 | EDGE_PRESERVE)) == 0)
2473 n_branch++;
2474
2475 if (e->flags & EDGE_ABNORMAL_CALL)
2476 n_abnormal_call++;
2477
2478 if (e->flags & EDGE_SIBCALL)
2479 n_sibcall++;
2480
2481 if (e->flags & EDGE_EH)
2482 n_eh++;
2483
2484 if (e->flags & EDGE_ABNORMAL)
2485 n_abnormal++;
2486 }
2487
2488 if (!has_crossing_edge
2489 && find_reg_note (BB_END (bb), REG_CROSSING_JUMP, NULL_RTX))
2490 {
2491 print_rtl_with_bb (stderr, get_insns (), TDF_RTL | TDF_BLOCKS | TDF_DETAILS);
2492 error ("Region crossing jump across same section in bb %i",
2493 bb->index);
2494 err = 1;
2495 }
2496
2497 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2498 {
2499 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2500 err = 1;
2501 }
2502 if (n_eh > 1)
2503 {
2504 error ("too many exception handling edges in bb %i", bb->index);
2505 err = 1;
2506 }
2507 if (n_branch
2508 && (!JUMP_P (BB_END (bb))
2509 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2510 || any_condjump_p (BB_END (bb))))))
2511 {
2512 error ("too many outgoing branch edges from bb %i", bb->index);
2513 err = 1;
2514 }
2515 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2516 {
2517 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2518 err = 1;
2519 }
2520 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2521 {
2522 error ("wrong number of branch edges after unconditional jump"
2523 " in bb %i", bb->index);
2524 err = 1;
2525 }
2526 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2527 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2528 {
2529 error ("wrong amount of branch edges after conditional jump"
2530 " in bb %i", bb->index);
2531 err = 1;
2532 }
2533 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2534 {
2535 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2536 err = 1;
2537 }
2538 if (n_sibcall && !CALL_P (BB_END (bb)))
2539 {
2540 error ("sibcall edges for non-call insn in bb %i", bb->index);
2541 err = 1;
2542 }
2543 if (n_abnormal > n_eh
2544 && !(CALL_P (BB_END (bb))
2545 && n_abnormal == n_abnormal_call + n_sibcall)
2546 && (!JUMP_P (BB_END (bb))
2547 || any_condjump_p (BB_END (bb))
2548 || any_uncondjump_p (BB_END (bb))))
2549 {
2550 error ("abnormal edges for no purpose in bb %i", bb->index);
2551 err = 1;
2552 }
2553 }
2554
2555 /* If there are partitions, do a sanity check on them: A basic block in
2556 a cold partition cannot dominate a basic block in a hot partition. */
2557 if (crtl->has_bb_partition && !err)
2558 {
2559 vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2560 err = !bbs_to_fix.is_empty ();
2561 }
2562
2563 /* Clean up. */
2564 return err;
2565 }
2566
2567 /* Checks on the instructions within blocks. Currently checks that each
2568 block starts with a basic block note, and that basic block notes and
2569 control flow jumps are not found in the middle of the block. */
2570
2571 static int
2572 rtl_verify_bb_insns (void)
2573 {
2574 rtx x;
2575 int err = 0;
2576 basic_block bb;
2577
2578 FOR_EACH_BB_REVERSE (bb)
2579 {
2580 /* Now check the header of the basic
2581 block. It ought to contain an optional CODE_LABEL followed
2582 by a NOTE_BASIC_BLOCK. */
2583 x = BB_HEAD (bb);
2584 if (LABEL_P (x))
2585 {
2586 if (BB_END (bb) == x)
2587 {
2588 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2589 bb->index);
2590 err = 1;
2591 }
2592
2593 x = NEXT_INSN (x);
2594 }
2595
2596 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2597 {
2598 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2599 bb->index);
2600 err = 1;
2601 }
2602
2603 if (BB_END (bb) == x)
2604 /* Do checks for empty blocks here. */
2605 ;
2606 else
2607 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2608 {
2609 if (NOTE_INSN_BASIC_BLOCK_P (x))
2610 {
2611 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2612 INSN_UID (x), bb->index);
2613 err = 1;
2614 }
2615
2616 if (x == BB_END (bb))
2617 break;
2618
2619 if (control_flow_insn_p (x))
2620 {
2621 error ("in basic block %d:", bb->index);
2622 fatal_insn ("flow control insn inside a basic block", x);
2623 }
2624 }
2625 }
2626
2627 /* Clean up. */
2628 return err;
2629 }
2630
2631 /* Verify that block pointers for instructions in basic blocks, headers and
2632 footers are set appropriately. */
2633
2634 static int
2635 rtl_verify_bb_pointers (void)
2636 {
2637 int err = 0;
2638 basic_block bb;
2639
2640 /* Check the general integrity of the basic blocks. */
2641 FOR_EACH_BB_REVERSE (bb)
2642 {
2643 rtx insn;
2644
2645 if (!(bb->flags & BB_RTL))
2646 {
2647 error ("BB_RTL flag not set for block %d", bb->index);
2648 err = 1;
2649 }
2650
2651 FOR_BB_INSNS (bb, insn)
2652 if (BLOCK_FOR_INSN (insn) != bb)
2653 {
2654 error ("insn %d basic block pointer is %d, should be %d",
2655 INSN_UID (insn),
2656 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2657 bb->index);
2658 err = 1;
2659 }
2660
2661 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2662 if (!BARRIER_P (insn)
2663 && BLOCK_FOR_INSN (insn) != NULL)
2664 {
2665 error ("insn %d in header of bb %d has non-NULL basic block",
2666 INSN_UID (insn), bb->index);
2667 err = 1;
2668 }
2669 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2670 if (!BARRIER_P (insn)
2671 && BLOCK_FOR_INSN (insn) != NULL)
2672 {
2673 error ("insn %d in footer of bb %d has non-NULL basic block",
2674 INSN_UID (insn), bb->index);
2675 err = 1;
2676 }
2677 }
2678
2679 /* Clean up. */
2680 return err;
2681 }
2682
2683 /* Verify the CFG and RTL consistency common for both underlying RTL and
2684 cfglayout RTL.
2685
2686 Currently it does the following checks:
2687
2688 - overlapping of basic blocks
2689 - insns with wrong BLOCK_FOR_INSN pointers
2690 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2691 - tails of basic blocks (ensure that boundary is necessary)
2692 - scans the body of the basic block for JUMP_INSN, CODE_LABEL
2693 and NOTE_INSN_BASIC_BLOCK
2694 - verify that no fall_thru edge crosses hot/cold partition boundaries
2695 - verify that there are no pending RTL branch predictions
2696 - verify that hot blocks are not dominated by cold blocks
2697
2698 In the future it can be extended to check a lot of other stuff as well
2699 (reachability of basic blocks, life information, etc. etc.). */
2700
2701 static int
2702 rtl_verify_flow_info_1 (void)
2703 {
2704 int err = 0;
2705
2706 err |= rtl_verify_bb_pointers ();
2707
2708 err |= rtl_verify_bb_insns ();
2709
2710 err |= rtl_verify_edges ();
2711
2712 return err;
2713 }
2714
2715 /* Walk the instruction chain and verify that bb head/end pointers
2716 are correct, and that instructions are in exactly one bb and have
2717 correct block pointers. */
2718
2719 static int
2720 rtl_verify_bb_insn_chain (void)
2721 {
2722 basic_block bb;
2723 int err = 0;
2724 rtx x;
2725 rtx last_head = get_last_insn ();
2726 basic_block *bb_info;
2727 const int max_uid = get_max_uid ();
2728
2729 bb_info = XCNEWVEC (basic_block, max_uid);
2730
2731 FOR_EACH_BB_REVERSE (bb)
2732 {
2733 rtx head = BB_HEAD (bb);
2734 rtx end = BB_END (bb);
2735
2736 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2737 {
2738 /* Verify the end of the basic block is in the INSN chain. */
2739 if (x == end)
2740 break;
2741
2742 /* And that the code outside of basic blocks has NULL bb field. */
2743 if (!BARRIER_P (x)
2744 && BLOCK_FOR_INSN (x) != NULL)
2745 {
2746 error ("insn %d outside of basic blocks has non-NULL bb field",
2747 INSN_UID (x));
2748 err = 1;
2749 }
2750 }
2751
2752 if (!x)
2753 {
2754 error ("end insn %d for block %d not found in the insn stream",
2755 INSN_UID (end), bb->index);
2756 err = 1;
2757 }
2758
2759 /* Work backwards from the end to the head of the basic block
2760 to verify the head is in the RTL chain. */
2761 for (; x != NULL_RTX; x = PREV_INSN (x))
2762 {
2763 /* While walking over the insn chain, verify insns appear
2764 in only one basic block. */
2765 if (bb_info[INSN_UID (x)] != NULL)
2766 {
2767 error ("insn %d is in multiple basic blocks (%d and %d)",
2768 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2769 err = 1;
2770 }
2771
2772 bb_info[INSN_UID (x)] = bb;
2773
2774 if (x == head)
2775 break;
2776 }
2777 if (!x)
2778 {
2779 error ("head insn %d for block %d not found in the insn stream",
2780 INSN_UID (head), bb->index);
2781 err = 1;
2782 }
2783
2784 last_head = PREV_INSN (x);
2785 }
2786
2787 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2788 {
2789 /* Check that the code before the first basic block has NULL
2790 bb field. */
2791 if (!BARRIER_P (x)
2792 && BLOCK_FOR_INSN (x) != NULL)
2793 {
2794 error ("insn %d outside of basic blocks has non-NULL bb field",
2795 INSN_UID (x));
2796 err = 1;
2797 }
2798 }
2799 free (bb_info);
2800
2801 return err;
2802 }
2803
2804 /* Verify that fallthru edges point to adjacent blocks in layout order and
2805 that barriers exist after non-fallthru blocks. */
2806
2807 static int
2808 rtl_verify_fallthru (void)
2809 {
2810 basic_block bb;
2811 int err = 0;
2812
2813 FOR_EACH_BB_REVERSE (bb)
2814 {
2815 edge e;
2816
2817 e = find_fallthru_edge (bb->succs);
2818 if (!e)
2819 {
2820 rtx insn;
2821
2822 /* Ensure the existence of a barrier in a BB with no fallthru edges. */
2823 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2824 {
2825 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
2826 {
2827 error ("missing barrier after block %i", bb->index);
2828 err = 1;
2829 break;
2830 }
2831 if (BARRIER_P (insn))
2832 break;
2833 }
2834 }
2835 else if (e->src != ENTRY_BLOCK_PTR
2836 && e->dest != EXIT_BLOCK_PTR)
2837 {
2838 rtx insn;
2839
2840 if (e->src->next_bb != e->dest)
2841 {
2842 error
2843 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
2844 e->src->index, e->dest->index);
2845 err = 1;
2846 }
2847 else
2848 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
2849 insn = NEXT_INSN (insn))
2850 if (BARRIER_P (insn) || INSN_P (insn))
2851 {
2852 error ("verify_flow_info: Incorrect fallthru %i->%i",
2853 e->src->index, e->dest->index);
2854 fatal_insn ("wrong insn in the fallthru edge", insn);
2855 err = 1;
2856 }
2857 }
2858 }
2859
2860 return err;
2861 }
2862
2863 /* Verify that blocks are laid out in consecutive order. While walking the
2864 instructions, verify that all expected instructions are inside the basic
2865 blocks, and that all returns are followed by barriers. */
2866
2867 static int
2868 rtl_verify_bb_layout (void)
2869 {
2870 basic_block bb;
2871 int err = 0;
2872 rtx x;
2873 int num_bb_notes;
2874 const rtx rtx_first = get_insns ();
2875 basic_block last_bb_seen = ENTRY_BLOCK_PTR, curr_bb = NULL;
2876
2877 num_bb_notes = 0;
2878 last_bb_seen = ENTRY_BLOCK_PTR;
2879
2880 for (x = rtx_first; x; x = NEXT_INSN (x))
2881 {
2882 if (NOTE_INSN_BASIC_BLOCK_P (x))
2883 {
2884 bb = NOTE_BASIC_BLOCK (x);
2885
2886 num_bb_notes++;
2887 if (bb != last_bb_seen->next_bb)
2888 internal_error ("basic blocks not laid down consecutively");
2889
2890 curr_bb = last_bb_seen = bb;
2891 }
2892
2893 if (!curr_bb)
2894 {
2895 switch (GET_CODE (x))
2896 {
2897 case BARRIER:
2898 case NOTE:
2899 break;
2900
2901 case CODE_LABEL:
2902 /* An ADDR_VEC is placed outside any basic block. */
2903 if (NEXT_INSN (x)
2904 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
2905 x = NEXT_INSN (x);
2906
2907 /* But in any case, non-deletable labels can appear anywhere. */
2908 break;
2909
2910 default:
2911 fatal_insn ("insn outside basic block", x);
2912 }
2913 }
2914
2915 if (JUMP_P (x)
2916 && returnjump_p (x) && ! condjump_p (x)
2917 && ! (next_nonnote_insn (x) && BARRIER_P (next_nonnote_insn (x))))
2918 fatal_insn ("return not followed by barrier", x);
2919
2920 if (curr_bb && x == BB_END (curr_bb))
2921 curr_bb = NULL;
2922 }
2923
2924 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
2925 internal_error
2926 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
2927 num_bb_notes, n_basic_blocks_for_fn (cfun));
2928
2929 return err;
2930 }
2931
2932 /* Verify the CFG and RTL consistency common for both underlying RTL and
2933 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
2934
2935 Currently it does the following checks:
2936 - all checks of rtl_verify_flow_info_1
2937 - test head/end pointers
2938 - check that blocks are laid out in consecutive order
2939 - check that all insns are in the basic blocks
2940 (except the switch handling code, barriers and notes)
2941 - check that all returns are followed by barriers
2942 - check that all fallthru edges point to the adjacent blocks
2943 - verify that there is a single hot/cold partition boundary after bbro */
2944
2945 static int
2946 rtl_verify_flow_info (void)
2947 {
2948 int err = 0;
2949
2950 err |= rtl_verify_flow_info_1 ();
2951
2952 err |= rtl_verify_bb_insn_chain ();
2953
2954 err |= rtl_verify_fallthru ();
2955
2956 err |= rtl_verify_bb_layout ();
2957
2958 err |= verify_hot_cold_block_grouping ();
2959
2960 return err;
2961 }
2962 \f
2963 /* Assume that the preceding pass has possibly eliminated jump instructions
2964 or converted the unconditional jumps. Eliminate the dead edges from the CFG.
2965 Return true if any edges are eliminated. */
2966
2967 bool
2968 purge_dead_edges (basic_block bb)
2969 {
2970 edge e;
2971 rtx insn = BB_END (bb), note;
2972 bool purged = false;
2973 bool found;
2974 edge_iterator ei;
2975
2976 if (DEBUG_INSN_P (insn) && insn != BB_HEAD (bb))
2977 do
2978 insn = PREV_INSN (insn);
2979 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
2980
2981 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
2982 if (NONJUMP_INSN_P (insn)
2983 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
2984 {
2985 rtx eqnote;
2986
2987 if (! may_trap_p (PATTERN (insn))
2988 || ((eqnote = find_reg_equal_equiv_note (insn))
2989 && ! may_trap_p (XEXP (eqnote, 0))))
2990 remove_note (insn, note);
2991 }
2992
2993 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
2994 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
2995 {
2996 bool remove = false;
2997
2998 /* There are three types of edges we need to handle correctly here: EH
2999 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3000 latter can appear when nonlocal gotos are used. */
3001 if (e->flags & EDGE_ABNORMAL_CALL)
3002 {
3003 if (!CALL_P (insn))
3004 remove = true;
3005 else if (can_nonlocal_goto (insn))
3006 ;
3007 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3008 ;
3009 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3010 ;
3011 else
3012 remove = true;
3013 }
3014 else if (e->flags & EDGE_EH)
3015 remove = !can_throw_internal (insn);
3016
3017 if (remove)
3018 {
3019 remove_edge (e);
3020 df_set_bb_dirty (bb);
3021 purged = true;
3022 }
3023 else
3024 ei_next (&ei);
3025 }
3026
3027 if (JUMP_P (insn))
3028 {
3029 rtx note;
3030 edge b,f;
3031 edge_iterator ei;
3032
3033 /* We care only about conditional jumps and simplejumps. */
3034 if (!any_condjump_p (insn)
3035 && !returnjump_p (insn)
3036 && !simplejump_p (insn))
3037 return purged;
3038
3039 /* Branch probability/prediction notes are defined only for
3040 condjumps. We've possibly turned the condjump into a simplejump. */
3041 if (simplejump_p (insn))
3042 {
3043 note = find_reg_note (insn, REG_BR_PROB, NULL);
3044 if (note)
3045 remove_note (insn, note);
3046 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3047 remove_note (insn, note);
3048 }
3049
3050 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3051 {
3052 /* Avoid letting abnormal flags leak from computed jumps turned
3053 into simplejumps. */
3054
3055 e->flags &= ~EDGE_ABNORMAL;
3056
3057 /* See if this edge is one we should keep. */
3058 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3059 /* A conditional jump can fall through into the next
3060 block, so we should keep the edge. */
3061 {
3062 ei_next (&ei);
3063 continue;
3064 }
3065 else if (e->dest != EXIT_BLOCK_PTR
3066 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3067 /* If the destination block is the target of the jump,
3068 keep the edge. */
3069 {
3070 ei_next (&ei);
3071 continue;
3072 }
3073 else if (e->dest == EXIT_BLOCK_PTR && returnjump_p (insn))
3074 /* If the destination block is the exit block, and this
3075 instruction is a return, then keep the edge. */
3076 {
3077 ei_next (&ei);
3078 continue;
3079 }
3080 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3081 /* Keep the edges that correspond to exceptions thrown by
3082 this instruction and rematerialize the EDGE_ABNORMAL
3083 flag we just cleared above. */
3084 {
3085 e->flags |= EDGE_ABNORMAL;
3086 ei_next (&ei);
3087 continue;
3088 }
3089
3090 /* We do not need this edge. */
3091 df_set_bb_dirty (bb);
3092 purged = true;
3093 remove_edge (e);
3094 }
3095
3096 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3097 return purged;
3098
3099 if (dump_file)
3100 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3101
3102 if (!optimize)
3103 return purged;
3104
3105 /* Redistribute probabilities. */
3106 if (single_succ_p (bb))
3107 {
3108 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3109 single_succ_edge (bb)->count = bb->count;
3110 }
3111 else
3112 {
3113 note = find_reg_note (insn, REG_BR_PROB, NULL);
3114 if (!note)
3115 return purged;
3116
3117 b = BRANCH_EDGE (bb);
3118 f = FALLTHRU_EDGE (bb);
3119 b->probability = XINT (note, 0);
3120 f->probability = REG_BR_PROB_BASE - b->probability;
3121 /* FIXME: Update these to use GCOV_COMPUTE_SCALE. */
3122 b->count = bb->count * b->probability / REG_BR_PROB_BASE;
3123 f->count = bb->count * f->probability / REG_BR_PROB_BASE;
3124 }
3125
3126 return purged;
3127 }
3128 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3129 {
3130 /* First, there should not be any EH or ABCALL edges resulting
3131 from non-local gotos and the like. If there were, we shouldn't
3132 have created the sibcall in the first place. Second, there
3133 should of course never have been a fallthru edge. */
3134 gcc_assert (single_succ_p (bb));
3135 gcc_assert (single_succ_edge (bb)->flags
3136 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3137
3138 return false;
3139 }
3140
3141 /* If we don't see a jump insn, we don't know exactly why the block would
3142 have been broken at this point. Look for a simple, non-fallthru edge,
3143 as these are only created by conditional branches. If we find such an
3144 edge we know that there used to be a jump here and can then safely
3145 remove all non-fallthru edges. */
3146 found = false;
3147 FOR_EACH_EDGE (e, ei, bb->succs)
3148 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3149 {
3150 found = true;
3151 break;
3152 }
3153
3154 if (!found)
3155 return purged;
3156
3157 /* Remove all but the fake and fallthru edges. The fake edge may be
3158 the only successor for this block in the case of noreturn
3159 calls. */
3160 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3161 {
3162 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3163 {
3164 df_set_bb_dirty (bb);
3165 remove_edge (e);
3166 purged = true;
3167 }
3168 else
3169 ei_next (&ei);
3170 }
3171
3172 gcc_assert (single_succ_p (bb));
3173
3174 single_succ_edge (bb)->probability = REG_BR_PROB_BASE;
3175 single_succ_edge (bb)->count = bb->count;
3176
3177 if (dump_file)
3178 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3179 bb->index);
3180 return purged;
3181 }
3182
3183 /* Search all basic blocks for potentially dead edges and purge them. Return
3184 true if some edge has been eliminated. */
3185
3186 bool
3187 purge_all_dead_edges (void)
3188 {
3189 bool purged = false;
3190 basic_block bb;
3191
3192 FOR_EACH_BB (bb)
3193 {
3194 bool purged_here = purge_dead_edges (bb);
3195
3196 purged |= purged_here;
3197 }
3198
3199 return purged;
3200 }
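
/* Typical use (sketch): after a pass has simplified or deleted jump
   insns, e.g.

     changed |= purge_all_dead_edges ();

   the return value reports whether any edge was removed, so the caller
   knows whether CFG-dependent information must be recomputed. */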
3201
3202 /* This is used by a few passes that emit some instructions after abnormal
3203 calls, moving the basic block's end, while they in fact want to emit
3204 them on the fallthru edge. Look for abnormal call edges, search backward
3205 for the call in the block, and insert the instructions on the edge instead.
3206
3207 Similarly, handle instructions throwing exceptions internally.
3208
3209 Return true when instructions have been found and inserted on edges. */
3210
3211 bool
3212 fixup_abnormal_edges (void)
3213 {
3214 bool inserted = false;
3215 basic_block bb;
3216
3217 FOR_EACH_BB (bb)
3218 {
3219 edge e;
3220 edge_iterator ei;
3221
3222 /* Look for cases we are interested in - calls or instructions causing
3223 exceptions. */
3224 FOR_EACH_EDGE (e, ei, bb->succs)
3225 if ((e->flags & EDGE_ABNORMAL_CALL)
3226 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3227 == (EDGE_ABNORMAL | EDGE_EH)))
3228 break;
3229
3230 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3231 {
3232 rtx insn;
3233
3234 /* Get past the new insns generated. Allow notes, as the insns
3235 may already have been deleted. */
3236 insn = BB_END (bb);
3237 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3238 && !can_throw_internal (insn)
3239 && insn != BB_HEAD (bb))
3240 insn = PREV_INSN (insn);
3241
3242 if (CALL_P (insn) || can_throw_internal (insn))
3243 {
3244 rtx stop, next;
3245
3246 e = find_fallthru_edge (bb->succs);
3247
3248 stop = NEXT_INSN (BB_END (bb));
3249 BB_END (bb) = insn;
3250
3251 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3252 {
3253 next = NEXT_INSN (insn);
3254 if (INSN_P (insn))
3255 {
3256 delete_insn (insn);
3257
3258 /* Sometimes there's still the return value USE.
3259 If it's placed after a trapping call (i.e. that
3260 call is the last insn anyway), we have no fallthru
3261 edge. Simply delete this use and don't try to insert
3262 on the non-existent edge. */
3263 if (GET_CODE (PATTERN (insn)) != USE)
3264 {
3265 /* We're not deleting it, we're moving it. */
3266 INSN_DELETED_P (insn) = 0;
3267 PREV_INSN (insn) = NULL_RTX;
3268 NEXT_INSN (insn) = NULL_RTX;
3269
3270 insert_insn_on_edge (insn, e);
3271 inserted = true;
3272 }
3273 }
3274 else if (!BARRIER_P (insn))
3275 set_block_for_insn (insn, NULL);
3276 }
3277 }
3278
3279 /* It may be that we don't find any trapping insn. In this
3280 case we discovered quite late that the insn that had been
3281 marked as can_throw_internal in fact couldn't trap at all.
3282 So we should delete the EH edges out of the block. */
3283 else
3284 purge_dead_edges (bb);
3285 }
3286 }
3287
3288 return inserted;
3289 }
3290 \f
3291 /* Cut the insns from FIRST to LAST out of the insns stream. */
3292
3293 rtx
3294 unlink_insn_chain (rtx first, rtx last)
3295 {
3296 rtx prevfirst = PREV_INSN (first);
3297 rtx nextlast = NEXT_INSN (last);
3298
3299 PREV_INSN (first) = NULL;
3300 NEXT_INSN (last) = NULL;
3301 if (prevfirst)
3302 NEXT_INSN (prevfirst) = nextlast;
3303 if (nextlast)
3304 PREV_INSN (nextlast) = prevfirst;
3305 else
3306 set_last_insn (prevfirst);
3307 if (!prevfirst)
3308 set_first_insn (nextlast);
3309 return first;
3310 }
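
/* For instance, record_effective_endpoints below uses this to detach a
   block's header insns from the stream:

     BB_HEADER (bb) = unlink_insn_chain (next_insn,
                                         PREV_INSN (BB_HEAD (bb)));

   after which the detached sub-chain is reachable only through the
   returned first insn. */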
3311 \f
3312 /* Skip over inter-block insns occurring after BB which are typically
3313 associated with BB (e.g., barriers). If there are any such insns,
3314 we return the last one. Otherwise, we return the end of BB. */
3315
3316 static rtx
3317 skip_insns_after_block (basic_block bb)
3318 {
3319 rtx insn, last_insn, next_head, prev;
3320
3321 next_head = NULL_RTX;
3322 if (bb->next_bb != EXIT_BLOCK_PTR)
3323 next_head = BB_HEAD (bb->next_bb);
3324
3325 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3326 {
3327 if (insn == next_head)
3328 break;
3329
3330 switch (GET_CODE (insn))
3331 {
3332 case BARRIER:
3333 last_insn = insn;
3334 continue;
3335
3336 case NOTE:
3337 switch (NOTE_KIND (insn))
3338 {
3339 case NOTE_INSN_BLOCK_END:
3340 gcc_unreachable ();
3341 continue;
3342 default:
3343 continue;
3345 }
3346 break;
3347
3348 case CODE_LABEL:
3349 if (NEXT_INSN (insn)
3350 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3351 {
3352 insn = NEXT_INSN (insn);
3353 last_insn = insn;
3354 continue;
3355 }
3356 break;
3357
3358 default:
3359 break;
3360 }
3361
3362 break;
3363 }
3364
3365 /* It is possible to hit a contradictory sequence. For instance:
3366
3367 jump_insn
3368 NOTE_INSN_BLOCK_BEG
3369 barrier
3370
3371 where the barrier belongs to the jump_insn, but the note does not. This can
3372 be created by removing the basic block originally following
3373 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3374
3375 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3376 {
3377 prev = PREV_INSN (insn);
3378 if (NOTE_P (insn))
3379 switch (NOTE_KIND (insn))
3380 {
3381 case NOTE_INSN_BLOCK_END:
3382 gcc_unreachable ();
3383 break;
3384 case NOTE_INSN_DELETED:
3385 case NOTE_INSN_DELETED_LABEL:
3386 case NOTE_INSN_DELETED_DEBUG_LABEL:
3387 continue;
3388 default:
3389 reorder_insns (insn, insn, last_insn);
3390 }
3391 }
3392
3393 return last_insn;
3394 }
3395
3396 /* Locate or create a label for a given basic block. */
3397
3398 static rtx
3399 label_for_bb (basic_block bb)
3400 {
3401 rtx label = BB_HEAD (bb);
3402
3403 if (!LABEL_P (label))
3404 {
3405 if (dump_file)
3406 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3407
3408 label = block_label (bb);
3409 }
3410
3411 return label;
3412 }
3413
3414 /* Locate the effective beginning and end of the insn chain for each
3415 block, as defined by skip_insns_after_block above. */
3416
3417 static void
3418 record_effective_endpoints (void)
3419 {
3420 rtx next_insn;
3421 basic_block bb;
3422 rtx insn;
3423
3424 for (insn = get_insns ();
3425 insn
3426 && NOTE_P (insn)
3427 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3428 insn = NEXT_INSN (insn))
3429 continue;
3430 /* No basic blocks at all? */
3431 gcc_assert (insn);
3432
3433 if (PREV_INSN (insn))
3434 cfg_layout_function_header =
3435 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3436 else
3437 cfg_layout_function_header = NULL_RTX;
3438
3439 next_insn = get_insns ();
3440 FOR_EACH_BB (bb)
3441 {
3442 rtx end;
3443
3444 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3445 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3446 PREV_INSN (BB_HEAD (bb)));
3447 end = skip_insns_after_block (bb);
3448 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3449 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3450 next_insn = NEXT_INSN (BB_END (bb));
3451 }
3452
3453 cfg_layout_function_footer = next_insn;
3454 if (cfg_layout_function_footer)
3455 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3456 }
3457 \f
3458 static unsigned int
3459 into_cfg_layout_mode (void)
3460 {
3461 cfg_layout_initialize (0);
3462 return 0;
3463 }
3464
3465 static unsigned int
3466 outof_cfg_layout_mode (void)
3467 {
3468 basic_block bb;
3469
3470 FOR_EACH_BB (bb)
3471 if (bb->next_bb != EXIT_BLOCK_PTR)
3472 bb->aux = bb->next_bb;
3473
3474 cfg_layout_finalize ();
3475
3476 return 0;
3477 }
3478
3479 namespace {
3480
3481 const pass_data pass_data_into_cfg_layout_mode =
3482 {
3483 RTL_PASS, /* type */
3484 "into_cfglayout", /* name */
3485 OPTGROUP_NONE, /* optinfo_flags */
3486 false, /* has_gate */
3487 true, /* has_execute */
3488 TV_CFG, /* tv_id */
3489 0, /* properties_required */
3490 PROP_cfglayout, /* properties_provided */
3491 0, /* properties_destroyed */
3492 0, /* todo_flags_start */
3493 0, /* todo_flags_finish */
3494 };
3495
3496 class pass_into_cfg_layout_mode : public rtl_opt_pass
3497 {
3498 public:
3499 pass_into_cfg_layout_mode (gcc::context *ctxt)
3500 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3501 {}
3502
3503 /* opt_pass methods: */
3504 unsigned int execute () { return into_cfg_layout_mode (); }
3505
3506 }; // class pass_into_cfg_layout_mode
3507
3508 } // anon namespace
3509
3510 rtl_opt_pass *
3511 make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3512 {
3513 return new pass_into_cfg_layout_mode (ctxt);
3514 }
3515
3516 namespace {
3517
3518 const pass_data pass_data_outof_cfg_layout_mode =
3519 {
3520 RTL_PASS, /* type */
3521 "outof_cfglayout", /* name */
3522 OPTGROUP_NONE, /* optinfo_flags */
3523 false, /* has_gate */
3524 true, /* has_execute */
3525 TV_CFG, /* tv_id */
3526 0, /* properties_required */
3527 0, /* properties_provided */
3528 PROP_cfglayout, /* properties_destroyed */
3529 0, /* todo_flags_start */
3530 0, /* todo_flags_finish */
3531 };
3532
3533 class pass_outof_cfg_layout_mode : public rtl_opt_pass
3534 {
3535 public:
3536 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3537 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3538 {}
3539
3540 /* opt_pass methods: */
3541 unsigned int execute () { return outof_cfg_layout_mode (); }
3542
3543 }; // class pass_outof_cfg_layout_mode
3544
3545 } // anon namespace
3546
3547 rtl_opt_pass *
3548 make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3549 {
3550 return new pass_outof_cfg_layout_mode (ctxt);
3551 }
3552 \f
3553
3554 /* Link the basic blocks in the correct order, compacting the basic
3555 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3556 function also clears the basic block header and footer fields.
3557
3558 This function is usually called after a pass (e.g. tracer) finishes
3559 some transformations while in cfglayout mode. The required sequence
3560 of the basic blocks is in a linked list along the bb->aux field.
3561 This function re-links the basic block prev_bb and next_bb pointers
3562 accordingly, and it compacts and renumbers the blocks.
3563
3564 FIXME: This currently works only for RTL, but the only RTL-specific
3565 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3566 to GIMPLE a long time ago, but it doesn't relink the basic block
3567 chain. It could do that (to give better initial RTL) if this function
3568 is made IR-agnostic (and moved to cfganal.c or cfg.c while at it). */
3569
3570 void
3571 relink_block_chain (bool stay_in_cfglayout_mode)
3572 {
3573 basic_block bb, prev_bb;
3574 int index;
3575
3576 /* Maybe dump the re-ordered sequence. */
3577 if (dump_file)
3578 {
3579 fprintf (dump_file, "Reordered sequence:\n");
3580 for (bb = ENTRY_BLOCK_PTR->next_bb, index = NUM_FIXED_BLOCKS;
3581 bb;
3582 bb = (basic_block) bb->aux, index++)
3583 {
3584 fprintf (dump_file, " %i ", index);
3585 if (get_bb_original (bb))
3586 fprintf (dump_file, "duplicate of %i ",
3587 get_bb_original (bb)->index);
3588 else if (forwarder_block_p (bb)
3589 && !LABEL_P (BB_HEAD (bb)))
3590 fprintf (dump_file, "compensation ");
3591 else
3592 fprintf (dump_file, "bb %i ", bb->index);
3593 fprintf (dump_file, " [%i]\n", bb->frequency);
3594 }
3595 }
3596
3597 /* Now reorder the blocks. */
3598 prev_bb = ENTRY_BLOCK_PTR;
3599 bb = ENTRY_BLOCK_PTR->next_bb;
3600 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3601 {
3602 bb->prev_bb = prev_bb;
3603 prev_bb->next_bb = bb;
3604 }
3605 prev_bb->next_bb = EXIT_BLOCK_PTR;
3606 EXIT_BLOCK_PTR->prev_bb = prev_bb;
3607
3608 /* Then, clean up the aux fields. */
3609 FOR_ALL_BB (bb)
3610 {
3611 bb->aux = NULL;
3612 if (!stay_in_cfglayout_mode)
3613 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3614 }
3615
3616 /* Maybe reset the original copy tables; they are not valid anymore
3617 when we renumber the basic blocks in compact_blocks. If we are
3618 going out of cfglayout mode, don't re-allocate the tables. */
3619 free_original_copy_tables ();
3620 if (stay_in_cfglayout_mode)
3621 initialize_original_copy_tables ();
3622
3623 /* Finally, put basic_block_info in the new order. */
3624 compact_blocks ();
3625 }
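
/* Illustrative sketch (B1, B2, B3 are hypothetical blocks, with B1
   already ENTRY_BLOCK_PTR->next_bb): a cfglayout-mode pass that wants
   the physical order B1, B2, B3 threads the blocks through bb->aux and
   then calls this function:

     b1->aux = b2;
     b2->aux = b3;
     b3->aux = NULL;
     relink_block_chain (true);

   passing true to stay in cfglayout mode; compare outof_cfg_layout_mode
   above, which builds the aux chain from the existing next_bb order. */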
3626 \f
3627
3628 /* Given a reorder chain, rearrange the code to match. */
3629
3630 static void
3631 fixup_reorder_chain (void)
3632 {
3633 basic_block bb;
3634 rtx insn = NULL;
3635
3636 if (cfg_layout_function_header)
3637 {
3638 set_first_insn (cfg_layout_function_header);
3639 insn = cfg_layout_function_header;
3640 while (NEXT_INSN (insn))
3641 insn = NEXT_INSN (insn);
3642 }
3643
3644 /* First do the bulk reordering -- rechain the blocks without regard to
3645 the needed changes to jumps and labels. */
3646
3647 for (bb = ENTRY_BLOCK_PTR->next_bb; bb; bb = (basic_block) bb->aux)
3648 {
3649 if (BB_HEADER (bb))
3650 {
3651 if (insn)
3652 NEXT_INSN (insn) = BB_HEADER (bb);
3653 else
3654 set_first_insn (BB_HEADER (bb));
3655 PREV_INSN (BB_HEADER (bb)) = insn;
3656 insn = BB_HEADER (bb);
3657 while (NEXT_INSN (insn))
3658 insn = NEXT_INSN (insn);
3659 }
3660 if (insn)
3661 NEXT_INSN (insn) = BB_HEAD (bb);
3662 else
3663 set_first_insn (BB_HEAD (bb));
3664 PREV_INSN (BB_HEAD (bb)) = insn;
3665 insn = BB_END (bb);
3666 if (BB_FOOTER (bb))
3667 {
3668 NEXT_INSN (insn) = BB_FOOTER (bb);
3669 PREV_INSN (BB_FOOTER (bb)) = insn;
3670 while (NEXT_INSN (insn))
3671 insn = NEXT_INSN (insn);
3672 }
3673 }
3674
3675 NEXT_INSN (insn) = cfg_layout_function_footer;
3676 if (cfg_layout_function_footer)
3677 PREV_INSN (cfg_layout_function_footer) = insn;
3678
3679 while (NEXT_INSN (insn))
3680 insn = NEXT_INSN (insn);
3681
3682 set_last_insn (insn);
3683 #ifdef ENABLE_CHECKING
3684 verify_insn_chain ();
3685 #endif
3686
3687 /* Now add jumps and labels as needed to match the blocks' new
3688 outgoing edges. */
3689
3690 for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = (basic_block) bb->aux)
3691 {
3692 edge e_fall, e_taken, e;
3693 rtx bb_end_insn;
3694 rtx ret_label = NULL_RTX;
3695 basic_block nb;
3696 edge_iterator ei;
3697
3698 if (EDGE_COUNT (bb->succs) == 0)
3699 continue;
3700
3701 /* Find the old fallthru edge, and another non-EH edge for
3702 a taken jump. */
3703 e_taken = e_fall = NULL;
3704
3705 FOR_EACH_EDGE (e, ei, bb->succs)
3706 if (e->flags & EDGE_FALLTHRU)
3707 e_fall = e;
3708 else if (! (e->flags & EDGE_EH))
3709 e_taken = e;
3710
3711 bb_end_insn = BB_END (bb);
3712 if (JUMP_P (bb_end_insn))
3713 {
3714 ret_label = JUMP_LABEL (bb_end_insn);
3715 if (any_condjump_p (bb_end_insn))
3716 {
3717 /* This might happen if the conditional jump has side
3718 effects and could therefore not be optimized away.
3719 Make the basic block end with a barrier in order
3720 to prevent rtl_verify_flow_info from complaining. */
3721 if (!e_fall)
3722 {
3723 gcc_assert (!onlyjump_p (bb_end_insn)
3724 || returnjump_p (bb_end_insn));
3725 emit_barrier_after (bb_end_insn);
3726 continue;
3727 }
3728
3729 /* If the old fallthru is still next, nothing to do. */
3730 if (bb->aux == e_fall->dest
3731 || e_fall->dest == EXIT_BLOCK_PTR)
3732 continue;
3733
3734 /* The degenerate case of a conditional jump jumping to the next
3735 instruction can happen for jumps with side effects. We need
3736 to construct a forwarder block and this will be done just
3737 fine by force_nonfallthru below. */
3738 if (!e_taken)
3739 ;
3740
3741 /* There is another special case: if *neither* block is next,
3742 such as happens at the very end of a function, then we'll
3743 need to add a new unconditional jump. Choose the taken
3744 edge based on known or assumed probability. */
3745 else if (bb->aux != e_taken->dest)
3746 {
3747 rtx note = find_reg_note (bb_end_insn, REG_BR_PROB, 0);
3748
3749 if (note
3750 && XINT (note, 0) < REG_BR_PROB_BASE / 2
3751 && invert_jump (bb_end_insn,
3752 (e_fall->dest == EXIT_BLOCK_PTR
3753 ? NULL_RTX
3754 : label_for_bb (e_fall->dest)), 0))
3755 {
3756 e_fall->flags &= ~EDGE_FALLTHRU;
3757 gcc_checking_assert (could_fall_through
3758 (e_taken->src, e_taken->dest));
3759 e_taken->flags |= EDGE_FALLTHRU;
3760 update_br_prob_note (bb);
3761 e = e_fall, e_fall = e_taken, e_taken = e;
3762 }
3763 }
3764
3765 /* If the "jumping" edge is a crossing edge, and the fall
3766 through edge is non-crossing, leave things as they are. */
3767 else if ((e_taken->flags & EDGE_CROSSING)
3768 && !(e_fall->flags & EDGE_CROSSING))
3769 continue;
3770
3771 /* Otherwise we can try to invert the jump. This will
3772 basically never fail, however, keep up the pretense. */
3773 else if (invert_jump (bb_end_insn,
3774 (e_fall->dest == EXIT_BLOCK_PTR
3775 ? NULL_RTX
3776 : label_for_bb (e_fall->dest)), 0))
3777 {
3778 e_fall->flags &= ~EDGE_FALLTHRU;
3779 gcc_checking_assert (could_fall_through
3780 (e_taken->src, e_taken->dest));
3781 e_taken->flags |= EDGE_FALLTHRU;
3782 update_br_prob_note (bb);
3783 if (LABEL_NUSES (ret_label) == 0
3784 && single_pred_p (e_taken->dest))
3785 delete_insn (ret_label);
3786 continue;
3787 }
3788 }
3789 else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
3790 {
3791 /* If the old fallthru is still next or if
3792 the asm goto doesn't have a fallthru (e.g. when followed by
3793 __builtin_unreachable ()), nothing to do. */
3794 if (! e_fall
3795 || bb->aux == e_fall->dest
3796 || e_fall->dest == EXIT_BLOCK_PTR)
3797 continue;
3798
3799 /* Otherwise we'll have to use the fallthru fixup below. */
3800 }
3801 else
3802 {
3803 /* Otherwise we have some return, switch or computed
3804 jump. In the 99% case, there should not have been a
3805 fallthru edge. */
3806 gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
3807 continue;
3808 }
3809 }
3810 else
3811 {
3812 /* No fallthru implies a noreturn function with EH edges, or
3813 something similarly bizarre. In any case, we don't need to
3814 do anything. */
3815 if (! e_fall)
3816 continue;
3817
3818 /* If the fallthru block is still next, nothing to do. */
3819 if (bb->aux == e_fall->dest)
3820 continue;
3821
3822 /* A fallthru to exit block. */
3823 if (e_fall->dest == EXIT_BLOCK_PTR)
3824 continue;
3825 }
3826
3827 /* We got here if we need to add a new jump insn.
3828 Note force_nonfallthru can delete E_FALL and thus we have to
3829 save E_FALL->src prior to the call to force_nonfallthru. */
3830 nb = force_nonfallthru_and_redirect (e_fall, e_fall->dest, ret_label);
3831 if (nb)
3832 {
3833 nb->aux = bb->aux;
3834 bb->aux = nb;
3835 /* Don't process this new block. */
3836 bb = nb;
3837 }
3838 }
3839
3840 relink_block_chain (/*stay_in_cfglayout_mode=*/false);
3841
3842 /* Annoying special case - jump around dead jumptables left in the code. */
3843 FOR_EACH_BB (bb)
3844 {
3845 edge e = find_fallthru_edge (bb->succs);
3846
3847 if (e && !can_fallthru (e->src, e->dest))
3848 force_nonfallthru (e);
3849 }
3850
3851 /* Ensure that the goto_locus on each edge is represented by some
3852 instruction with that locus in the RTL. */
3853 if (!optimize)
3854 FOR_EACH_BB (bb)
3855 {
3856 edge e;
3857 edge_iterator ei;
3858
3859 FOR_EACH_EDGE (e, ei, bb->succs)
3860 if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
3861 && !(e->flags & EDGE_ABNORMAL))
3862 {
3863 edge e2;
3864 edge_iterator ei2;
3865 basic_block dest, nb;
3866 rtx end;
3867
3868 insn = BB_END (e->src);
3869 end = PREV_INSN (BB_HEAD (e->src));
3870 while (insn != end
3871 && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
3872 insn = PREV_INSN (insn);
3873 if (insn != end
3874 && INSN_LOCATION (insn) == e->goto_locus)
3875 continue;
3876 if (simplejump_p (BB_END (e->src))
3877 && !INSN_HAS_LOCATION (BB_END (e->src)))
3878 {
3879 INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
3880 continue;
3881 }
3882 dest = e->dest;
3883 if (dest == EXIT_BLOCK_PTR)
3884 {
3885 /* Non-fallthru edges to the exit block cannot be split. */
3886 if (!(e->flags & EDGE_FALLTHRU))
3887 continue;
3888 }
3889 else
3890 {
3891 insn = BB_HEAD (dest);
3892 end = NEXT_INSN (BB_END (dest));
3893 while (insn != end && !NONDEBUG_INSN_P (insn))
3894 insn = NEXT_INSN (insn);
3895 if (insn != end && INSN_HAS_LOCATION (insn)
3896 && INSN_LOCATION (insn) == e->goto_locus)
3897 continue;
3898 }
3899 nb = split_edge (e);
3900 if (!INSN_P (BB_END (nb)))
3901 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
3902 nb);
3903 INSN_LOCATION (BB_END (nb)) = e->goto_locus;
3904
3905 	    /* If there are other incoming edges to the destination block
3906 	       with the same goto locus, redirect them to the new block as
3907 	       well; this can prevent other such blocks from being created
3908 	       in subsequent iterations of the loop.  */
3909 for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
3910 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
3911 && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
3912 && e->goto_locus == e2->goto_locus)
3913 redirect_edge_and_branch (e2, nb);
3914 else
3915 ei_next (&ei2);
3916 }
3917 }
3918 }
3919 \f
3920 /* Perform sanity checks on the insn chain.
3921 1. Check that next/prev pointers are consistent in both the forward and
3922 reverse direction.
3923 2. Count insns in chain, going both directions, and check if equal.
3924 3. Check that get_last_insn () returns the actual end of chain. */
3925
3926 DEBUG_FUNCTION void
3927 verify_insn_chain (void)
3928 {
3929 rtx x, prevx, nextx;
3930 int insn_cnt1, insn_cnt2;
3931
3932 for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
3933 x != 0;
3934 prevx = x, insn_cnt1++, x = NEXT_INSN (x))
3935 gcc_assert (PREV_INSN (x) == prevx);
3936
3937 gcc_assert (prevx == get_last_insn ());
3938
3939 for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
3940 x != 0;
3941 nextx = x, insn_cnt2++, x = PREV_INSN (x))
3942 gcc_assert (NEXT_INSN (x) == nextx);
3943
3944 gcc_assert (insn_cnt1 == insn_cnt2);
3945 }
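
/* As a DEBUG_FUNCTION, verify_insn_chain is typically invoked by hand
   from a debugger rather than from pass code; for example, from gdb
   while stopped inside a pass:

       (gdb) call verify_insn_chain ()

   Any next/prev inconsistency trips one of the assertions above.  */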
3946 \f
3947 /* If we have assembler epilogues, the block falling through to exit must
3948 be the last one in the reordered chain when we reach final. Ensure
3949 that this condition is met. */
3950 static void
3951 fixup_fallthru_exit_predecessor (void)
3952 {
3953 edge e;
3954 basic_block bb = NULL;
3955
3956 /* This transformation is not valid before reload, because we might
3957 separate a call from the instruction that copies the return
3958 value. */
3959 gcc_assert (reload_completed);
3960
3961 e = find_fallthru_edge (EXIT_BLOCK_PTR->preds);
3962 if (e)
3963 bb = e->src;
3964
3965 if (bb && bb->aux)
3966 {
3967 basic_block c = ENTRY_BLOCK_PTR->next_bb;
3968
3969 /* If the very first block is the one with the fall-through exit
3970 edge, we have to split that block. */
3971 if (c == bb)
3972 {
3973 bb = split_block (bb, NULL)->dest;
3974 bb->aux = c->aux;
3975 c->aux = bb;
3976 BB_FOOTER (bb) = BB_FOOTER (c);
3977 BB_FOOTER (c) = NULL;
3978 }
3979
3980 while (c->aux != bb)
3981 c = (basic_block) c->aux;
3982
3983 c->aux = bb->aux;
3984 while (c->aux)
3985 c = (basic_block) c->aux;
3986
3987 c->aux = bb;
3988 bb->aux = NULL;
3989 }
3990 }
3991
3992 /* If the exit block has more than one fallthru predecessor, force it to
3993    have exactly one.  */
3994
3995 static void
3996 force_one_exit_fallthru (void)
3997 {
3998 edge e, predecessor = NULL;
3999 bool more = false;
4000 edge_iterator ei;
4001 basic_block forwarder, bb;
4002
4003 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
4004 if (e->flags & EDGE_FALLTHRU)
4005 {
4006 if (predecessor == NULL)
4007 predecessor = e;
4008 else
4009 {
4010 more = true;
4011 break;
4012 }
4013 }
4014
4015 if (!more)
4016 return;
4017
4018 /* Exit has several fallthru predecessors. Create a forwarder block for
4019 them. */
4020 forwarder = split_edge (predecessor);
4021 for (ei = ei_start (EXIT_BLOCK_PTR->preds); (e = ei_safe_edge (ei)); )
4022 {
4023 if (e->src == forwarder
4024 || !(e->flags & EDGE_FALLTHRU))
4025 ei_next (&ei);
4026 else
4027 redirect_edge_and_branch_force (e, forwarder);
4028 }
4029
4030 /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4031 exit block. */
4032 FOR_EACH_BB (bb)
4033 {
4034 if (bb->aux == NULL && bb != forwarder)
4035 {
4036 bb->aux = forwarder;
4037 break;
4038 }
4039 }
4040 }
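
/* An illustrative sketch of the transformation above (the block names
   are made up): if blocks A and B both fall through to EXIT,

       A   B                 A    B
        \ /         ==>       \  /
       EXIT                 FORWARDER
                                |
                              EXIT

   afterwards only the new forwarder block falls through into EXIT;
   the other predecessors reach it via redirected jumps.  */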
4041 \f
4042 /* Return true if it is possible to duplicate the basic block BB.  */
4043
4044 static bool
4045 cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4046 {
4047   /* Do not attempt to duplicate tablejumps, as we need to unshare
4048      the dispatch table.  This is difficult to do, as the instructions
4049      computing the jump destination may be hoisted outside the basic block.  */
4050 if (tablejump_p (BB_END (bb), NULL, NULL))
4051 return false;
4052
4053 /* Do not duplicate blocks containing insns that can't be copied. */
4054 if (targetm.cannot_copy_insn_p)
4055 {
4056 rtx insn = BB_HEAD (bb);
4057 while (1)
4058 {
4059 if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4060 return false;
4061 if (insn == BB_END (bb))
4062 break;
4063 insn = NEXT_INSN (insn);
4064 }
4065 }
4066
4067 return true;
4068 }
4069
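/* Duplicate the insns from FROM to TO, inclusive, emitting the copies
   at the end of the current insn chain, and return the first insn of
   the copy.  The copied chain is expected to be reordered into place
   by the caller.  */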
4070 rtx
4071 duplicate_insn_chain (rtx from, rtx to)
4072 {
4073 rtx insn, next, last, copy;
4074
4075   /* Avoid updating the boundaries of the previous basic block.  The
4076      note will be removed from the insn stream in the fixup.  */
4077 last = emit_note (NOTE_INSN_DELETED);
4078
4079   /* Create the copy at the end of the insn chain.  The chain will
4080      be reordered later.  */
4081 for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4082 {
4083 switch (GET_CODE (insn))
4084 {
4085 case DEBUG_INSN:
4086 /* Don't duplicate label debug insns. */
4087 if (TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4088 break;
4089 /* FALLTHRU */
4090 case INSN:
4091 case CALL_INSN:
4092 case JUMP_INSN:
4093 copy = emit_copy_of_insn_after (insn, get_last_insn ());
4094 if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4095 && ANY_RETURN_P (JUMP_LABEL (insn)))
4096 JUMP_LABEL (copy) = JUMP_LABEL (insn);
4097 maybe_copy_prologue_epilogue_insn (insn, copy);
4098 break;
4099
4100 case JUMP_TABLE_DATA:
4101 	  /* Avoid copying dispatch tables.  We never duplicate
4102 	     tablejumps, so this can be hit only if the table was
4103 	     moved far away from the original jump.
4104 	     Skip the following barrier as well, if any
4105 	     (and any debug insns in between).  */
4106 for (next = NEXT_INSN (insn);
4107 next != NEXT_INSN (to);
4108 next = NEXT_INSN (next))
4109 if (!DEBUG_INSN_P (next))
4110 break;
4111 if (next != NEXT_INSN (to) && BARRIER_P (next))
4112 insn = next;
4113 break;
4114
4115 case CODE_LABEL:
4116 break;
4117
4118 case BARRIER:
4119 emit_barrier ();
4120 break;
4121
4122 case NOTE:
4123 switch (NOTE_KIND (insn))
4124 {
4125 	      /* If the prologue is empty and the function contains a label
4126 		 in its first BB, we may want to copy the block.  */
4127 case NOTE_INSN_PROLOGUE_END:
4128
4129 case NOTE_INSN_DELETED:
4130 case NOTE_INSN_DELETED_LABEL:
4131 case NOTE_INSN_DELETED_DEBUG_LABEL:
4132 /* No problem to strip these. */
4133 case NOTE_INSN_FUNCTION_BEG:
4134 	      /* There is always just a single entry to the function.  */
4135 case NOTE_INSN_BASIC_BLOCK:
4136 /* We should only switch text sections once. */
4137 case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4138 break;
4139
4140 case NOTE_INSN_EPILOGUE_BEG:
4141 emit_note_copy (insn);
4142 break;
4143
4144 default:
4145 /* All other notes should have already been eliminated. */
4146 gcc_unreachable ();
4147 }
4148 break;
4149 default:
4150 gcc_unreachable ();
4151 }
4152 }
4153 insn = NEXT_INSN (last);
4154 delete_insn (last);
4155 return insn;
4156 }
4157
4158 /* Create a duplicate of the basic block BB. */
4159
4160 static basic_block
4161 cfg_layout_duplicate_bb (basic_block bb)
4162 {
4163 rtx insn;
4164 basic_block new_bb;
4165
4166 insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb));
4167 new_bb = create_basic_block (insn,
4168 insn ? get_last_insn () : NULL,
4169 EXIT_BLOCK_PTR->prev_bb);
4170
4171 BB_COPY_PARTITION (new_bb, bb);
4172 if (BB_HEADER (bb))
4173 {
4174 insn = BB_HEADER (bb);
4175 while (NEXT_INSN (insn))
4176 insn = NEXT_INSN (insn);
4177 insn = duplicate_insn_chain (BB_HEADER (bb), insn);
4178 if (insn)
4179 BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4180 }
4181
4182 if (BB_FOOTER (bb))
4183 {
4184 insn = BB_FOOTER (bb);
4185 while (NEXT_INSN (insn))
4186 insn = NEXT_INSN (insn);
4187 insn = duplicate_insn_chain (BB_FOOTER (bb), insn);
4188 if (insn)
4189 BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4190 }
4191
4192 return new_bb;
4193 }
4194
4195 \f
4196 /* Main entry point to this module - initialize the data structures for
4197    CFG layout changes.
4198 
4199    FLAGS is a set of additional flags to pass to cleanup_cfg ().  */
4200
4201 void
4202 cfg_layout_initialize (unsigned int flags)
4203 {
4204 rtx x;
4205 basic_block bb;
4206
4207 /* Once bb reordering is complete, cfg layout mode should not be re-entered.
4208 Entering cfg layout mode will perform optimizations on the cfg that
4209 could affect the bb layout negatively or even require fixups. An
4210 example of the latter is if edge forwarding performed when optimizing
4211 the cfg layout required moving a block from the hot to the cold section
4212 under -freorder-blocks-and-partition. This would create an illegal
4213 partitioning unless some manual fixup was performed. */
4214 gcc_assert (!crtl->bb_reorder_complete);
4215
4216 initialize_original_copy_tables ();
4217
4218 cfg_layout_rtl_register_cfg_hooks ();
4219
4220 record_effective_endpoints ();
4221
4222 /* Make sure that the targets of non local gotos are marked. */
4223 for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1))
4224 {
4225 bb = BLOCK_FOR_INSN (XEXP (x, 0));
4226 bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4227 }
4228
4229 cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4230 }
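
/* A minimal sketch of the expected calling protocol; pass_transform
   stands for a hypothetical pass body that reorders or duplicates
   blocks, while the bracketing calls are the real entry points:

       cfg_layout_initialize (0);
       pass_transform ();
       cfg_layout_finalize ();

   Between the two calls, fallthru edges need not connect adjacent
   blocks; cfg_layout_finalize relinks the insn chain and inserts the
   jumps needed to make the layout valid again.  */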
4231
4232 /* Splits superblocks. */
4233 void
4234 break_superblocks (void)
4235 {
4236 sbitmap superblocks;
4237 bool need = false;
4238 basic_block bb;
4239
4240 superblocks = sbitmap_alloc (last_basic_block);
4241 bitmap_clear (superblocks);
4242
4243 FOR_EACH_BB (bb)
4244 if (bb->flags & BB_SUPERBLOCK)
4245 {
4246 bb->flags &= ~BB_SUPERBLOCK;
4247 bitmap_set_bit (superblocks, bb->index);
4248 need = true;
4249 }
4250
4251 if (need)
4252 {
4253 rebuild_jump_labels (get_insns ());
4254 find_many_sub_basic_blocks (superblocks);
4255 }
4256
4257 free (superblocks);
4258 }
4259
4260 /* Finalize the changes: reorder insn list according to the sequence specified
4261 by aux pointers, enter compensation code, rebuild scope forest. */
4262
4263 void
4264 cfg_layout_finalize (void)
4265 {
4266 #ifdef ENABLE_CHECKING
4267 verify_flow_info ();
4268 #endif
4269 force_one_exit_fallthru ();
4270 rtl_register_cfg_hooks ();
4271 if (reload_completed
4272 #ifdef HAVE_epilogue
4273 && !HAVE_epilogue
4274 #endif
4275 )
4276 fixup_fallthru_exit_predecessor ();
4277 fixup_reorder_chain ();
4278
4279 rebuild_jump_labels (get_insns ());
4280 delete_dead_jumptables ();
4281
4282 #ifdef ENABLE_CHECKING
4283 verify_insn_chain ();
4284 verify_flow_info ();
4285 #endif
4286 }
4287
4288
4289 /* Same as split_block but update cfg_layout structures. */
4290
4291 static basic_block
4292 cfg_layout_split_block (basic_block bb, void *insnp)
4293 {
4294 rtx insn = (rtx) insnp;
4295 basic_block new_bb = rtl_split_block (bb, insn);
4296
4297 BB_FOOTER (new_bb) = BB_FOOTER (bb);
4298 BB_FOOTER (bb) = NULL;
4299
4300 return new_bb;
4301 }
4302
4303 /* Redirect edge E to DEST.  */
4304 static edge
4305 cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4306 {
4307 basic_block src = e->src;
4308 edge ret;
4309
4310 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4311 return NULL;
4312
4313 if (e->dest == dest)
4314 return e;
4315
4316 if (e->src != ENTRY_BLOCK_PTR
4317 && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4318 {
4319 df_set_bb_dirty (src);
4320 return ret;
4321 }
4322
4323 if (e->src == ENTRY_BLOCK_PTR
4324 && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4325 {
4326 if (dump_file)
4327 fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4328 e->src->index, dest->index);
4329
4330 df_set_bb_dirty (e->src);
4331 redirect_edge_succ (e, dest);
4332 return e;
4333 }
4334
4335   /* redirect_edge_and_branch may decide to turn the branch into a fallthru
4336      edge in the case the basic blocks appear to be in sequence.  Avoid this
4337      transformation.  */
4338
4339 if (e->flags & EDGE_FALLTHRU)
4340 {
4341 /* Redirect any branch edges unified with the fallthru one. */
4342 if (JUMP_P (BB_END (src))
4343 && label_is_jump_target_p (BB_HEAD (e->dest),
4344 BB_END (src)))
4345 {
4346 edge redirected;
4347
4348 if (dump_file)
4349 fprintf (dump_file, "Fallthru edge unified with branch "
4350 "%i->%i redirected to %i\n",
4351 e->src->index, e->dest->index, dest->index);
4352 e->flags &= ~EDGE_FALLTHRU;
4353 redirected = redirect_branch_edge (e, dest);
4354 gcc_assert (redirected);
4355 redirected->flags |= EDGE_FALLTHRU;
4356 df_set_bb_dirty (redirected->src);
4357 return redirected;
4358 }
4359 /* In case we are redirecting fallthru edge to the branch edge
4360 of conditional jump, remove it. */
4361 if (EDGE_COUNT (src->succs) == 2)
4362 {
4363 /* Find the edge that is different from E. */
4364 edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4365
4366 if (s->dest == dest
4367 && any_condjump_p (BB_END (src))
4368 && onlyjump_p (BB_END (src)))
4369 delete_insn (BB_END (src));
4370 }
4371 if (dump_file)
4372 fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4373 e->src->index, e->dest->index, dest->index);
4374 ret = redirect_edge_succ_nodup (e, dest);
4375 }
4376 else
4377 ret = redirect_branch_edge (e, dest);
4378
4379 /* We don't want simplejumps in the insn stream during cfglayout. */
4380 gcc_assert (!simplejump_p (BB_END (src)));
4381
4382 df_set_bb_dirty (src);
4383 return ret;
4384 }
4385
4386 /* Simple wrapper, as we can always redirect fallthru edges.  */
4387 static basic_block
4388 cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4389 {
4390 edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4391
4392 gcc_assert (redirected);
4393 return NULL;
4394 }
4395
4396 /* Same as delete_basic_block but update cfg_layout structures. */
4397
4398 static void
4399 cfg_layout_delete_block (basic_block bb)
4400 {
4401   rtx insn, next, prev = PREV_INSN (BB_HEAD (bb)), *to, remains;
4402
4403 if (BB_HEADER (bb))
4404 {
4405 next = BB_HEAD (bb);
4406 if (prev)
4407 NEXT_INSN (prev) = BB_HEADER (bb);
4408 else
4409 set_first_insn (BB_HEADER (bb));
4410 PREV_INSN (BB_HEADER (bb)) = prev;
4411 insn = BB_HEADER (bb);
4412 while (NEXT_INSN (insn))
4413 insn = NEXT_INSN (insn);
4414 NEXT_INSN (insn) = next;
4415 PREV_INSN (next) = insn;
4416 }
4417 next = NEXT_INSN (BB_END (bb));
4418 if (BB_FOOTER (bb))
4419 {
4420 insn = BB_FOOTER (bb);
4421 while (insn)
4422 {
4423 if (BARRIER_P (insn))
4424 {
4425 if (PREV_INSN (insn))
4426 NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4427 else
4428 BB_FOOTER (bb) = NEXT_INSN (insn);
4429 if (NEXT_INSN (insn))
4430 PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4431 }
4432 if (LABEL_P (insn))
4433 break;
4434 insn = NEXT_INSN (insn);
4435 }
4436 if (BB_FOOTER (bb))
4437 {
4438 insn = BB_END (bb);
4439 NEXT_INSN (insn) = BB_FOOTER (bb);
4440 PREV_INSN (BB_FOOTER (bb)) = insn;
4441 while (NEXT_INSN (insn))
4442 insn = NEXT_INSN (insn);
4443 NEXT_INSN (insn) = next;
4444 if (next)
4445 PREV_INSN (next) = insn;
4446 else
4447 set_last_insn (insn);
4448 }
4449 }
4450 if (bb->next_bb != EXIT_BLOCK_PTR)
4451 to = &BB_HEADER (bb->next_bb);
4452 else
4453 to = &cfg_layout_function_footer;
4454
4455 rtl_delete_block (bb);
4456
4457 if (prev)
4458 prev = NEXT_INSN (prev);
4459 else
4460 prev = get_insns ();
4461 if (next)
4462 next = PREV_INSN (next);
4463 else
4464 next = get_last_insn ();
4465
4466 if (next && NEXT_INSN (next) != prev)
4467 {
4468       remains = unlink_insn_chain (prev, next);
4469       insn = remains;
4470 while (NEXT_INSN (insn))
4471 insn = NEXT_INSN (insn);
4472 NEXT_INSN (insn) = *to;
4473 if (*to)
4474 PREV_INSN (*to) = insn;
4475       *to = remains;
4476 }
4477 }
4478
4479 /* Return true when blocks A and B can be safely merged. */
4480
4481 static bool
4482 cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4483 {
4484 /* If we are partitioning hot/cold basic blocks, we don't want to
4485 mess up unconditional or indirect jumps that cross between hot
4486 and cold sections.
4487
4488 Basic block partitioning may result in some jumps that appear to
4489 be optimizable (or blocks that appear to be mergeable), but which really
4490 must be left untouched (they are required to make it safely across
4491 partition boundaries). See the comments at the top of
4492 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
4493
4494 if (BB_PARTITION (a) != BB_PARTITION (b))
4495 return false;
4496
4497 /* Protect the loop latches. */
4498 if (current_loops && b->loop_father->latch == b)
4499 return false;
4500
4501 /* If we would end up moving B's instructions, make sure it doesn't fall
4502 through into the exit block, since we cannot recover from a fallthrough
4503 edge into the exit block occurring in the middle of a function. */
4504 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4505 {
4506 edge e = find_fallthru_edge (b->succs);
4507 if (e && e->dest == EXIT_BLOCK_PTR)
4508 return false;
4509 }
4510
4511 /* There must be exactly one edge in between the blocks. */
4512 return (single_succ_p (a)
4513 && single_succ (a) == b
4514 	  && single_pred_p (b)
4515 && a != b
4516 /* Must be simple edge. */
4517 && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4518 && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
4519 /* If the jump insn has side effects, we can't kill the edge.
4520 When not optimizing, try_redirect_by_replacing_jump will
4521 not allow us to redirect an edge by replacing a table jump. */
4522 && (!JUMP_P (BB_END (a))
4523 || ((!optimize || reload_completed)
4524 ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4525 }
4526
4527 /* Merge block A and B. The blocks must be mergeable. */
4528
4529 static void
4530 cfg_layout_merge_blocks (basic_block a, basic_block b)
4531 {
4532 bool forwarder_p = (b->flags & BB_FORWARDER_BLOCK) != 0;
4533 rtx insn;
4534
4535 gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4536
4537 if (dump_file)
4538 fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4539 a->index);
4540
4541 /* If there was a CODE_LABEL beginning B, delete it. */
4542 if (LABEL_P (BB_HEAD (b)))
4543 {
4544 delete_insn (BB_HEAD (b));
4545 }
4546
4547   /* We should have a fallthru edge in A; otherwise we can do a dummy
4548      redirection to get it cleaned up.  */
4549 if (JUMP_P (BB_END (a)))
4550 try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4551 gcc_assert (!JUMP_P (BB_END (a)));
4552
4553   /* When not optimizing the CFG, if the edge is the only place in RTL
4554      which holds some unique locus, emit a nop with that locus in between.  */
4555 if (!optimize)
4556 emit_nop_for_unique_locus_between (a, b);
4557
4558 /* Move things from b->footer after a->footer. */
4559 if (BB_FOOTER (b))
4560 {
4561 if (!BB_FOOTER (a))
4562 BB_FOOTER (a) = BB_FOOTER (b);
4563 else
4564 {
4565 rtx last = BB_FOOTER (a);
4566
4567 while (NEXT_INSN (last))
4568 last = NEXT_INSN (last);
4569 NEXT_INSN (last) = BB_FOOTER (b);
4570 PREV_INSN (BB_FOOTER (b)) = last;
4571 }
4572 BB_FOOTER (b) = NULL;
4573 }
4574
4575 /* Move things from b->header before a->footer.
4576 Note that this may include dead tablejump data, but we don't clean
4577 those up until we go out of cfglayout mode. */
4578 if (BB_HEADER (b))
4579 {
4580 if (! BB_FOOTER (a))
4581 BB_FOOTER (a) = BB_HEADER (b);
4582 else
4583 {
4584 rtx last = BB_HEADER (b);
4585
4586 while (NEXT_INSN (last))
4587 last = NEXT_INSN (last);
4588 NEXT_INSN (last) = BB_FOOTER (a);
4589 PREV_INSN (BB_FOOTER (a)) = last;
4590 BB_FOOTER (a) = BB_HEADER (b);
4591 }
4592 BB_HEADER (b) = NULL;
4593 }
4594
4595   /* If the basic blocks are not adjacent, move them around.  */
4596 if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4597 {
4598 insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4599
4600 emit_insn_after_noloc (insn, BB_END (a), a);
4601 }
4602 /* Otherwise just re-associate the instructions. */
4603 else
4604 {
4605 insn = BB_HEAD (b);
4606 BB_END (a) = BB_END (b);
4607 }
4608
4609   /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4610      We need to call it explicitly.  */
4611 update_bb_for_insn_chain (insn, BB_END (b), a);
4612
4613 /* Skip possible DELETED_LABEL insn. */
4614 if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4615 insn = NEXT_INSN (insn);
4616 gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4617 BB_HEAD (b) = BB_END (b) = NULL;
4618 delete_insn (insn);
4619
4620 df_bb_delete (b->index);
4621
4622 /* If B was a forwarder block, propagate the locus on the edge. */
4623 if (forwarder_p
4624 && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION)
4625 EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4626
4627 if (dump_file)
4628 fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4629 }
4630
4631 /* Split edge E. */
4632
4633 static basic_block
4634 cfg_layout_split_edge (edge e)
4635 {
4636 basic_block new_bb =
4637 create_basic_block (e->src != ENTRY_BLOCK_PTR
4638 ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4639 NULL_RTX, e->src);
4640
4641 if (e->dest == EXIT_BLOCK_PTR)
4642 BB_COPY_PARTITION (new_bb, e->src);
4643 else
4644 BB_COPY_PARTITION (new_bb, e->dest);
4645 make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4646 redirect_edge_and_branch_force (e, new_bb);
4647
4648 return new_bb;
4649 }
4650
4651 /* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
4652
4653 static void
4654 rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4655 {
4656 }
4657
4658 /* Return true if BB contains only labels or non-executable
4659 instructions. */
4660
4661 static bool
4662 rtl_block_empty_p (basic_block bb)
4663 {
4664 rtx insn;
4665
4666 if (bb == ENTRY_BLOCK_PTR || bb == EXIT_BLOCK_PTR)
4667 return true;
4668
4669 FOR_BB_INSNS (bb, insn)
4670 if (NONDEBUG_INSN_P (insn) && !any_uncondjump_p (insn))
4671 return false;
4672
4673 return true;
4674 }
4675
4676 /* Split a basic block if it ends with a conditional branch and if
4677 the other part of the block is not empty. */
4678
4679 static basic_block
4680 rtl_split_block_before_cond_jump (basic_block bb)
4681 {
4682 rtx insn;
4683 rtx split_point = NULL;
4684 rtx last = NULL;
4685 bool found_code = false;
4686
4687 FOR_BB_INSNS (bb, insn)
4688 {
4689 if (any_condjump_p (insn))
4690 split_point = last;
4691 else if (NONDEBUG_INSN_P (insn))
4692 found_code = true;
4693 last = insn;
4694 }
4695
4696   /* Only split if we found both a condjump and other real code.  */
4697 if (found_code && split_point)
4698 return split_block (bb, split_point)->dest;
4699 else
4700 return NULL;
4701 }
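
/* An illustrative example (hypothetical block contents): a block of
   the form

       insn_1
       insn_2
       if (cc) goto L;

   is split so that insn_1 and insn_2 stay in BB while the conditional
   jump starts the new block; a block containing nothing besides the
   jump is left untouched and NULL is returned.  */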
4702
4703 /* Return 1 if BB ends with a call, possibly followed by some
4704 instructions that must stay with the call, 0 otherwise. */
4705
4706 static bool
4707 rtl_block_ends_with_call_p (basic_block bb)
4708 {
4709 rtx insn = BB_END (bb);
4710
4711 while (!CALL_P (insn)
4712 && insn != BB_HEAD (bb)
4713 && (keep_with_call_p (insn)
4714 || NOTE_P (insn)
4715 || DEBUG_INSN_P (insn)))
4716 insn = PREV_INSN (insn);
4717 return (CALL_P (insn));
4718 }
4719
4720 /* Return 1 if BB ends with a conditional branch, 0 otherwise. */
4721
4722 static bool
4723 rtl_block_ends_with_condjump_p (const_basic_block bb)
4724 {
4725 return any_condjump_p (BB_END (bb));
4726 }
4727
4728 /* Return true if we need to add a fake edge to the exit block.
4729    Helper function for rtl_flow_call_edges_add.  */
4730
4731 static bool
4732 need_fake_edge_p (const_rtx insn)
4733 {
4734 if (!INSN_P (insn))
4735 return false;
4736
4737 if ((CALL_P (insn)
4738 && !SIBLING_CALL_P (insn)
4739 && !find_reg_note (insn, REG_NORETURN, NULL)
4740 && !(RTL_CONST_OR_PURE_CALL_P (insn))))
4741 return true;
4742
4743 return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
4744 && MEM_VOLATILE_P (PATTERN (insn)))
4745 || (GET_CODE (PATTERN (insn)) == PARALLEL
4746 && asm_noperands (insn) != -1
4747 && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
4748 || GET_CODE (PATTERN (insn)) == ASM_INPUT);
4749 }
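
/* Illustrative cases (not exhaustive): a plain call needs a fake edge
   because the callee might never return, while sibling calls, calls
   with a REG_NORETURN note and const/pure calls do not.  An extended
   asm needs one only when it is volatile, e.g.

       asm volatile ("" : : : "memory");

   whose RTL pattern has MEM_VOLATILE_P set; old-style ASM_INPUT
   patterns always get one.  */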
4750
4751 /* Add fake edges to the function exit for any non-constant and non-noreturn
4752    calls, and for volatile inline assembly, in the bitmap of blocks specified
4753    by BLOCKS, or in the whole CFG if BLOCKS is zero.  Return the number of
4754    blocks that were split.
4755
4756 The goal is to expose cases in which entering a basic block does not imply
4757 that all subsequent instructions must be executed. */
4758
4759 static int
4760 rtl_flow_call_edges_add (sbitmap blocks)
4761 {
4762 int i;
4763 int blocks_split = 0;
4764 int last_bb = last_basic_block;
4765 bool check_last_block = false;
4766
4767 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
4768 return 0;
4769
4770 if (! blocks)
4771 check_last_block = true;
4772 else
4773 check_last_block = bitmap_bit_p (blocks, EXIT_BLOCK_PTR->prev_bb->index);
4774
4775 /* In the last basic block, before epilogue generation, there will be
4776 a fallthru edge to EXIT. Special care is required if the last insn
4777 of the last basic block is a call because make_edge folds duplicate
4778 edges, which would result in the fallthru edge also being marked
4779 fake, which would result in the fallthru edge being removed by
4780 remove_fake_edges, which would result in an invalid CFG.
4781
4782 Moreover, we can't elide the outgoing fake edge, since the block
4783 profiler needs to take this into account in order to solve the minimal
4784 spanning tree in the case that the call doesn't return.
4785
4786 Handle this by adding a dummy instruction in a new last basic block. */
4787 if (check_last_block)
4788 {
4789 basic_block bb = EXIT_BLOCK_PTR->prev_bb;
4790 rtx insn = BB_END (bb);
4791
4792 /* Back up past insns that must be kept in the same block as a call. */
4793 while (insn != BB_HEAD (bb)
4794 && keep_with_call_p (insn))
4795 insn = PREV_INSN (insn);
4796
4797 if (need_fake_edge_p (insn))
4798 {
4799 edge e;
4800
4801 e = find_edge (bb, EXIT_BLOCK_PTR);
4802 if (e)
4803 {
4804 insert_insn_on_edge (gen_use (const0_rtx), e);
4805 commit_edge_insertions ();
4806 }
4807 }
4808 }
4809
4810   /* Now add fake edges to the function exit for any non-constant
4811      calls since there is no way that we can determine if they will
4812      return or not...  */
4813
4814 for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
4815 {
4816 basic_block bb = BASIC_BLOCK (i);
4817 rtx insn;
4818 rtx prev_insn;
4819
4820 if (!bb)
4821 continue;
4822
4823 if (blocks && !bitmap_bit_p (blocks, i))
4824 continue;
4825
4826 for (insn = BB_END (bb); ; insn = prev_insn)
4827 {
4828 prev_insn = PREV_INSN (insn);
4829 if (need_fake_edge_p (insn))
4830 {
4831 edge e;
4832 rtx split_at_insn = insn;
4833
4834 /* Don't split the block between a call and an insn that should
4835 remain in the same block as the call. */
4836 if (CALL_P (insn))
4837 while (split_at_insn != BB_END (bb)
4838 && keep_with_call_p (NEXT_INSN (split_at_insn)))
4839 split_at_insn = NEXT_INSN (split_at_insn);
4840
4841 	  /* The handling above of the final block before the epilogue
4842 	     should be enough to verify that there is no edge to the exit
4843 	     block in the CFG already.  Calling make_edge in such a case
4844 	     would cause us to mark that edge as fake and remove it later.  */
4845
4846 #ifdef ENABLE_CHECKING
4847 if (split_at_insn == BB_END (bb))
4848 {
4849 e = find_edge (bb, EXIT_BLOCK_PTR);
4850 gcc_assert (e == NULL);
4851 }
4852 #endif
4853
4854 /* Note that the following may create a new basic block
4855 and renumber the existing basic blocks. */
4856 if (split_at_insn != BB_END (bb))
4857 {
4858 e = split_block (bb, split_at_insn);
4859 if (e)
4860 blocks_split++;
4861 }
4862
4863 make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
4864 }
4865
4866 if (insn == BB_HEAD (bb))
4867 break;
4868 }
4869 }
4870
4871 if (blocks_split)
4872 verify_flow_info ();
4873
4874 return blocks_split;
4875 }
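
/* A worked example (hypothetical source): given a block

       foo ();
       x = y + z;

   where foo is neither const, pure nor noreturn, the block is split
   after the call and a bb -> EXIT edge marked EDGE_FAKE is added, so
   the profiler does not assume that entering the block implies
   reaching the assignment.  */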
4876
4877 /* Add COMP_RTX as a condition at the end of COND_BB.  FIRST_HEAD is
4878    the conditional branch target, SECOND_HEAD would be the fall-thru head.
4879    SECOND_HEAD is unused here because the loop versioning code handles the
4880    fall-thru itself; the parameter is kept because the tree-level variant
4881    of this hook needs it, and both must have the same type.  */
4882 static void
4883 rtl_lv_add_condition_to_bb (basic_block first_head,
4884 basic_block second_head ATTRIBUTE_UNUSED,
4885 basic_block cond_bb, void *comp_rtx)
4886 {
4887 rtx label, seq, jump;
4888 rtx op0 = XEXP ((rtx)comp_rtx, 0);
4889 rtx op1 = XEXP ((rtx)comp_rtx, 1);
4890 enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
4891 enum machine_mode mode;
4892
4893
4894 label = block_label (first_head);
4895 mode = GET_MODE (op0);
4896 if (mode == VOIDmode)
4897 mode = GET_MODE (op1);
4898
4899 start_sequence ();
4900 op0 = force_operand (op0, NULL_RTX);
4901 op1 = force_operand (op1, NULL_RTX);
4902 do_compare_rtx_and_jump (op0, op1, comp, 0,
4903 mode, NULL_RTX, NULL_RTX, label, -1);
4904 jump = get_last_insn ();
4905 JUMP_LABEL (jump) = label;
4906 LABEL_NUSES (label)++;
4907 seq = get_insns ();
4908 end_sequence ();
4909
4910   /* Add the new condition at the end of COND_BB.  */
4911 emit_insn_after (seq, BB_END (cond_bb));
4912 }
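
/* A hedged sketch of a caller; the operand names are hypothetical,
   but lv_add_condition_to_bb is the cfghooks.h wrapper that dispatches
   here in cfglayout mode:

       rtx cond = gen_rtx_fmt_ee (LT, VOIDmode, op0, op1);
       lv_add_condition_to_bb (true_head, false_head, cond_bb, cond);

   GET_CODE and the two XEXPs of COND are then unpacked above to emit
   the compare-and-branch sequence.  */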
4913
4914
4915 /* Given a block B with a conditional branch at its end, store the
4916    branch edge in *BRANCH_EDGE and the fall-thru edge in *FALLTHRU_EDGE
4917    respectively.  */
4918 static void
4919 rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
4920 edge *fallthru_edge)
4921 {
4922 edge e = EDGE_SUCC (b, 0);
4923
4924 if (e->flags & EDGE_FALLTHRU)
4925 {
4926 *fallthru_edge = e;
4927 *branch_edge = EDGE_SUCC (b, 1);
4928 }
4929 else
4930 {
4931 *branch_edge = e;
4932 *fallthru_edge = EDGE_SUCC (b, 1);
4933 }
4934 }
4935
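/* Allocate and initialize the RTL-specific part of BB's info
   structure.  BB must not already have RTL info attached.  */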
4936 void
4937 init_rtl_bb_info (basic_block bb)
4938 {
4939 gcc_assert (!bb->il.x.rtl);
4940 bb->il.x.head_ = NULL;
4941 bb->il.x.rtl = ggc_alloc_cleared_rtl_bb_info ();
4942 }
4943
4944 /* Returns true if it is possible to remove edge E by redirecting
4945 it to the destination of the other edge from E->src. */
4946
4947 static bool
4948 rtl_can_remove_branch_p (const_edge e)
4949 {
4950 const_basic_block src = e->src;
4951 const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
4952 const_rtx insn = BB_END (src), set;
4953
4954 /* The conditions are taken from try_redirect_by_replacing_jump. */
4955 if (target == EXIT_BLOCK_PTR)
4956 return false;
4957
4958 if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4959 return false;
4960
4961 if (BB_PARTITION (src) != BB_PARTITION (target))
4962 return false;
4963
4964 if (!onlyjump_p (insn)
4965 || tablejump_p (insn, NULL, NULL))
4966 return false;
4967
4968 set = single_set (insn);
4969 if (!set || side_effects_p (set))
4970 return false;
4971
4972 return true;
4973 }
4974
4975 static basic_block
4976 rtl_duplicate_bb (basic_block bb)
4977 {
4978 bb = cfg_layout_duplicate_bb (bb);
4979 bb->aux = NULL;
4980 return bb;
4981 }
4982
4983 /* Do book-keeping of basic block BB for the profile consistency checker.
4984    If AFTER_PASS is 0, do pre-pass accounting; if AFTER_PASS is 1,
4985    do post-pass accounting.  Store the counts in RECORD.  */
4986 static void
4987 rtl_account_profile_record (basic_block bb, int after_pass,
4988 struct profile_record *record)
4989 {
4990 rtx insn;
4991 FOR_BB_INSNS (bb, insn)
4992 if (INSN_P (insn))
4993 {
4994 record->size[after_pass]
4995 += insn_rtx_cost (PATTERN (insn), false);
4996 if (profile_status == PROFILE_READ)
4997 record->time[after_pass]
4998 += insn_rtx_cost (PATTERN (insn), true) * bb->count;
4999 else if (profile_status == PROFILE_GUESSED)
5000 record->time[after_pass]
5001 += insn_rtx_cost (PATTERN (insn), true) * bb->frequency;
5002 }
5003 }
5004
5005 /* Implementation of CFG manipulation for linearized RTL. */
5006 struct cfg_hooks rtl_cfg_hooks = {
5007 "rtl",
5008 rtl_verify_flow_info,
5009 rtl_dump_bb,
5010 rtl_dump_bb_for_graph,
5011 rtl_create_basic_block,
5012 rtl_redirect_edge_and_branch,
5013 rtl_redirect_edge_and_branch_force,
5014 rtl_can_remove_branch_p,
5015 rtl_delete_block,
5016 rtl_split_block,
5017 rtl_move_block_after,
5018 rtl_can_merge_blocks, /* can_merge_blocks_p */
5019 rtl_merge_blocks,
5020 rtl_predict_edge,
5021 rtl_predicted_by_p,
5022 cfg_layout_can_duplicate_bb_p,
5023 rtl_duplicate_bb,
5024 rtl_split_edge,
5025 rtl_make_forwarder_block,
5026 rtl_tidy_fallthru_edge,
5027 rtl_force_nonfallthru,
5028 rtl_block_ends_with_call_p,
5029 rtl_block_ends_with_condjump_p,
5030 rtl_flow_call_edges_add,
5031 NULL, /* execute_on_growing_pred */
5032 NULL, /* execute_on_shrinking_pred */
5033 NULL, /* duplicate loop for trees */
5034 NULL, /* lv_add_condition_to_bb */
5035 NULL, /* lv_adjust_loop_header_phi*/
5036 NULL, /* extract_cond_bb_edges */
5037 NULL, /* flush_pending_stmts */
5038 rtl_block_empty_p, /* block_empty_p */
5039 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5040 rtl_account_profile_record,
5041 };
5042
5043 /* Implementation of CFG manipulation for cfg layout RTL, where
5044    basic blocks connected via fallthru edges do not have to be adjacent.
5045    This representation will hopefully become the default one in a future
5046    version of the compiler.  */
5047
5048 struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
5049 "cfglayout mode",
5050 rtl_verify_flow_info_1,
5051 rtl_dump_bb,
5052 rtl_dump_bb_for_graph,
5053 cfg_layout_create_basic_block,
5054 cfg_layout_redirect_edge_and_branch,
5055 cfg_layout_redirect_edge_and_branch_force,
5056 rtl_can_remove_branch_p,
5057 cfg_layout_delete_block,
5058 cfg_layout_split_block,
5059 rtl_move_block_after,
5060 cfg_layout_can_merge_blocks_p,
5061 cfg_layout_merge_blocks,
5062 rtl_predict_edge,
5063 rtl_predicted_by_p,
5064 cfg_layout_can_duplicate_bb_p,
5065 cfg_layout_duplicate_bb,
5066 cfg_layout_split_edge,
5067 rtl_make_forwarder_block,
5068 NULL, /* tidy_fallthru_edge */
5069 rtl_force_nonfallthru,
5070 rtl_block_ends_with_call_p,
5071 rtl_block_ends_with_condjump_p,
5072 rtl_flow_call_edges_add,
5073 NULL, /* execute_on_growing_pred */
5074 NULL, /* execute_on_shrinking_pred */
5075 duplicate_loop_to_header_edge, /* duplicate loop for trees */
5076 rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
5077 NULL, /* lv_adjust_loop_header_phi*/
5078 rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
5079 NULL, /* flush_pending_stmts */
5080 rtl_block_empty_p, /* block_empty_p */
5081 rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
5082 rtl_account_profile_record,
5083 };
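
/* Passes do not use these tables directly; one of them is installed
   via rtl_register_cfg_hooks () or cfg_layout_rtl_register_cfg_hooks ()
   and the generic wrappers in cfghooks.h dispatch through whichever
   table is current, so the same pass code works in either mode.  */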
5084
5085 #include "gt-cfgrtl.h"